AttributeError: 'Tensor' object has no attribute 'keys' in a ComposedSeq2Seq model

Hi,
Thank you for the great library.

I’m doing data-to-text generation, and for that I’m building a seq2seq model with ComposedSeq2Seq: the encoder is a StackedSelfAttentionEncoder and the decoder is an AutoRegressiveSeqDecoder with a StackedSelfAttentionDecoderNet.
During the forward pass I get an AttributeError. My full code and the traceback are below.

import torch.optim as optim

from allennlp.data import PyTorchDataLoader, Vocabulary
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.modules.seq2seq_encoders import StackedSelfAttentionEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.training import GradientDescentTrainer
from allennlp_models.generation import (AutoRegressiveSeqDecoder, ComposedSeq2Seq,
                                        Seq2SeqDatasetReader, StackedSelfAttentionDecoderNet)

reader = Seq2SeqDatasetReader(
    source_tokenizer=WhitespaceTokenizer(),
    target_tokenizer=WhitespaceTokenizer(),
    source_token_indexers={'tokens': SingleIdTokenIndexer()},
    target_token_indexers={'tokens': SingleIdTokenIndexer(namespace='target_tokens')})

train_dataset = reader.read(train_path)
validation_dataset = reader.read(val_path)
test_dataset = reader.read(test_path)
vocab = Vocabulary.from_instances(train_dataset + validation_dataset,
                                  min_count={'tokens': 1, 'target_tokens': 1})

train_dataset.index_with(vocab)
validation_dataset.index_with(vocab)

SRC_EMBEDDING_DIM = 128
TG_EMBEDDING_DIM = 128
HIDDEN_DIM = 512
enc_layers = 3
dec_layers = 3
enc_heads = 2
dec_heads = 2
ff_dim = 512
proj_dim = 128
enc_dropout = 0.2
dec_dropout = 0.2
max_decoding_steps = 40
beam = 3
CUDA_DEVICE = 0

src_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
                          embedding_dim=SRC_EMBEDDING_DIM)
source_embedder = BasicTextFieldEmbedder({"tokens": src_embedding})

trg_embedding = Embedding(num_embeddings=vocab.get_vocab_size('target_tokens'),
                          embedding_dim=TG_EMBEDDING_DIM)
target_embedder = BasicTextFieldEmbedder({"target_tokens": trg_embedding})

encoder = StackedSelfAttentionEncoder(
    input_dim=SRC_EMBEDDING_DIM, hidden_dim=HIDDEN_DIM, projection_dim=proj_dim,
    feedforward_hidden_dim=ff_dim, num_layers=enc_layers, num_attention_heads=enc_heads,
    dropout_prob=enc_dropout, use_positional_encoding=True)

decoder_net = StackedSelfAttentionDecoderNet(
    decoding_dim=TG_EMBEDDING_DIM, target_embedding_dim=TG_EMBEDDING_DIM,
    feedforward_hidden_dim=128, num_layers=dec_layers, num_attention_heads=dec_heads,
    use_positional_encoding=True, dropout_prob=dec_dropout)

decoder = AutoRegressiveSeqDecoder(
    vocab=vocab, decoder_net=decoder_net, max_decoding_steps=max_decoding_steps,
    target_embedder=target_embedder, target_namespace='target_tokens', beam_size=beam)

model = ComposedSeq2Seq(vocab=vocab, source_text_embedder=source_embedder,
                        encoder=encoder, decoder=decoder)

model = model.cuda(CUDA_DEVICE)

optimizer = optim.Adam(model.parameters(), lr=0.0005)

train_data_loader = PyTorchDataLoader(train_dataset,
                                      batch_sampler=BucketBatchSampler(train_dataset, batch_size=16))
dev_data_loader = PyTorchDataLoader(validation_dataset,
                                    batch_sampler=BucketBatchSampler(validation_dataset, batch_size=16))

trainer = GradientDescentTrainer(model=model, optimizer=optimizer, data_loader=train_data_loader,
                                 validation_data_loader=dev_data_loader, num_epochs=3)

trainer.train()

When training starts, the following error is raised:

Traceback (most recent call last):
  File "seq2seq_composed.py", line 158, in <module>
    trainer.train()
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp/training/trainer.py", line 867, in train
    train_metrics = self._train_epoch(epoch)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp/training/trainer.py", line 589, in _train_epoch
    batch_outputs = self.batch_outputs(batch, for_training=True)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp/training/trainer.py", line 479, in batch_outputs
    output_dict = self._pytorch_model(**batch)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp_models/generation/models/composed_seq2seq.py", line 121, in forward
    return self._decoder(state, target_tokens)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp_models/generation/modules/seq_decoders/auto_regressive.py", line 416, in forward
    output_dict = self._forward_loss(state_forward_loss, target_tokens)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp_models/generation/modules/seq_decoders/auto_regressive.py", line 161, in _forward_loss
    target_embedding = self.target_embedder(targets)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/CE/skr/anaconda3/lib/python3.6/site-packages/allennlp/modules/text_field_embedders/basic_text_field_embedder.py", line 56, in forward
    if self._token_embedders.keys() != text_field_input.keys():
AttributeError: 'Tensor' object has no attribute 'keys'

Specs:
OS: Linux
Python: 3.6.5
AllenNLP: 1.1.0
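
For what it’s worth, here is my guess at the cause after reading the traceback (an assumption on my part, not verified against the library internals): BasicTextFieldEmbedder.forward expects the dict of tensors that a TextField produces, but AutoRegressiveSeqDecoder._forward_loss calls self.target_embedder(targets) with a plain tensor of target token ids, so the .keys() comparison fails. A minimal standalone sketch of the mismatch:

import torch
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding

# Wrap an Embedding in a BasicTextFieldEmbedder, as in my model above
# (the vocabulary sizes here are made up for the repro).
target_embedder = BasicTextFieldEmbedder(
    {"target_tokens": Embedding(num_embeddings=10, embedding_dim=4)})

# The decoder hands the embedder a raw LongTensor of token ids, not the
# nested dict a TextField would produce.
targets = torch.zeros(2, 5, dtype=torch.long)
target_embedder(targets)  # AttributeError: 'Tensor' object has no attribute 'keys'

If I read the signature correctly, AutoRegressiveSeqDecoder annotates target_embedder as Embedding, so my guess at a fix is to pass the Embedding itself instead of wrapping it:

decoder = AutoRegressiveSeqDecoder(
    vocab=vocab, decoder_net=decoder_net, max_decoding_steps=max_decoding_steps,
    target_embedder=trg_embedding,  # the Embedding directly, no BasicTextFieldEmbedder
    target_namespace='target_tokens', beam_size=beam)

Is that the intended usage, or should a TextFieldEmbedder also work here?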