trainer = Trainer(model=model,
                  optimizer=optimizer,
                  iterator=iterator,
                  train_dataset=train_dataset,
                  validation_dataset=validation_dataset,
                  patience=10,
                  num_epochs=40,
                  histogram_interval=100,  # log parameter histograms every 100 batches
                  should_log_learning_rate=True)

serialization_dir = '/tmp/anything100'
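# Three separate SummaryWriters: one for training metrics, one for validation
# metrics, and one just for the embedding projections.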
another_log = SummaryWriter(os.path.join(serialization_dir, "log", "embeddings"))
train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
validation_log = SummaryWriter(os.path.join(serialization_dir, "log", "validation"))


# The Trainer was built without a serialization_dir, so its internal TensorboardWriter
# has nowhere to log; point it at the writers created above instead.
# Note: _tensorboard is a private attribute, so this relies on AllenNLP internals.
trainer._tensorboard = TensorboardWriter(train_log=train_log, validation_log=validation_log)

trainer.train()
# Project the learnt word embeddings (after training, token_embedding.weight
# holds the fine-tuned vectors)
another_log.add_embedding(token_embedding.weight, metadata=token_names,
                          tag='Sentiment Embeddings')
# Project the original (pre-trained) word embeddings for comparison
original_50_weights = _read_pretrained_embeddings_file(glove_fp, 50, vocab, 'tokens_id')
another_log.add_embedding(original_50_weights, metadata=token_names,
                          tag='Original Embeddings')
train_log.close()
validation_log.close()
another_log.close()
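# To inspect the results, point TensorBoard at the log directory, e.g.:
#   tensorboard --logdir /tmp/anything100/log
# The learnt and original embeddings appear in the Projector tab under their tags.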
