# NOTE: assumes numpy and the lda2vec model/utils are available, e.g.
# "from lda2vec import utils, model" earlier in the script.
import numpy as np

# If True, save the graph definition to the logdir; otherwise don't
save_graph = True

# Number of (pivot, target) training pairs
print(len(pivot_ids))

# Initialize the model
m = model(num_docs, vocab_size, num_topics,
          embedding_size=embed_size,
          pretrained_embeddings=pretrained_embeddings,
          freqs=freqs,
          batch_size=batch_size,
          save_graph_def=save_graph,
          restore=MODEL_RESTORE,
          logdir=model_dir)

# Train the model
m.train(pivot_ids, target_ids, doc_ids, len(pivot_ids), num_epochs,
        idx_to_word=idx_to_word, switch_loss_epoch=switch_loss_epoch)

# Visualize topics with pyldavis and save the trained parameters
trained_model_data = utils.generate_ldavis_data(clean_data_dir, m, idx_to_word,
                                                freqs, vocab_size)
np.savez("{}/model_params".format(model_dir), **trained_model_data)
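# Hedged follow-up sketch: the parameters saved above can be loaded back for
# inspection. This assumes only that np.savez ran with model_dir as above
# (np.savez appends ".npz" to the filename); the array names depend on what
# generate_ldavis_data returned, so they are printed rather than assumed.
params = np.load("{}/model_params.npz".format(model_dir))
print(params.files)  # names of the saved arrays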
# Regularization strength for the bias topics
lmbda = 1e-4
logdir = "bias_rc"

# Initialize the bias-topic variant of the model
m = b_model(num_docs, vocab_size, num_topics, bias_idxes,
            bias_topics=num_bias_topics,
            bias_lmbda=lmbda,
            bias_unity=bias_unity,
            target_bias_topic_cov=0.8,
            embedding_size=embed_size,
            pretrained_embeddings=pretrained_embeddings,
            freqs=freqs,
            batch_size=batch_size,
            save_graph_def=save_graph,
            logdir=logdir)

# Train the model
m.train(pivot_ids, target_ids, doc_ids, len(pivot_ids), num_epochs,
        idx_to_word=idx_to_word, switch_loss_epoch=switch_loss_epoch)

# Visualize topics with pyldavis
utils.generate_ldavis_data(data_path, m, idx_to_word, freqs, vocab_size)
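# Hedged sketch of one plausible way the bias_idxes used above could be
# built, assuming it holds vocabulary indices for hand-picked bias seed
# words and that idx_to_word maps index -> word. bias_words and word_to_idx
# are hypothetical names introduced here for illustration only.
bias_words = ["good", "bad", "great", "terrible"]
word_to_idx = {word: idx for idx, word in idx_to_word.items()}
bias_idxes = [word_to_idx[w] for w in bias_words if w in word_to_idx]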
# If True, save the graph definition to the logdir; otherwise don't
save_graph = True

# Initialize the model, restoring trained weights from a previous run
m = model(num_docs, vocab_size, num_topics,
          embedding_size=embed_size,
          pretrained_embeddings=pretrained_embeddings,
          freqs=freqs,
          batch_size=batch_size,
          save_graph_def=save_graph,
          fixed_words=True,
          restore=True,
          logdir="logdir_190403_1240")

# Training is disabled here because the model is restored already trained
"""
# Train the model
m.train(pivot_ids, target_ids, doc_ids, len(pivot_ids), num_epochs,
        idx_to_word=idx_to_word, switch_loss_epoch=switch_loss_epoch)
"""

# Visualize topics with pyldavis and save the trained parameters
trained_model_data = utils.generate_ldavis_data(data_path, m, idx_to_word,
                                                freqs, vocab_size)
np.savez("{}/model_params".format("logdir_190403_1240"), **trained_model_data)
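# Hedged sanity check: restore=True presupposes that a checkpoint already
# exists in the logdir. Assuming the model writes standard TensorFlow 1.x
# checkpoints, the checkpoint being restored can be located like this:
import tensorflow as tf
ckpt = tf.train.latest_checkpoint("logdir_190403_1240")
assert ckpt is not None, "no checkpoint found in logdir_190403_1240"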