def main():
  utils.heading('SETUP')
  config = configure.Config(mode=FLAGS.mode, model_name=FLAGS.model_name)
  config.write()
  with tf.Graph().as_default() as graph:
    model_trainer = trainer.Trainer(config)
    summary_writer = tf.summary.FileWriter(config.summaries_dir)
    checkpoints_saver = tf.train.Saver(max_to_keep=1)
    best_model_saver = tf.train.Saver(max_to_keep=1)
    init_op = tf.global_variables_initializer()
    graph.finalize()
    with tf.Session() as sess:
      sess.run(init_op)
      progress = training_progress.TrainingProgress(
          config, sess, checkpoints_saver, best_model_saver,
          config.mode == 'train')
      utils.log()
      if config.mode == 'train':
        utils.heading('START TRAINING ({:})'.format(config.model_name))
        model_trainer.train(sess, progress, summary_writer)
      elif config.mode == 'eval':
        utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.evaluate_all_tasks(sess, summary_writer, None)
      else:
        raise ValueError('Mode must be "train" or "eval"')
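# The script above reads FLAGS.mode and FLAGS.model_name but does not define
# them here. A minimal sketch of the assumed flag definitions (the flag names
# come from the code above; defaults and help strings are guesses). The
# project-local modules are also assumptions about the repo layout, e.g.:
#   from base import configure, embeddings, utils
#   from training import trainer, training_progress
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('mode', 'train', '"train" or "eval"')
tf.app.flags.DEFINE_string('model_name', 'default_model',
                           'name of the model being trained or evaluated')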
def main(data_dir='/content/data'):
  random.seed(0)

  utils.log("BUILDING WORD VOCABULARY/EMBEDDINGS")
  for pretrained in ['glove.6B.50d.txt']:
    config = configure.Config(data_dir=data_dir,
                              for_preprocessing=True,
                              pretrained_embeddings=pretrained,
                              word_embedding_size=50)
    embeddings.PretrainedEmbeddingLoader(config).build()

  utils.log("CONSTRUCTING DEV SETS")
  for task_name in ["chunk"]:
    # chunking does not come with a provided dev split, so create one by
    # selecting a random subset of the training data
    config = configure.Config(data_dir=data_dir, for_preprocessing=True)
    task_data_dir = os.path.join(config.raw_data_topdir, task_name) + '/'
    train_sentences = word_level_data.TaggedDataLoader(
        config, task_name, False).get_labeled_sentences("train")
    random.shuffle(train_sentences)
    write_sentences(task_data_dir + 'train_subset.txt', train_sentences[1500:])
    write_sentences(task_data_dir + 'dev.txt', train_sentences[:1500])

  utils.log("WRITING LABEL MAPPINGS")
  for task_name in ["chunk"]:
    for i, label_encoding in enumerate(["BIOES"]):
      config = configure.Config(data_dir=data_dir,
                                for_preprocessing=True,
                                label_encoding=label_encoding)
      token_level = task_name in ["ccg", "pos", "depparse"]
      loader = word_level_data.TaggedDataLoader(config, task_name, token_level)
      if token_level:
        # token-level tasks do not depend on the label encoding, so only
        # write their mapping once
        if i != 0:
          continue
        utils.log("WRITING LABEL MAPPING FOR", task_name.upper())
      else:
        utils.log(" Writing label mapping for", task_name.upper(),
                  label_encoding)
      utils.log(" ", len(loader.label_mapping), "classes")
      utils.write_cpickle(loader.label_mapping, loader.label_mapping_path)
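# write_sentences is called above but not defined in this file. A minimal
# sketch, assuming the loader yields sentences as (words, tags) pairs and the
# raw data uses a CoNLL-style one-token-per-line format with a blank line
# between sentences:
def write_sentences(fname, sentences):
  with open(fname, 'w') as f:
    for words, tags in sentences:
      for word, tag in zip(words, tags):
        f.write(word + ' ' + tag + '\n')
      f.write('\n')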
def main(data_dir='/content/data'):
  random.seed(0)

  utils.log("BUILDING WORD VOCABULARY/EMBEDDINGS")
  for pretrained in ['glove.6B.100d.txt']:
    config = configure.Config(data_dir=data_dir,
                              for_preprocessing=True,
                              pretrained_embeddings=pretrained,
                              word_embedding_size=100)
    embeddings.PretrainedEmbeddingLoader(config).build()

  utils.log("WRITING LABEL MAPPINGS")
  for task_name in ["senclass"]:
    config = configure.Config(data_dir=data_dir, for_preprocessing=True)
    loader = sentence_level_data.SentenceClassificationDataLoader(
        config, task_name)
    utils.log("WRITING LABEL MAPPING FOR", task_name.upper())
    utils.log(" ", len(loader.label_mapping), "classes")
    utils.write_cpickle(loader.label_mapping, loader.label_mapping_path)
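# A small sanity-check sketch for the label mappings written above. It assumes
# utils.write_cpickle stores a plain pickled dict mapping label strings to
# integer ids (an assumption about the unshown utils helper):
import pickle

def inspect_label_mapping(path):
  with open(path, 'rb') as f:
    mapping = pickle.load(f)
  # print the classes ordered by their integer id
  print(len(mapping), "classes:", sorted(mapping.items(), key=lambda kv: kv[1]))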
def main():
  utils.heading('SETUP')
  config = configure.Config(mode=FLAGS.mode, model_name=FLAGS.model_name)
  config.write()

  # Vocabulary/embedding inspection modes exit before any graph is built.
  if config.mode == 'encode':
    word_vocab = embeddings.get_word_vocab(config)
    sentence = ("Squirrels , for example , would show up , look for the "
                "peanut , go away .").split()
    sentence = [word_vocab[embeddings.normalize_word(w)] for w in sentence]
    print(sentence)
    return
  if config.mode == 'decode':
    word_vocab_reversed = embeddings.get_word_vocab_reversed(config)
    sentence = ("25709 33 42 879 33 86 304 92 33 676 42 32 13406 33 273 "
                "445 34").split()
    sentence = [word_vocab_reversed[int(w)] for w in sentence]
    print(sentence)
    return
  if config.mode == 'encode-vi':
    word_vocab_vi = embeddings.get_word_vocab_vi(config)
    print(len(word_vocab_vi))
    # "Every scientist belongs to a research group, and every group studies
    # a great many diverse topics."
    sentence = ("Mỗi_một khoa_học_gia đều thuộc một nhóm nghiên_cứu , và "
                "mỗi nhóm đều nghiên_cứu rất nhiều đề_tài đa_dạng .").split()
    sentence = [word_vocab_vi[embeddings.normalize_word(w)] for w in sentence]
    print(sentence)
    return
  if config.mode == 'decode-vi':
    word_vocab_reversed_vi = embeddings.get_word_vocab_reversed_vi(config)
    sentence = ("8976 32085 129 178 17 261 381 5 7 195 261 129 381 60 37 "
                "2474 1903 6").split()
    sentence = [word_vocab_reversed_vi[int(w)] for w in sentence]
    print(sentence)
    return
  if config.mode == 'embed':
    word_embeddings = embeddings.get_word_embeddings(config)
    word = 50
    embed = word_embeddings[word]
    print(' '.join(str(x) for x in embed))
    return
  if config.mode == 'embed-vi':
    word_embeddings_vi = embeddings.get_word_embeddings_vi(config)
    word = 50
    embed = word_embeddings_vi[word]
    print(' '.join(str(x) for x in embed))
    return

  with tf.Graph().as_default() as graph:
    model_trainer = trainer.Trainer(config)
    summary_writer = tf.summary.FileWriter(config.summaries_dir)
    checkpoints_saver = tf.train.Saver(max_to_keep=1)
    best_model_saver = tf.train.Saver(max_to_keep=1)
    init_op = tf.global_variables_initializer()
    graph.finalize()
    with tf.Session() as sess:
      sess.run(init_op)
      progress = training_progress.TrainingProgress(
          config, sess, checkpoints_saver, best_model_saver,
          config.mode == 'train')
      utils.log()
      if config.mode == 'train':
        # summary_writer.add_graph(sess.graph)
        utils.heading('START TRAINING ({:})'.format(config.model_name))
        model_trainer.train(sess, progress, summary_writer)
      elif config.mode == 'eval-train':
        utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.evaluate_all_tasks(sess, summary_writer, None,
                                         train_set=True)
      elif config.mode == 'eval-dev':
        utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.evaluate_all_tasks(sess, summary_writer, None,
                                         train_set=False)
      elif config.mode == 'infer':
        utils.heading('START INFER ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.infer(sess)
      elif config.mode == 'translate':
        utils.heading('START TRANSLATE ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.translate(sess)
      elif config.mode == 'eval-translate-train':
        utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.evaluate_all_tasks(sess, summary_writer, None,
                                         train_set=True, is_translate=True)
      elif config.mode == 'eval-translate-dev':
        utils.heading('RUN EVALUATION ({:})'.format(config.model_name))
        progress.best_model_saver.restore(
            sess, tf.train.latest_checkpoint(config.checkpoints_dir))
        model_trainer.evaluate_all_tasks(sess, summary_writer, None,
                                         train_set=False, is_translate=True)
      else:
        raise ValueError('Unknown mode: "{:}"'.format(config.mode))
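# Hypothetical entry point and example invocations; the script name cvt.py and
# the model names are placeholders, and flag parsing is assumed to be handled
# by tf.app.flags as sketched earlier:
#   python cvt.py --mode=train --model_name=my_model
#   python cvt.py --mode=eval-dev --model_name=my_model
#   python cvt.py --mode=translate --model_name=my_model
if __name__ == '__main__':
  main()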