def main():
    # Configuration file processing
    ...
    # DyNet setting
    ...
    # Build the dataset of the training process
    ## Build data reader
    data_reader = PTBReader(
        field_list=['word', 'tag', 'head', 'rel'],
        root='0\t**root**\t_\t**rpos**\t_\t_\t0\t**rrel**\t_\t_',
        spacer=r'[\t]',)
    ## Build vocabulary with pretrained glove
    vocabulary = Vocabulary()
    g_word, _ = glove_reader(cfg.GLOVE)
    pretrained_vocabs = {'glove': g_word}
    vocabulary.extend_from_pretrained_vocab(pretrained_vocabs)
    ## Setup datasets
    datasets_settings = {
        'train': DatasetSetting(cfg.TRAIN, True),
        'dev': DatasetSetting(cfg.DEV, True),
        'test': DatasetSetting(cfg.TEST, True),}
    datasets = SingleTaskDataset(vocabulary, datasets_settings, data_reader)
    counters = {'word': Counter(), 'tag': Counter(), 'rel': Counter()}
    datasets.build_dataset(
        counters, no_pad_namespace={'rel'}, no_unk_namespace={'rel'})
    # Build model
    ...
    # Train model
    train_batch = datasets.get_batches(
        'train', cfg.TRAIN_BATCH_SIZE, True, cmp, True)
    valid_batch = datasets.get_batches(
        'dev', cfg.TEST_BATCH_SIZE, True, cmp, False)
    test_batch = datasets.get_batches(
        'test', cfg.TEST_BATCH_SIZE, True, cmp, False)
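# A minimal sketch of the input the PTBReader above is configured for:
# tab-separated, ten-column CoNLL-style rows, from which the 'word', 'tag',
# 'head', and 'rel' fields are read. The sentence below is an illustrative
# example, not data from the repository, and the exact column layout is an
# assumption based on the root template string.
EXAMPLE_CONLL_SENTENCE = (
    "1\tMs.\t_\tNNP\tNNP\t_\t2\tnn\t_\t_\n"
    "2\tHaag\t_\tNNP\tNNP\t_\t3\tnsubj\t_\t_\n"
    "3\tplays\t_\tVBZ\tVBZ\t_\t0\troot\t_\t_\n"
    "4\tElianti\t_\tNNP\tNNP\t_\t3\tdobj\t_\t_\n"
    "5\t.\t_\t.\t.\t_\t3\tpunct\t_\t_\n"
)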
def __init__(self, model, cfg, vocabulary: Vocabulary):
    pc = model.add_subcollection()
    # Trainable word embeddings, initialized to zero.
    word_num = vocabulary.get_vocab_size('word')
    self.wlookup = pc.lookup_parameters_from_numpy(
        np.zeros((word_num, cfg.WORD_DIM), dtype=np.float32))
    # Trainable POS-tag embeddings, randomly initialized.
    tag_num = vocabulary.get_vocab_size('tag')
    self.tlookup = pc.lookup_parameters_from_numpy(
        np.random.randn(tag_num, cfg.TAG_DIM).astype(np.float32))
    # Pretrained GloVe embeddings: two zero rows are prepended (presumably for
    # the padding and unknown entries of the 'glove' namespace), and the whole
    # table is scaled by its standard deviation.
    _, glove_vec = glove_reader(cfg.GLOVE)
    glove_dim = len(glove_vec[0])
    unk_pad_vec = [[0.0 for _ in range(glove_dim)]]
    glove_num = vocabulary.get_vocab_size('glove')
    glove_vec = unk_pad_vec + unk_pad_vec + glove_vec
    glove_vec = np.array(glove_vec, dtype=np.float32) / np.std(glove_vec)
    self.glookup = pc.lookup_parameters_from_numpy(
        glove_vec.astype(np.float32))

    self.token_dim = cfg.WORD_DIM + cfg.TAG_DIM
    self.vocabulary = vocabulary
    self.pc, self.cfg = pc, cfg
    self.spec = (cfg, vocabulary)
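# A minimal sketch (an assumption, not code from the repository) of how the
# three lookup tables above are typically combined in this kind of parser: the
# trainable word vector and the fixed GloVe vector are summed (assuming
# cfg.WORD_DIM matches the GloVe dimension), then concatenated with the tag
# vector, giving token_dim = WORD_DIM + TAG_DIM.
import dynet as dy  # assumed to be configured elsewhere before this point

def embed_token_sketch(repre, word_idx, glove_idx, tag_idx):
    word_vec = dy.lookup(repre.wlookup, word_idx)
    glove_vec = dy.lookup(repre.glookup, glove_idx, update=False)
    tag_vec = dy.lookup(repre.tlookup, tag_idx)
    return dy.concatenate([word_vec + glove_vec, tag_vec])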
def main():
    # Configuration file processing
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='../configs/debug.cfg')
    argparser.add_argument('--continue_training', action='store_true',
                           help='Load model and continue training.')
    argparser.add_argument('--name', default='experiment',
                           help='The name of the experiment.')
    argparser.add_argument('--model', default='s2s',
                           help='s2s: seq2seq-head-selection-model, '
                                's2tDFS: seq2tree-DFS-decoder-model')
    argparser.add_argument('--gpu', default='0', help='GPU ID (-1 for CPU)')
    args, extra_args = argparser.parse_known_args()
    cfg = IniConfigurator(args.config_file, extra_args)

    # Logger setting
    logger = dual_channel_logger(
        __name__,
        file_path=cfg.LOG_FILE,
        file_model='w',
        formatter='%(asctime)s - %(levelname)s - %(message)s',
        time_formatter='%m-%d %H:%M')
    from eval.script_evaluator import ScriptEvaluator

    # DyNet setting (must happen before `import dynet`)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    import dynet_config
    dynet_config.set(mem=cfg.DYNET_MEM, random_seed=cfg.DYNET_SEED)
    dynet_config.set_gpu()
    import dynet as dy
    from models.token_representation import TokenRepresentation
    from antu.nn.dynet.seq2seq_encoders import DeepBiRNNBuilder, orthonormal_VanillaLSTMBuilder
    from models.graph_nn_decoder import GraphNNDecoder
    from models.jackknife_decoder import JackKnifeGraphNNDecoder

    # Build the dataset of the training process
    # Build data reader
    data_reader = PTBReader(
        field_list=['word', 'tag', 'head', 'rel'],
        root='0\t**root**\t_\t**rcpos**\t**rpos**\t_\t0\t**rrel**\t_\t_',
        spacer=r'[\t]',)
    # Build vocabulary with pretrained glove
    vocabulary = Vocabulary()
    g_word, _ = glove_reader(cfg.GLOVE)
    pretrained_vocabs = {'glove': g_word}
    vocabulary.extend_from_pretrained_vocab(pretrained_vocabs)
    # Setup datasets
    datasets_settings = {'train': DatasetSetting(cfg.TRAIN, True),
                         'dev': DatasetSetting(cfg.DEV, False),
                         'test': DatasetSetting(cfg.TEST, False), }
    datasets = PTBDataset(vocabulary, datasets_settings, data_reader)
    counters = {'word': Counter(), 'tag': Counter(), 'rel': Counter()}
    datasets.build_dataset(counters, no_pad_namespace={'rel'},
                           no_unk_namespace={'rel'})

    # Build model
    # Parameter
    pc = dy.ParameterCollection()
    LR = 0.0005  # overrides cfg.LR for both the trainer and the decay schedule
    trainer = dy.AdamTrainer(pc, LR, cfg.ADAM_BETA1, cfg.ADAM_BETA2, cfg.EPS)

    # Token Representation Layer
    token_repre = TokenRepresentation(pc, cfg, datasets.vocabulary,
                                      include_pos=True)
    # BiLSTM Encoder Layer
    # (alternative encoders were also tried here and left disabled: biaffine /
    # multi-head / scaled-dot-product attention, label attention, and a
    # partitioned transformer encoder with label attention)
    encoder = DeepBiRNNBuilder(pc, cfg.ENC_LAYERS, token_repre.token_dim,
                               cfg.ENC_H_DIM, orthonormal_VanillaLSTMBuilder)
    # GNN Decoder Layer
    decoder = GraphNNDecoder(pc, cfg, datasets.vocabulary)
    # decoder = JackKnifeGraphNNDecoder(pc, cfg, datasets.vocabulary)
    # PTB Evaluator
    my_eval = ScriptEvaluator(['Valid', 'Test'], datasets.vocabulary)

    # Build Training Batch
    def cmp(ins): return len(ins['word'])
    train_batch = datasets.get_batches('train', cfg.TRAIN_BATCH_SIZE, True,
                                       cmp, True)
    valid_batch = list(datasets.get_batches('dev', cfg.TEST_BATCH_SIZE, False,
                                            cmp, False))
    test_batch = list(datasets.get_batches('test', cfg.TEST_BATCH_SIZE, False,
                                           cmp, False))

    # Train model
    BEST_DEV_LAS = BEST_DEV_UAS = BEST_ITER = 0
    cnt_iter = -cfg.WARM * cfg.GRAPH_LAYERS
    valid_loss = [[] for i in range(cfg.GRAPH_LAYERS + 3)]
    logger.info("Experiment name: %s" % args.name)
    SHA = os.popen('git log -1 | head -n 1 | cut -c 8-13').readline().rstrip()
    logger.info('Git SHA: %s' % SHA)
    while cnt_iter < cfg.MAX_ITER:
        dy.renew_cg()
        cnt_iter += 1
        indexes, masks, truth = train_batch.__next__()
        vectors = token_repre(indexes, True)
        vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                          np.array(masks['1D']).T, False, True)
        loss, part_loss = decoder(vectors, masks, truth, cnt_iter, True, True)
        for i, l in enumerate([loss] + part_loss):
            valid_loss[i].append(l.value())
        loss.backward()
        trainer.learning_rate = \
            LR * cfg.LR_DECAY ** (max(cnt_iter, 0) / cfg.LR_ANNEAL)
        trainer.update()

        if cnt_iter % cfg.VALID_ITER:
            continue

        # Validation
        for i in range(len(valid_loss)):
            valid_loss[i] = str(round(np.mean(valid_loss[i]), 2))
        avg_loss = ', '.join(valid_loss)
        logger.info("")
        logger.info("Iter: %d-%d, Avg_loss: %s, LR (%f), Best (%d)" %
                    (cnt_iter / cfg.VALID_ITER, cnt_iter, avg_loss,
                     trainer.learning_rate, BEST_ITER))
        valid_loss = [[] for i in range(cfg.GRAPH_LAYERS + 3)]

        my_eval.clear('Valid')
        for indexes, masks, truth in valid_batch:
            dy.renew_cg()
            vectors = token_repre(indexes, False)
            vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                              np.array(masks['1D']).T, False, False)
            pred = decoder(vectors, masks, None, cnt_iter, False, True)
            my_eval.add_truth('Valid', truth)
            my_eval.add_pred('Valid', pred)
        dy.save(cfg.LAST_FILE, [token_repre, encoder, decoder])
        if my_eval.evaluation('Valid', cfg.PRED_DEV, cfg.DEV):
            BEST_ITER = cnt_iter / cfg.VALID_ITER
            os.system('cp %s.data %s.data' % (cfg.LAST_FILE, cfg.BEST_FILE))
            os.system('cp %s.meta %s.meta' % (cfg.LAST_FILE, cfg.BEST_FILE))

        # Just record test result
        my_eval.clear('Test')
        for indexes, masks, truth in test_batch:
            dy.renew_cg()
            vectors = token_repre(indexes, False)
            vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                              np.array(masks['1D']).T, False, False)
            pred = decoder(vectors, masks, None, cnt_iter, False, True)
            my_eval.add_truth('Test', truth)
            my_eval.add_pred('Test', pred)
        my_eval.evaluation('Test', cfg.PRED_TEST, cfg.TEST)
    my_eval.print_best_result('Valid')

    # Final Test with the best saved model
    test_pc = dy.ParameterCollection()
    token_repre, encoder, decoder = dy.load(cfg.BEST_FILE, test_pc)
    my_eval.clear('Test')
    for indexes, masks, truth in test_batch:
        dy.renew_cg()
        vectors = token_repre(indexes, False)
        vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                          np.array(masks['1D']).T, False, False)
        pred = decoder(vectors, masks, None, 0, False, True)
        my_eval.add_truth('Test', truth)
        my_eval.add_pred('Test', pred)
    my_eval.evaluation('Test', cfg.PRED_TEST, cfg.TEST)
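# A small illustration of the learning-rate schedule used above: the rate is
# held at its base value while cnt_iter is still negative (the WARM *
# GRAPH_LAYERS warm-up window) and then decays exponentially. The base_lr comes
# from the script (LR = 0.0005); decay and anneal are made-up constants, not
# values from the config files.
def lr_at(cnt_iter, base_lr=0.0005, decay=0.75, anneal=5000):
    return base_lr * decay ** (max(cnt_iter, 0) / anneal)

# e.g. lr_at(-2000) == 0.0005, lr_at(5000) == 0.000375, lr_at(10000) ~= 0.00028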
def main():
    # Configuration file processing
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='../configs/debug.cfg')
    argparser.add_argument('--continue_training', action='store_true',
                           help='Load model and continue training.')
    argparser.add_argument('--name', default='experiment',
                           help='The name of the experiment.')
    argparser.add_argument('--model', default='s2s',
                           help='s2s: seq2seq-head-selection-model, '
                                's2tBFS: seq2tree-BFS-decoder-model, '
                                's2tDFS: seq2tree-DFS-decoder-model')
    argparser.add_argument('--gpu', default='0', help='GPU ID (-1 for CPU)')
    args, extra_args = argparser.parse_known_args()
    cfg = IniConfigurator(args.config_file, extra_args)

    # Logger setting
    logger = dual_channel_logger(
        __name__,
        file_path=cfg.LOG_FILE,
        file_model='w',
        formatter='%(asctime)s - %(levelname)s - %(message)s',
        time_formatter='%m-%d %H:%M')
    from eval.script_evaluator import ScriptEvaluator

    # DyNet setting (must happen before `import dynet`)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    import dynet_config
    dynet_config.set(mem=cfg.DYNET_MEM, random_seed=cfg.DYNET_SEED)
    dynet_config.set_gpu()
    import dynet as dy
    from models.token_representation import TokenRepresentation
    from antu.nn.dynet.seq2seq_encoders import DeepBiRNNBuilder, orthonormal_VanillaLSTMBuilder
    from models.graph_nn_decoder import GraphNNDecoder

    # Build the dataset of the training process
    # Build data reader
    data_reader = PTBReader(
        field_list=['word', 'tag', 'head', 'rel'],
        root='0\t**root**\t_\t**rcpos**\t**rpos**\t_\t0\t**rrel**\t_\t_',
        spacer=r'[\t]',)
    # Build vocabulary with pretrained glove
    vocabulary = Vocabulary()
    g_word, _ = glove_reader(cfg.GLOVE)
    pretrained_vocabs = {'glove': g_word}
    vocabulary.extend_from_pretrained_vocab(pretrained_vocabs)
    # Setup datasets
    datasets_settings = {'train': DatasetSetting(cfg.TRAIN, True),
                         'dev': DatasetSetting(cfg.DEV, False),
                         'test': DatasetSetting(cfg.TEST, False), }
    datasets = PTBDataset(vocabulary, datasets_settings, data_reader)
    counters = {'word': Counter(), 'tag': Counter(), 'rel': Counter()}
    datasets.build_dataset(counters, no_pad_namespace={'rel'},
                           no_unk_namespace={'rel'})

    # Build model
    # Parameter
    pc = dy.ParameterCollection()
    trainer = dy.AdamTrainer(pc, alpha=cfg.LR, beta_1=cfg.ADAM_BETA1,
                             beta_2=cfg.ADAM_BETA2, eps=cfg.EPS)

    # Token Representation Layer
    token_repre = TokenRepresentation(pc, cfg, datasets.vocabulary)
    # BiLSTM Encoder Layer
    encoder = DeepBiRNNBuilder(pc, cfg.ENC_LAYERS, token_repre.token_dim,
                               cfg.ENC_H_DIM, orthonormal_VanillaLSTMBuilder)
    # GNN Decoder Layer
    decoder = GraphNNDecoder(pc, cfg, datasets.vocabulary)
    # PTB Evaluator
    my_eval = ScriptEvaluator(['Valid', 'Test'], datasets.vocabulary)

    # Build Training Batch
    def cmp(ins): return len(ins['word'])
    train_batch = datasets.get_batches('train', cfg.TRAIN_BATCH_SIZE, True,
                                       cmp, True)
    valid_batch = list(
        datasets.get_batches('dev', cfg.TEST_BATCH_SIZE, False, cmp, False))
    test_batch = list(
        datasets.get_batches('test', cfg.TEST_BATCH_SIZE, False, cmp, False))

    # Train model
    BEST_DEV_LAS = BEST_DEV_UAS = BEST_ITER = cnt_iter = 0
    valid_loss = [[] for i in range(cfg.GRAPH_LAYERS + 3)]
    logger.info("Experiment name: %s" % args.name)
    SHA = os.popen('git log -1 | head -n 1 | cut -c 8-13').readline().rstrip()
    logger.info('Git SHA: %s' % SHA)
    while cnt_iter < cfg.MAX_ITER:
        dy.renew_cg()
        cnt_iter += 1
        indexes, masks, truth = train_batch.__next__()
        vectors = token_repre(indexes, True)
        vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                          np.array(masks['1D']).T, True)
        loss, part_loss = decoder(vectors, masks, truth, True, True)
        for i, l in enumerate([loss] + part_loss):
            valid_loss[i].append(l.value())
        loss.backward()
        trainer.learning_rate = cfg.LR * cfg.LR_DECAY**(cnt_iter / cfg.LR_ANNEAL)
        trainer.update()

        if cnt_iter % cfg.VALID_ITER:
            continue

        # Validation
        for i in range(len(valid_loss)):
            valid_loss[i] = str(round(np.mean(valid_loss[i]), 2))
        avg_loss = ', '.join(valid_loss)
        logger.info("")
        logger.info("Iter: %d-%d, Avg_loss: %s, LR (%f), Best (%d)" %
                    (cnt_iter / cfg.VALID_ITER, cnt_iter, avg_loss,
                     trainer.learning_rate, BEST_ITER))
        valid_loss = [[] for i in range(cfg.GRAPH_LAYERS + 3)]

        my_eval.clear('Valid')
        for indexes, masks, truth in valid_batch:
            dy.renew_cg()
            vectors = token_repre(indexes, False)
            vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                              np.array(masks['1D']).T, False)
            pred = decoder(vectors, masks, None, False, True)
            my_eval.add_truth('Valid', truth)
            my_eval.add_pred('Valid', pred)
        dy.save(cfg.LAST_FILE, [token_repre, encoder, decoder])
        if my_eval.evaluation('Valid', cfg.PRED_DEV, cfg.DEV):
            BEST_ITER = cnt_iter / cfg.VALID_ITER
            os.system('cp %s.data %s.data' % (cfg.LAST_FILE, cfg.BEST_FILE))
            os.system('cp %s.meta %s.meta' % (cfg.LAST_FILE, cfg.BEST_FILE))

        # Just record test result
        my_eval.clear('Test')
        for indexes, masks, truth in test_batch:
            dy.renew_cg()
            vectors = token_repre(indexes, False)
            vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                              np.array(masks['1D']).T, False)
            pred = decoder(vectors, masks, None, False, True)
            my_eval.add_truth('Test', truth)
            my_eval.add_pred('Test', pred)
        my_eval.evaluation('Test', cfg.PRED_TEST, cfg.TEST)
    my_eval.print_best_result('Valid')

    # Final Test with the best saved model
    test_pc = dy.ParameterCollection()
    token_repre, encoder, decoder = dy.load(cfg.BEST_FILE, test_pc)
    my_eval.clear('Test')
    test_batch = datasets.get_batches('test', cfg.TEST_BATCH_SIZE, False,
                                      cmp, False)
    for indexes, masks, truth in test_batch:
        dy.renew_cg()
        vectors = token_repre(indexes, False)
        vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                          np.array(masks['1D']).T, False)
        pred = decoder(vectors, masks, None, False, True)
        my_eval.add_truth('Test', truth)
        my_eval.add_pred('Test', pred)
    my_eval.evaluation('Test', cfg.PRED_TEST, cfg.TEST)
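# Both training scripts above are driven by an INI file read through
# IniConfigurator plus a few command-line flags. A typical invocation might look
# like the line below (the config path is the argparse default; the run name is
# a placeholder):
#
#   python train.py --config_file ../configs/debug.cfg --name my-run --gpu 0
#
# The config file is expected to define every cfg.* key referenced above, e.g.
# data paths (TRAIN, DEV, TEST, GLOVE), model sizes (WORD_DIM, TAG_DIM,
# ENC_LAYERS, ENC_H_DIM, GRAPH_LAYERS), optimization settings (LR, LR_DECAY,
# LR_ANNEAL, ADAM_BETA1, ADAM_BETA2, EPS, RNN_DROP, MAX_ITER, VALID_ITER,
# TRAIN_BATCH_SIZE, TEST_BATCH_SIZE, and WARM in the warm-up variant), DyNet
# options (DYNET_MEM, DYNET_SEED), and output locations (LOG_FILE, LAST_FILE,
# BEST_FILE, PRED_DEV, PRED_TEST).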
def main():
    # Configuration file processing
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='../configs/debug.cfg')
    argparser.add_argument('--continue_training', action='store_true',
                           help='Load model and continue training.')
    argparser.add_argument('--name', default='experiment',
                           help='The name of the experiment.')
    argparser.add_argument('--model', default='s2s',
                           help='s2s: seq2seq-head-selection-model, '
                                's2tBFS: seq2tree-BFS-decoder-model, '
                                's2tDFS: seq2tree-DFS-decoder-model')
    argparser.add_argument('--gpu', default='0', help='GPU ID (-1 for CPU)')
    args, extra_args = argparser.parse_known_args()
    cfg = IniConfigurator(args.config_file, extra_args)

    # Logger setting
    logger = dual_channel_logger(
        __name__,
        file_path=cfg.LOG_FILE,
        file_model='w',
        formatter='%(asctime)s - %(levelname)s - %(message)s',
        time_formatter='%m-%d %H:%M')
    from eval.script_evaluator import ScriptEvaluator

    # DyNet setting (must happen before `import dynet`)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    import dynet_config
    dynet_config.set(mem=cfg.DYNET_MEM, random_seed=cfg.DYNET_SEED)
    dynet_config.set_gpu()
    import dynet as dy

    # Build the dataset of the training process
    # Build data reader
    data_reader = PTBReader(
        field_list=['word', 'tag', 'head', 'rel'],
        root='0\t**root**\t_\t**rcpos**\t**rpos**\t_\t0\t**rrel**\t_\t_',
        spacer=r'[\t]',)
    # Build vocabulary with pretrained glove
    vocabulary = Vocabulary()
    g_word, _ = glove_reader(cfg.GLOVE)
    pretrained_vocabs = {'glove': g_word}
    vocabulary.extend_from_pretrained_vocab(pretrained_vocabs)
    # Setup datasets
    datasets_settings = {'train': DatasetSetting(cfg.TRAIN, True),
                         'dev': DatasetSetting(cfg.DEV, False),
                         'test': DatasetSetting(cfg.TEST, False), }
    datasets = PTBDataset(vocabulary, datasets_settings, data_reader)
    counters = {'word': Counter(), 'tag': Counter(), 'rel': Counter()}
    datasets.build_dataset(counters, no_pad_namespace={'rel'},
                           no_unk_namespace={'rel'})

    logger.info("Experiment name: %s" % args.name)
    SHA = os.popen('git log -1 | head -n 1 | cut -c 8-13').readline().rstrip()
    logger.info('Git SHA: %s' % SHA)

    # Build Test model from the best saved checkpoint
    test_pc = dy.ParameterCollection()
    token_repre, encoder, decoder = dy.load(cfg.BEST_FILE, test_pc)

    # PTB Evaluator
    my_eval = ScriptEvaluator(['Valid', 'Test'], datasets.vocabulary)
    my_eval.clear('Test')

    def cmp(ins): return len(ins['word'])
    test_batch = datasets.get_batches('test', cfg.TEST_BATCH_SIZE, False,
                                      cmp, False)
    for indexes, masks, truth in test_batch:
        dy.renew_cg()
        vectors = token_repre(indexes, False)
        # Pass the padding mask as in the training script; the original call
        # omitted it.
        vectors = encoder(vectors, None, cfg.RNN_DROP, cfg.RNN_DROP,
                          np.array(masks['1D']).T, False)
        pred = decoder(vectors, masks, None, False, True)
        my_eval.add_truth('Test', truth)
        my_eval.add_pred('Test', pred)
    my_eval.evaluation('Test', cfg.PRED_TEST, cfg.TEST)