def run(args):
    """Dispatch to train / load-and-resume / test based on the CLI flags.

    Fixes: the CLI `input` value is bound to `inp` so the builtin input()
    is no longer shadowed; the unused local `fil` (args.filter) was dropped.
    """
    # Unpack frequently-used hyper-parameters from the parsed args once.
    reverse = args.reverse
    n_epoch = args.epoch
    print_every = args.print
    learning_rate = args.learning_rate
    n_layers = args.layer
    hidden_size = args.hidden
    batch_size = args.batch_size
    beam_size = args.beam
    inp = args.input  # renamed from `input` to avoid shadowing the builtin

    if args.train and not args.load:
        print("==" * 20, "train", "==" * 20)
        trainIters(args, args.train, reverse, n_epoch, learning_rate,
                   batch_size, n_layers, hidden_size, print_every)
    elif args.load:
        print("==" * 20, "load", "==" * 20)
        # Layer count / hidden size are encoded in the checkpoint filename.
        n_layers, hidden_size = parseFilename(args.load)
        trainIters(args, args.train, reverse, n_epoch, learning_rate,
                   batch_size, n_layers, hidden_size, print_every,
                   loadFilename=args.load)  # load parameters from filename
    elif args.test:
        print("==" * 20, "test", "==" * 20)
        n_layers, hidden_size = parseFilename(args.test, True)
        runTest(args, n_layers, hidden_size, reverse, args.test, beam_size,
                batch_size, inp, args.corpus)
def run(args):
    """Entry point: train from scratch, resume from a checkpoint, or test."""
    # Read each hyper-parameter off the namespace individually.
    learning_rate = args.learning_rate
    lr_decay_epoch = args.lr_decay_epoch
    lr_decay_ratio = args.lr_decay_ratio
    n_layers = args.layer
    hidden_size = args.hidden_size
    embed_size = args.embed_size
    attr_size = args.attr_size
    attr_num = args.attr_num
    batch_size = args.batch_size
    beam_size = args.beam_size
    overall = args.overall
    max_length = args.max_length
    min_length = args.min_length
    save_dir = args.save_dir

    if args.train and not args.load:
        # Fresh training run.
        trainIters(args.train, learning_rate, lr_decay_epoch, lr_decay_ratio,
                   batch_size, n_layers, hidden_size, embed_size, attr_size,
                   attr_num, overall, save_dir)
    elif args.load:
        # Resume: layer count and hidden size come from the checkpoint name.
        n_layers, hidden_size = parseFilename(args.load)
        trainIters(args.train, learning_rate, lr_decay_epoch, lr_decay_ratio,
                   batch_size, n_layers, hidden_size, embed_size, attr_size,
                   attr_num, overall, save_dir, loadFilename=args.load)
    elif args.test:
        # Testing: hyper-parameters are parsed from the review model filename.
        n_layers, hidden_size = parseFilename(args.review_model)
        runTest(args.test, n_layers, hidden_size, embed_size, attr_size,
                attr_num, overall, args.review_model, args.sketch_model,
                args.topic_model, beam_size, max_length, min_length, save_dir)
def run(args):
    """Run training, checkpoint-resume, or testing depending on CLI flags."""
    reverse = args.reverse
    fil = args.filter  # read but not used below; kept for parity with original
    n_iteration = args.iteration
    print_every = args.print
    save_every = args.save
    learning_rate = args.learning_rate
    n_layers = args.layer
    hidden_size = args.hidden
    batch_size = args.batch_size
    beam_size = args.beam
    inp = args.input
    dropout = args.dropout

    if args.train and not args.load:
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every,
                   save_every, dropout)
    elif args.load:
        # Layer count, hidden size and direction flag are encoded in the name.
        n_layers, hidden_size, reverse = parseFilename(args.load)
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every,
                   save_every, dropout, loadFilename=args.load)
    elif args.test:
        n_layers, hidden_size, reverse = parseFilename(args.test, True)
        runTest(n_layers, hidden_size, reverse, args.test, beam_size, inp,
                args.corpus)
def run(args):
    """Print the config table, then dispatch to train / resume / test."""
    tab_printer(args)
    # Pull hyper-parameters off the namespace once, up front.
    learning_rate = args.learning_rate
    lr_decay_epoch = args.lr_decay_epoch
    lr_decay_ratio = args.lr_decay_ratio
    weight_decay = args.weight_decay
    embed_size = args.embed_size
    hidden_size = args.hidden_size
    node_size = args.node_size
    capsule_size = args.capsule_size
    gcn_layers = args.gcn_layers
    gcn_filters = args.gcn_filters
    rnn_layers = args.rnn_layers
    capsule_num = args.capsule_num
    batch_size = args.batch_size
    epochs = args.epochs

    if args.train:
        trainIters(args.train, learning_rate, lr_decay_epoch, lr_decay_ratio,
                   weight_decay, batch_size, rnn_layers, hidden_size,
                   embed_size, node_size, epochs, args.save_dir)
    elif args.load:
        trainIters(args.load, learning_rate, lr_decay_epoch, lr_decay_ratio,
                   weight_decay, batch_size, rnn_layers, hidden_size,
                   embed_size, node_size, epochs, args.save_dir,
                   args.load_file)
    elif args.test:
        runTest(args.test, rnn_layers, hidden_size, embed_size, node_size,
                capsule_size, gcn_layers, gcn_filters, capsule_num,
                args.aspect_model, args.review_model, args.beam_size,
                args.max_length, args.min_length, args.save_dir)
    else:
        print("mode error!")
def run(args):
    """Dispatch to training, checkpoint-resume, or testing from CLI flags."""
    reverse = args.reverse
    fil = args.filter  # read but unused below; kept for parity with original
    n_iteration = args.iteration
    print_every = args.print
    save_every = args.save
    learning_rate = args.learning_rate
    n_layers = args.layer
    hidden_size = args.hidden
    batch_size = args.batch_size
    beam_size = args.beam
    inp = args.input

    if args.train and not args.load:
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every)
    elif args.load:
        # Checkpoint filename encodes layers, hidden size and direction flag.
        n_layers, hidden_size, reverse = parseFilename(args.load)
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every,
                   loadFilename=args.load)
    elif args.test:
        n_layers, hidden_size, reverse = parseFilename(args.test, True)
        runTest(n_layers, hidden_size, reverse, args.test, beam_size, inp,
                args.corpus)
def run(args):
    """Dispatch to training, checkpoint-resume, testing, or loss plotting.

    Fixes: the CLI `input` value is bound to `inp` so the builtin input()
    is no longer shadowed; the unused local `fil` (args.filter) was dropped.
    """
    reverse = args.reverse
    n_iteration = args.iteration
    print_every = args.print
    save_every = args.save
    learning_rate = args.learning_rate
    n_layers = args.layer
    hidden_size = args.hidden
    batch_size = args.batch_size
    beam_size = args.beam
    inp = args.input  # renamed from `input` to avoid shadowing the builtin

    if args.train and not args.load:
        trainIters(args.train, args.corpus_index, args.strip,
                   args.pretrained_model, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every)
    elif args.load:
        # Checkpoint filename encodes layers, hidden size and direction flag.
        n_layers, hidden_size, reverse = parseFilename(args.load)
        trainIters(args.train, args.corpus_index, args.strip,
                   args.pretrained_model, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every,
                   loadFilename=args.load)
    elif args.test:
        n_layers, hidden_size, reverse = parseFilename(args.test, True)
        runTest(n_layers, args.pretrained_model, hidden_size, reverse,
                args.test, beam_size, inp, args.corpus, args.diff_corpus)
    elif args.loss:
        loss_graph(args.loss, args.corpus, hidden_size)
def run(args):
    """Dispatch to training, checkpoint-resume, or testing from CLI flags.

    Fixes: the CLI `input` value is bound to `inp` so the builtin input()
    is no longer shadowed; the unused local `fil` (args.filter) was dropped.
    """
    reverse = args.reverse
    n_iteration = args.iteration
    print_every = args.print
    save_every = args.save
    learning_rate = args.learning_rate
    n_layers = args.layer
    hidden_size = args.hidden
    batch_size = args.batch_size
    beam_size = args.beam
    inp = args.input  # renamed from `input` to avoid shadowing the builtin

    if args.train and not args.load:
        print("Train new model ... ")
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every)
    elif args.load:
        print("Load existing model ... ")
        # NOTE(review): here parseFilename yields only `reverse`, whereas the
        # test branch unpacks three values — confirm this asymmetry is intended.
        reverse = parseFilename(args.load)
        trainIters(args.train, reverse, n_iteration, learning_rate,
                   batch_size, n_layers, hidden_size, print_every, save_every,
                   loadFilename=args.load)
    elif args.test:
        print("Testing ... ")
        n_layers, hidden_size, reverse = parseFilename(args.test, True)
        runTest(n_layers, hidden_size, reverse, args.test, beam_size, inp,
                args.corpus)
def run(args):
    """Train or test the seq2seq model based on the --train / --test flags.

    Bug fix: `save_dir` and `corpus_name` were passed to trainIters() but
    never defined in this scope, so the train path raised NameError. They
    are now read from `args` like every other option.

    Raises:
        RuntimeError: if --test is given without a model file, or if
            neither --train nor --test is given.
    """
    learning_rate = args.learning_rate
    loadFilename = args.model_file
    datafile = args.data_file
    # Encoder and decoder deliberately share the same layer count.
    encoder_n_layers = args.layers
    decoder_n_layers = args.layers
    hidden_size = args.hidden_size
    dropout = args.dropout
    attn_model = args.attn_model
    n_iteration = args.iteration
    batch_size = args.batch_size
    save_every = args.save_every
    print_every = args.print_every
    decoder_learning_ratio = args.decoder_learning_ratio
    clip = args.clip
    beam_size = args.beam_size
    inp = args.input
    # TODO(review): confirm these two attribute names exist on the arg parser;
    # they were previously referenced without ever being defined.
    save_dir = args.save_dir
    corpus_name = args.corpus_name

    if args.test:
        if loadFilename:
            print("Starting testing model!")
            runTest(decoder_n_layers, hidden_size, False, loadFilename,
                    beam_size, inp, datafile)
        else:
            raise RuntimeError("Please assign modelFile to load")
    elif args.train:
        print("Starting Training model!")
        trainIters(attn_model=attn_model, hidden_size=hidden_size,
                   encoder_n_layers=encoder_n_layers,
                   decoder_n_layers=decoder_n_layers, save_dir=save_dir,
                   n_iteration=n_iteration, batch_size=batch_size,
                   learning_rate=learning_rate,
                   decoder_learning_ratio=decoder_learning_ratio,
                   print_every=print_every, save_every=save_every,
                   clip=clip, dropout=dropout, corpus_name=corpus_name,
                   datafile=datafile, modelFile=loadFilename)
    else:
        raise RuntimeError("Please specify a running mode between train and test")
# test
import torch
from evaluate import runTest

if __name__ == '__main__':
    # load path
    modelFile = "./model_param/4_4_512/30000_seq2seq_bidir_model_test.tar"
    corpus_name = "test_data"
    # The test parameters are consistent with the training model
    n_layers = 4
    input_size = 300
    hidden_size = 512
    beam_size = 4
    # Kick off evaluation with the checkpoint above.
    runTest(n_layers, input_size, hidden_size, modelFile, beam_size,
            corpus_name)
# Dialogue test script.
from evaluate import runTest

# Bug fix: the guard was `if __name__ == __main__():`, which calls an
# undefined name `__main__` and raises NameError; the correct comparison
# is against the string '__main__'.
if __name__ == '__main__':
    # Model path.
    modelFile = "model_path"
    # Parameters must stay consistent with the trained model.
    n_layers = 4
    hidden_size = 512
    beam_size = 4  # beam search width
    # Corpus path.
    corpus = "corpus_path"
    # NOTE: the model architecture at test time must match training
    # (e.g. attention, bidirectional LSTM).
    runTest(n_layers, hidden_size, modelFile, beam_size, corpus)