# from model_seq2seq import Seq2seq

# TF1-style session config: allow ops to fall back to CPU when a GPU kernel is
# unavailable, and grow GPU memory on demand instead of grabbing it all upfront.
# NOTE(review): `tf` is not imported in this visible chunk — presumably
# `import tensorflow as tf` appears earlier in the file; verify.
tf_config = tf.ConfigProto(allow_soft_placement=True)
tf_config.gpu_options.allow_growth = True

if __name__ == "__main__":
    print("(1)load data......")
    args = load_arguments()

    # Set the GPU to use before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)

    # Pick the dataset file.
    # NOTE(review): the flag is spelled `breif` (sic) — the typo must match the
    # argument parser in load_arguments(), so it is kept as-is. Also confirm the
    # selection logic: as written, breif == 0 selects the *brief* dialogue file.
    dataset_file = os.path.join(
        os.path.abspath('.'),
        'dataset',
        'COVID-brief-Dialogue.txt' if args.breif == 0 else 'COVID-Dialogue.txt',
    )

    # Only the source/target documents are used here; the remaining six return
    # values of load_data() are discarded.
    docs_source, docs_target, _, _, _, _, _, _ = load_data(dataset_file, is_shuffle=False)
    w2i_source, i2w_source = make_vocab(docs_source)
    w2i_target, i2w_target = make_vocab(docs_target)

    print("(2) build model......")
    config = Config()
    config.source_vocab_size = len(w2i_source)
    config.target_vocab_size = len(w2i_target)

    # Inference-style setup: no teacher forcing, attention on, beam width 3.
    model = Seq2seq(config=config, w2i_target=w2i_target,
                    useTeacherForcing=False, useAttention=True, useBeamSearch=3)

    print("(3) run model......")
    # max_target_len = 20
from model_seq2seq_contrib import Seq2seq
from train_seq2seq import load_data, load_test_data, make_vocab, make_target_vocab, get_batch
from train_seq2seq import Config
# from model_seq2seq import Seq2seq

# TF1-style session config: soft device placement plus on-demand GPU memory growth.
# NOTE(review): `tf` is not imported in this visible chunk — presumably
# `import tensorflow as tf` appears earlier in the file; verify.
tf_config = tf.ConfigProto(allow_soft_placement=True)
tf_config.gpu_options.allow_growth = True

# Checkpoint locations for restoring a trained model.
# model_path = "checkpoint/model.ckpt"
model_path = "./checkpoint/"
# NOTE(review): this `model` string is later shadowed by the (currently
# commented-out) Seq2seq instance of the same name — consider renaming to
# e.g. `meta_path` to avoid the collision when the model build is re-enabled.
model = "checkpoint/model.ckpt.meta"

if __name__ == "__main__":
    print("(1)load data......")

    # NOTE(review): both loaders are called with an empty path — presumably
    # they fall back to a default dataset location internally; confirm.
    docS, docT = load_test_data("")
    docs_source, docs_target = load_data("")

    w2i_source, i2w_source = make_vocab(docs_source)
    ## w2i_target, i2w_target = make_vocab(docs_target)
    # Target vocabulary is built with the dedicated target-vocab helper.
    w2i_target, i2w_target = make_target_vocab(docs_target)
    # print("i2w target:", i2w_target.keys())

    # print("(2) build model......")
    config = Config()
    config.source_vocab_size = len(w2i_source)
    config.target_vocab_size = len(w2i_target)
    # model = Seq2seq(config=config, w2i_target=w2i_target,
    #                 useTeacherForcing=False, useAttention=True, useBeamSearch=3)

    print("(3) run model......")
    print_every = 100
    max_target_len = 20