def create_training_model(FLAGS, vocab_in, vocab_out=None):
    batcher_train = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS,
                            data_file=FLAGS.train_name)
    train_model = SummarizationModel(FLAGS, vocab_in, vocab_out, batcher_train)
    logging.info("Building graph...")
    train_model.build_graph()

    # Create the dev model.
    # tf.flags cannot be deep-copied, so the flags are converted to a
    # namedtuple instead. TODO: find a cleaner way to do this.
    FLAGS_eval = FLAGS._asdict()
    FLAGS_eval["mode"] = "eval"
    FLAGS_eval = config.generate_nametuple(FLAGS_eval)
    #variable_scope.get_variable_scope().reuse_variables()
    batcher_dev = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS,
                          data_file=FLAGS.dev_name)
    dev_model = SummarizationModel(FLAGS_eval, vocab_in, vocab_out, batcher_dev)
    dev_model.build_graph()

    train_model.create_or_load_recent_model()
    return train_model, dev_model

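# Usage sketch (not part of the original pipeline): one way the train/dev
# pair could be wired up. Vocabulary loading mirrors decode_Beam below;
# `run_training` is a hypothetical driver named here only for illustration.
def _example_training_setup(FLAGS):
    vocab_in, vocab_out = data.load_dict_data(FLAGS)
    train_model, dev_model = create_training_model(FLAGS, vocab_in, vocab_out)
    # run_training(train_model, dev_model)  # hypothetical training loop
    return train_model, dev_model
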
def decode_Beam(FLAGS):
    # In decode mode we decode one example at a time. On each step there are
    # beam_size-many hypotheses in the beam, so the batch is made out of these
    # hypotheses (i.e. batch_size == beam_size).
    #if FLAGS.mode == 'decode':
    #    FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check that we are in decode mode.
    #if FLAGS.single_pass and FLAGS.mode != 'decode':
    #    raise Exception("The single_pass flag should only be True in decode mode")

    vocab_in, vocab_out = data.load_dict_data(FLAGS)

    FLAGS_batcher = config.retype_FLAGS()
    FLAGS_decode = FLAGS_batcher._asdict()
    FLAGS_decode["max_dec_steps"] = 1
    FLAGS_decode["mode"] = "decode"
    FLAGS_decode = config.generate_nametuple(FLAGS_decode)

    # The model is configured with max_dec_steps=1 because we only ever run
    # one step of the decoder at a time (to do beam search). Note that the
    # batcher keeps its original max_dec_steps (e.g. 100) because the batches
    # need to contain the full summaries.
    batcher = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS_batcher,
                      data_file=FLAGS.test_name)
    model = SummarizationModel(FLAGS_decode, vocab_in, vocab_out, batcher)
    decoder = BeamSearchDecoder(model, batcher, vocab_out)
    decoder.decode()

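# Minimal, self-contained sketch of the flag-override pattern used above:
# dump the namedtuple to a dict, mutate the copy, and rebuild. This is what
# config.generate_nametuple is assumed to do with the full flag set.
def _flags_override_demo():
    from collections import namedtuple
    Config = namedtuple("Config", ["mode", "max_dec_steps"])
    base = Config(mode="train", max_dec_steps=100)
    overridden = Config(**dict(base._asdict(), mode="decode", max_dec_steps=1))
    # namedtuple._replace collapses the dump/mutate/rebuild dance into one call:
    assert overridden == base._replace(mode="decode", max_dec_steps=1)
    return overridden
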
def create_decode_model(FLAGS, vocab_in, vocab_out):
    batcher = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS,
                      data_file=FLAGS.qq_name)
    import eval

    FLAGS_decode = config.retype_FLAGS()._asdict()
    FLAGS_decode["max_dec_steps"] = 1
    FLAGS_decode["mode"] = "decode"
    FLAGS_decode = config.generate_nametuple(FLAGS_decode)
    model = SummarizationModel(FLAGS_decode, vocab_in, vocab_out, batcher)
    #model.graph.as_default()
    decoder = eval.EvalDecoder(model, batcher, vocab_out)
    return decoder

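# Usage sketch (hypothetical wiring): build the decode model once and reuse
# the returned decoder across calls. pair_wise_decode is the only EvalDecoder
# entry point shown in this file (see decode_multi below).
def _example_decode_setup(FLAGS):
    vocab_in, vocab_out = data.load_dict_data(FLAGS)
    decoder = create_decode_model(FLAGS, vocab_in, vocab_out)
    return decoder
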
def decode_multi(FLAGS):
    vocab_in, vocab_out = data.load_dict_data(FLAGS)
    batcher = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS,
                      data_file=FLAGS.test_name, shuffle=False)
    import eval

    FLAGS_decode = config.retype_FLAGS()._asdict()
    FLAGS_decode["max_dec_steps"] = 1
    FLAGS_decode = config.generate_nametuple(FLAGS_decode)
    model = SummarizationModel(FLAGS_decode, vocab_in, vocab_out, batcher)
    decoder = eval.EvalDecoder(model, batcher, vocab_out)

    # Report the wall-clock time of the full pair-wise decode pass.
    time_start = time.time()
    decoder.pair_wise_decode()
    time_end = time.time()
    print(time_end - time_start)

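# Hypothetical entry point (assumption: the real dispatch lives elsewhere and
# FLAGS.mode selects between training and the decode paths above):
def _example_main():
    FLAGS = config.retype_FLAGS()
    if FLAGS.mode == "decode":
        decode_Beam(FLAGS)
    else:
        vocab_in, vocab_out = data.load_dict_data(FLAGS)
        train_model, dev_model = create_training_model(FLAGS, vocab_in, vocab_out)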