# Command-line configuration.
FLAGS = tf.flags.FLAGS
# FIX: FLAGS._parse_flags() was removed in TF >= 1.5; flags are parsed lazily
# on first attribute access. Also, FLAGS.__flags maps names to Flag objects
# (not raw values) on newer TF 1.x, so read each value via getattr() instead
# of printing the dict values directly.
print("\nParameters:")
for attr in sorted(FLAGS.__flags.keys()):
    print("{}={}".format(attr.upper(), getattr(FLAGS, attr)))
print("")

# Load vocabularies and the evaluation data.
vocab = data_helpers.load_vocab(FLAGS.vocab_file)
print('vocabulary size: {}'.format(len(vocab)))
charVocab = data_helpers.load_char_vocab(FLAGS.char_vocab_file)

response_data = data_helpers.load_responses(FLAGS.response_file, vocab, FLAGS.max_response_len)
print('response_data size: {}'.format(len(response_data)))
test_dataset = data_helpers.load_dataset(FLAGS.test_file, vocab, FLAGS.max_utter_len, FLAGS.max_utter_num, response_data)
print('test_pairs: {}'.format(len(test_dataset)))

# Equal weighting for the two target classes.
target_loss_weight = [1.0, 1.0]

print("\nEvaluating...\n")
# Restore from the most recent checkpoint in the configured directory.
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
print(checkpoint_file)

graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
# Print the parsed command-line configuration.
# FIX: on TF >= 1.5 FLAGS.__flags maps names to Flag objects, so printing the
# dict values directly shows object reprs; reading each flag through
# getattr(FLAGS, name) yields the plain parsed value on all TF 1.x versions.
print("\nParameters:")
for attr in sorted(FLAGS.__flags.keys()):
    print("{}={}".format(attr.upper(), getattr(FLAGS, attr)))
print("")

# Load data
print("Loading data...")
vocab = data_helpers.load_vocab(FLAGS.vocab_file)
print('vocabulary size: {}'.format(len(vocab)))
charVocab = data_helpers.load_char_vocab(FLAGS.char_vocab_file)
print('charVocab size: {}'.format(len(charVocab)))

train_dataset = data_helpers.load_dataset(FLAGS.train_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len, FLAGS.max_response_len, FLAGS.max_persona_len)
print('train dataset size: {}'.format(len(train_dataset)))
valid_dataset = data_helpers.load_dataset(FLAGS.valid_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len, FLAGS.max_response_len, FLAGS.max_persona_len)
print('valid dataset size: {}'.format(len(valid_dataset)))

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
# Print the parsed command-line configuration (values read via getattr so the
# plain value, not a Flag object, is shown on TF >= 1.5).
print("\nParameters:")
# tf.app.flags.FLAGS.flag_values_dict()
for attr in sorted(FLAGS.__flags.keys()):
    print("{}={}".format(attr.upper(), getattr(FLAGS, attr)))
print("")

# Load data
print("Loading data...")
vocab = data_helpers.load_vocab(FLAGS.vocab_file)
print('vocabulary size: {}'.format(len(vocab)))
charVocab = data_helpers.load_char_vocab(FLAGS.char_vocab_file)
print('charVocab size: {}'.format(len(charVocab)))

train_dataset = data_helpers.load_dataset(FLAGS.train_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len)
print('train_dataset: {}'.format(len(train_dataset)))
valid_dataset = data_helpers.load_dataset(FLAGS.valid_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len)
print('valid_dataset: {}'.format(len(valid_dataset)))
test_dataset = data_helpers.load_dataset(FLAGS.test_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len)
# BUG FIX: the original re-printed len(valid_dataset) with a 'valid_dataset'
# label here (copy-paste error); report the test set that was just loaded.
print('test_dataset: {}'.format(len(test_dataset)))

# Equal weighting for the two target classes.
target_loss_weight = [1.0, 1.0]

# Set random seed to help reproduce result
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") FLAGS = tf.flags.FLAGS FLAGS._parse_flags() print("\nParameters:") for attr, value in sorted(FLAGS.__flags.items()): print("{}={}".format(attr.upper(), value)) print("") vocab = data_helpers.load_vocab(FLAGS.vocab_file) print('vocabulary size: {}'.format(len(vocab))) charVocab = data_helpers.load_char_vocab(FLAGS.char_vocab_file) print('charVocab size: {}'.format(len(charVocab))) test_dataset = data_helpers.load_dataset(FLAGS.task_name, FLAGS.test_file, vocab, FLAGS.max_utter_num, FLAGS.max_utter_len, FLAGS.max_response_len, FLAGS.max_persona_num, FLAGS.max_persona_len) print('test dataset size: {}'.format(len(test_dataset))) print("\nEvaluating...\n") checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) print(checkpoint_file) graph = tf.Graph() with graph.as_default(): session_conf = tf.ConfigProto( allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") FLAGS = tf.flags.FLAGS FLAGS._parse_flags() print("\nParameters:") for attr, value in sorted(FLAGS.__flags.items()): print("{}={}".format(attr.upper(), value)) print("") # Load data print("Loading data...") vocab = data_helpers.load_vocab(FLAGS.vocab_file) print('vocabulary size: {}'.format(len(vocab))) response_data = data_helpers.load_responses(FLAGS.response_file, vocab, FLAGS.max_response_len) train_dataset = data_helpers.load_dataset(FLAGS.train_file, vocab, FLAGS.max_utter_len, FLAGS.max_utter_num, response_data) print('train_pairs: {}'.format(len(train_dataset))) valid_dataset = data_helpers.load_dataset(FLAGS.valid_file, vocab, FLAGS.max_utter_len, FLAGS.max_utter_num, response_data) # *varied-length* print('valid_pairs: {}'.format(len(valid_dataset))) test_dataset = data_helpers.load_dataset(FLAGS.test_file, vocab, FLAGS.max_utter_len, FLAGS.max_utter_num, response_data) print('test_pairs: {}'.format(len(test_dataset))) target_loss_weight=[1.0,1.0] with tf.Graph().as_default(): session_conf = tf.ConfigProto( allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=session_conf) with sess.as_default(): imn = IMN(