def __init__(self):
  """Build hparams, estimator and decode settings, then decode from a file.

  All configuration is read from command-line FLAGS.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  trainer_utils.log_registry()

  self.data_dir = os.path.expanduser(FLAGS.data_dir)
  self.output_dir = os.path.expanduser(FLAGS.output_dir)

  # Hyper-parameters plus the problem definition they apply to.
  self.hparams = trainer_utils.create_hparams(
      FLAGS.hparams_set, self.data_dir, passed_hparams=FLAGS.hparams)
  trainer_utils.add_problem_hparams(self.hparams, FLAGS.problems)

  # Only the estimator is needed here; the other experiment component
  # is discarded.
  self.estimator, _ = trainer_utils.create_experiment_components(
      data_dir=self.data_dir,
      model_name=FLAGS.model,
      hparams=self.hparams,
      run_config=trainer_utils.create_run_config(self.output_dir))

  self.decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)
  self.decode_hp.add_hparam("shards", FLAGS.decode_shards)
  self.decode_hp.add_hparam("shard_id", FLAGS.worker_id)

  # NOTE(review): the decode result is bound to a local and never used —
  # confirm whether it should be stored on self.
  output_sentence = decoding.decode_from_file(
      self.estimator,
      FLAGS.decode_from_file,
      self.decode_hp,
      FLAGS.decode_to_file,
      input_sentence=FLAGS.input_sentence)
def main(unused_argv):
  """Train, then evaluate, a single-problem model on TPU."""
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.set_random_seed(123)
  # Multi-problem runs are not supported by this entry point.
  assert len(FLAGS.problems.split("-")) == 1

  hparams = trainer_utils.create_hparams(
      FLAGS.hparams_set, FLAGS.data_dir, passed_hparams=FLAGS.hparams)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
  problem = hparams.problem_instances[0]

  model_fn = lib.get_model_fn(FLAGS.model, hparams)
  input_fn = lib.get_input_fn(FLAGS.data_dir, problem, hparams)

  # Global batch size is the per-shard size scaled by the shard count.
  estimator = lib.make_estimator(
      model_fn=model_fn,
      output_dir=FLAGS.output_dir,
      master=FLAGS.master,
      num_shards=FLAGS.tpu_num_shards,
      batch_size=hparams.batch_size_per_shard * FLAGS.tpu_num_shards,
      log_device_placement=FLAGS.log_device_placement)

  estimator.train(
      lambda params: input_fn(tf.estimator.ModeKeys.TRAIN, params),
      steps=FLAGS.train_steps)
  estimator.evaluate(
      lambda params: input_fn(tf.estimator.ModeKeys.EVAL, params),
      steps=FLAGS.eval_steps)
def create_experiment(run_config, hparams, model_name, problem_name, data_dir,
                      train_steps, eval_steps, min_eval_frequency,
                      use_tpu=True):
  """Create Experiment."""
  # Attach the data location and problem definition to the hparams.
  hparams.add_hparam("data_dir", data_dir)
  trainer_utils.add_problem_hparams(hparams, problem_name)

  estimator = create_estimator(model_name, hparams, run_config,
                               use_tpu=use_tpu)

  # Input functions are produced by the Problem object itself.
  problem = hparams.problem_instances[0]
  make_input_fn = problem.make_estimator_input_fn
  train_input_fn = make_input_fn(tf.estimator.ModeKeys.TRAIN, hparams)
  eval_input_fn = make_input_fn(tf.estimator.ModeKeys.EVAL, hparams)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=train_steps,
      eval_steps=eval_steps,
      min_eval_frequency=min_eval_frequency,
      train_steps_per_iteration=min_eval_frequency)
def main(_):
  """Decode interactively, from a file, or from the dataset."""
  tf.logging.set_verbosity(tf.logging.INFO)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  trainer_utils.log_registry()
  trainer_utils.validate_flags()
  assert FLAGS.schedule == "train_and_evaluate"

  data_dir = os.path.expanduser(FLAGS.data_dir)
  output_dir = os.path.expanduser(FLAGS.output_dir)

  hparams = trainer_utils.create_hparams(
      FLAGS.hparams_set, data_dir, passed_hparams=FLAGS.hparams)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)

  # Only the estimator component is needed for decoding.
  estimator, _ = trainer_utils.create_experiment_components(
      data_dir=data_dir,
      model_name=FLAGS.model,
      hparams=hparams,
      run_config=trainer_utils.create_run_config(output_dir))

  decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)
  decode_hp.add_hparam("shards", FLAGS.decode_shards)
  decode_hp.add_hparam("shard_id", FLAGS.worker_id)

  # Dispatch on the requested decode source.
  if FLAGS.decode_interactive:
    decoding.decode_interactively(estimator, decode_hp)
  elif FLAGS.decode_from_file:
    decoding.decode_from_file(estimator, FLAGS.decode_from_file, decode_hp,
                              FLAGS.decode_to_file)
  else:
    decoding.decode_from_dataset(
        estimator,
        FLAGS.problems.split("-"),
        decode_hp,
        decode_to_file=FLAGS.decode_to_file,
        dataset_split="test" if FLAGS.eval_use_test_set else None)
def testSmoke(self):
  """Build train and eval specs for a tiny problem and sanity-check them."""
  data_dir = trainer_utils_test.TrainerUtilsTest.data_dir
  hparams = trainer_utils.create_hparams("transformer_tpu", data_dir)
  trainer_utils.add_problem_hparams(hparams, "tiny_algo")
  problem = hparams.problem_instances[0]

  model_fn = lib.get_model_fn("transformer", hparams, use_tpu=False)
  input_fn = lib.get_input_fn(data_dir, problem, hparams)

  params = {"batch_size": 16}
  config = tf.contrib.tpu.RunConfig(
      tpu_config=tf.contrib.tpu.TPUConfig(num_shards=2))
  features, targets = input_fn(tf.estimator.ModeKeys.TRAIN, params)

  with tf.variable_scope("training"):
    spec = model_fn(features, targets, tf.estimator.ModeKeys.TRAIN,
                    params, config)
    self.assertIsNotNone(spec.loss)
    self.assertIsNotNone(spec.train_op)

  with tf.variable_scope("eval"):
    spec = model_fn(features, targets, tf.estimator.ModeKeys.EVAL,
                    params, config)
    self.assertIsNotNone(spec.eval_metrics)
def create_experiment(run_config, hparams, model_name, problem_name, data_dir,
                      train_steps, eval_steps, min_eval_frequency,
                      use_tpu=True):
  """Create Experiment."""
  hparams.add_hparam("data_dir", data_dir)
  trainer_utils.add_problem_hparams(hparams, problem_name)

  # Global batch size: per-shard size scaled by the TPU shard count.
  batch_size = (hparams.tpu_batch_size_per_shard *
                run_config.tpu_config.num_shards)
  model_fn = get_model_fn(model_name, hparams, use_tpu=use_tpu)
  estimator = create_estimator(model_fn, run_config, batch_size,
                               use_tpu=use_tpu)

  train_input_fn = get_input_fn(tf.estimator.ModeKeys.TRAIN, hparams)
  eval_input_fn = get_input_fn(tf.estimator.ModeKeys.EVAL, hparams)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=train_steps,
      eval_steps=eval_steps,
      min_eval_frequency=min_eval_frequency,
      train_steps_per_iteration=min_eval_frequency)
def testSingleEvalStepRawSession(self):
  """Illustrate how to run a T2T model in a raw session."""
  # Configuration that would normally come from the command line.
  model_name = "transformer"
  FLAGS.hparams_set = "transformer_test"
  FLAGS.problems = "tiny_algo"
  data_dir = "/tmp"  # Used only when a vocab file or such like is needed.

  # Problem object, hparams, placeholders and the features dict.
  encoders = registry.problem(FLAGS.problems).feature_encoders(data_dir)
  hparams = trainer_utils.create_hparams(FLAGS.hparams_set, data_dir)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)

  inputs_ph = tf.placeholder(dtype=tf.int32)   # Just length dimension.
  batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])  # Make it 4D.
  # In INFER mode targets can be None.
  targets_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])  # Make it 4D.
  features = {
      "inputs": batch_inputs,
      "targets": batch_targets,
      "problem_choice": tf.constant(0),  # We run on the first problem here.
      "input_space_id": tf.constant(hparams.problems[0].input_space_id),
      "target_space_id": tf.constant(hparams.problems[0].target_space_id),
  }

  # Build the graph by invoking model_fn in EVAL mode.
  mode = tf.estimator.ModeKeys.EVAL
  estimator_spec = model_builder.model_fn(
      model_name, features, mode, hparams, problem_names=[FLAGS.problems])
  # These are not images, so axes 2 and 3 are degenerate.
  predictions = tf.squeeze(
      estimator_spec.predictions["predictions"], axis=[2, 3])

  # Run the graph on concrete data.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    inputs_numpy = encoders["inputs"].encode("0 1 0")
    targets_numpy = encoders["targets"].encode("0 1 0")
    feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
    np_predictions = sess.run(predictions, feed)
    # Expect batch x length x vocab_size = 1 x 3 x 4.
    self.assertEqual(np_predictions.shape, (1, 3, 4))
def testSingleStep(self):
  """Run one train and one eval step through Experiment.test()."""
  data_dir = TrainerUtilsTest.data_dir
  hparams = trainer_utils.create_hparams("transformer_test", data_dir)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
  run_config = trainer_utils.create_run_config(
      output_dir=tf.test.get_temp_dir())
  experiment = trainer_utils.create_experiment(
      data_dir=data_dir,
      model_name="transformer",
      train_steps=1,
      eval_steps=1,
      hparams=hparams,
      run_config=run_config)
  experiment.test()
def testSingleTrainStepCall(self):
  """Illustrate how to run a T2T model in a raw session."""
  # Configuration that would normally come from the command line.
  model_name = "transformer"
  FLAGS.hparams_set = "transformer_test"
  FLAGS.problems = "tiny_algo"
  data_dir = "/tmp"  # Used only when a vocab file or such like is needed.

  # Problem object and hparams.
  encoders = registry.problem(FLAGS.problems).feature_encoders(data_dir)
  hparams = trainer_utils.create_hparams(FLAGS.hparams_set, data_dir)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)

  # Instantiate the model in TRAIN mode.
  mode = tf.estimator.ModeKeys.TRAIN
  model = registry.model(model_name)(hparams, mode)

  # Placeholders carry just the length dimension; reshape to the 4-D
  # layout the model expects.
  inputs_ph = tf.placeholder(dtype=tf.int32)
  batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])
  targets_ph = tf.placeholder(dtype=tf.int32)
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])
  features = {
      "inputs": batch_inputs,
      "targets": batch_targets,
      "target_space_id": tf.constant(hparams.problems[0].target_space_id),
  }

  # First call builds the variables; a second call must reuse them.
  predictions, _ = model(features)
  nvars = len(tf.trainable_variables())
  model(features)
  self.assertEqual(nvars, len(tf.trainable_variables()))

  # Run the graph on concrete data.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {
        inputs_ph: encoders["inputs"].encode("0 1 0"),
        targets_ph: encoders["targets"].encode("0 1 0"),
    }
    np_predictions = sess.run(predictions, feed)
    # Expect batch x length x 1 x 1 x vocab_size = 1 x 3 x 1 x 1 x 4.
    self.assertEqual(np_predictions.shape, (1, 3, 1, 1, 4))
def testSingleEvalStepRawSession(self):
  """Illustrate how to run a T2T model in a raw session."""
  # Settings that would normally be supplied on the command line.
  model_name = "transformer"
  FLAGS.hparams_set = "transformer_test"
  FLAGS.problems = "tiny_algo"
  data_dir = "/tmp"  # Used only when a vocab file or such like is needed.

  # Create the problem object, hparams, placeholders, features dict.
  encoders = registry.problem(FLAGS.problems).feature_encoders(data_dir)
  hparams = trainer_utils.create_hparams(FLAGS.hparams_set, data_dir)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)

  inputs_ph = tf.placeholder(dtype=tf.int32)   # Just length dimension.
  batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])  # Make it 4D.
  # In INFER mode targets can be None.
  targets_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])  # Make it 4D.
  features = {
      "inputs": batch_inputs,
      "targets": batch_targets,
      "problem_choice": tf.constant(0),  # We run on the first problem here.
      "input_space_id": tf.constant(hparams.problems[0].input_space_id),
      "target_space_id": tf.constant(hparams.problems[0].target_space_id),
  }

  # Build the graph via model_fn in EVAL mode.
  mode = tf.estimator.ModeKeys.EVAL
  spec = model_builder.model_fn(
      model_name, features, mode, hparams, problem_names=[FLAGS.problems])
  predictions_dict = spec.predictions
  # These are not images, axis=2,3 are not needed.
  predictions = tf.squeeze(predictions_dict["predictions"], axis=[2, 3])

  # Having the graph, run it on some data.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    inputs = "0 1 0"
    targets = "0 1 0"
    # Encode from raw string to numpy arrays using the problem encoders.
    inputs_numpy = encoders["inputs"].encode(inputs)
    targets_numpy = encoders["targets"].encode(targets)
    feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
    np_predictions = sess.run(predictions, feed)
    # Shape is batch x length x vocab_size = 1 x 3 x 4.
    self.assertEqual(np_predictions.shape, (1, 3, 4))
def main(unused_argv):
  """Alternate train/eval rounds on TPU; eval-only when no train steps."""
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.set_random_seed(123)
  # This entry point handles exactly one problem.
  assert len(FLAGS.problems.split("-")) == 1

  hparams = trainer_utils.create_hparams(
      FLAGS.hparams_set, FLAGS.data_dir, passed_hparams=FLAGS.hparams)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
  problem = hparams.problem_instances[0]

  model_fn = lib.get_model_fn(FLAGS.model, hparams)
  input_fn = lib.get_input_fn(FLAGS.data_dir, problem, hparams)
  estimator = lib.make_estimator(
      model_fn=model_fn,
      output_dir=FLAGS.output_dir,
      master=FLAGS.master,
      num_shards=FLAGS.tpu_num_shards,
      batch_size=hparams.tpu_batch_size_per_shard * FLAGS.tpu_num_shards,
      log_device_placement=FLAGS.log_device_placement,
      iterations_per_loop=FLAGS.iterations_per_loop)

  def train_input(params):
    return input_fn(tf.estimator.ModeKeys.TRAIN, params)

  def eval_input(params):
    return input_fn(tf.estimator.ModeKeys.EVAL, params)

  # Eval-only mode when no training was requested.
  if not FLAGS.train_steps:
    assert FLAGS.eval_steps
    estimator.evaluate(eval_input, steps=FLAGS.eval_steps)
    return

  # Full rounds of local_eval_frequency steps, plus a shorter final round
  # for any remainder.
  num_rounds = FLAGS.train_steps // FLAGS.local_eval_frequency
  steps_per_round = [FLAGS.local_eval_frequency] * num_rounds
  remainder = FLAGS.train_steps % FLAGS.local_eval_frequency
  if remainder:
    steps_per_round.append(remainder)

  for num_steps in steps_per_round:
    estimator.train(train_input, steps=num_steps)
    if FLAGS.eval_steps:
      estimator.evaluate(eval_input, steps=FLAGS.eval_steps)

  tf.logging.info("Training and evaluation complete.")
def __init__(self, str_tokens, eval_tokens=None, batch_size=1000):
  """Build the estimator used to encode tokens.

  Args:
    str_tokens: the original token inputs, as the format of ['t1', 't2'...].
      The items within should be strings.
    eval_tokens: if not None, then should be the same length as str_tokens,
      for similarity comparisons.
    batch_size: batch size used for encoding.
  """
  # isinstance() rather than `type(...) is ...` so subclasses are accepted.
  assert isinstance(str_tokens, list)
  assert str_tokens
  assert isinstance(str_tokens[0], str)
  self.str_tokens = str_tokens
  # Always define the attribute so later reads cannot hit AttributeError
  # when no eval tokens were supplied.
  self.eval_tokens = None
  if eval_tokens is not None:
    assert (len(eval_tokens) == len(str_tokens) and
            isinstance(eval_tokens[0], str))
    self.eval_tokens = eval_tokens

  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('tf logging set to INFO by: %s' % self.__class__.__name__)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  trainer_utils.log_registry()
  trainer_utils.validate_flags()
  assert FLAGS.schedule == "train_and_evaluate"

  data_dir = os.path.expanduser(FLAGS.data_dir)
  out_dir = os.path.expanduser(FLAGS.output_dir)
  hparams = trainer_utils.create_hparams(
      FLAGS.hparams_set, data_dir, passed_hparams=FLAGS.hparams)
  trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
  # Evaluate against the test split rather than dev.
  hparams.eval_use_test_set = True

  # Only the estimator component is kept.
  self.estimator, _ = trainer_utils.create_experiment_components(
      data_dir=data_dir,
      model_name=FLAGS.model,
      hparams=hparams,
      run_config=trainer_utils.create_run_config(out_dir))

  decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)
  decode_hp.add_hparam("shards", FLAGS.decode_shards)
  decode_hp.batch_size = batch_size
  self.decode_hp = decode_hp
  self.arr_results = None
  self._encoding_len = 1
def _init_env(self):
  """Build the inference graph and open a session restored from checkpoint.

  Side effects: sets self._hparams, self._inputs_ph, self._predictions and
  self._sess.
  """
  tf.logging.info("Import usr dir from %s", self._usr_dir)
  # `is not None` instead of `!= None` (identity check is the correct idiom).
  if self._usr_dir is not None:
    usr_dir.import_usr_dir(self._usr_dir)

  tf.logging.info("Start to create hparams,for %s of %s", self._problem,
                  self._hparams_set)
  self._hparams = trainer_utils.create_hparams(self._hparams_set,
                                               self._data_dir)
  trainer_utils.add_problem_hparams(self._hparams, self._problem)

  tf.logging.info("build the model_fn of %s of %s", self._model_name,
                  self._hparams)
  # Placeholder carries a flat int32 sequence; reshaped to the 4-D
  # [batch, length, 1, 1] layout the model expects. Targets are not
  # needed in PREDICT mode, so no target placeholder is built.
  self._inputs_ph = tf.placeholder(dtype=tf.int32)
  batch_inputs = tf.reshape(self._inputs_ph, [self._batch_size, -1, 1, 1])
  features = {
      "inputs": batch_inputs,
      "problem_choice": 0,  # We run on the first problem here.
      "input_space_id": self._hparams.problems[0].input_space_id,
      "target_space_id": self._hparams.problems[0].target_space_id,
  }

  mode = tf.estimator.ModeKeys.PREDICT
  estimator_spec = model_builder.model_fn(
      self._model_name, features, mode, self._hparams,
      problem_names=[self._problem], decode_hparams=self._hparams_dc)
  predictions_dict = estimator_spec.predictions
  self._predictions = predictions_dict["outputs"]
  # "scores" is not returned by greedy search, so it is not read here.

  tf.logging.info("Start to init tf session")
  if self._isGpu:
    print('Using GPU in Decoder')
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=self._fraction)
    self._sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=gpu_options))
  else:
    print('Using CPU in Decoder')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0)
    config = tf.ConfigProto(gpu_options=gpu_options)
    config.allow_soft_placement = True
    config.log_device_placement = False
    self._sess = tf.Session(config=config)

  with self._sess.as_default():
    ckpt = saver_mod.get_checkpoint_state(self._model_dir)
    saver = tf.train.Saver()
    tf.logging.info("Start to restore the parameters from %s",
                    ckpt.model_checkpoint_path)
    saver.restore(self._sess, ckpt.model_checkpoint_path)
  tf.logging.info("Finish intialize environment")
def create_experiment(run_config, hparams, model_name, problem_name, data_dir,
                      train_steps, eval_steps, min_eval_frequency,
                      use_tpu=True):
  """Create Experiment."""
  # Register data location and problem on the hparams.
  hparams.add_hparam("data_dir", data_dir)
  trainer_utils.add_problem_hparams(hparams, problem_name)

  # Estimator.
  estimator = create_estimator(model_name, hparams, run_config,
                               use_tpu=use_tpu)

  # Input fns come from the Problem object.
  problem = hparams.problem_instances[0]
  train_input_fn = problem.make_estimator_input_fn(
      tf.estimator.ModeKeys.TRAIN, hparams)
  eval_input_fn = problem.make_estimator_input_fn(
      tf.estimator.ModeKeys.EVAL, hparams)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=train_steps,
      eval_steps=eval_steps,
      min_eval_frequency=min_eval_frequency,
      train_steps_per_iteration=min_eval_frequency)
flags = tf.flags
FLAGS = flags.FLAGS

# The original help text was truncated ("... will be imported. The ");
# completed to describe the flag's purpose.
flags.DEFINE_string(
    "t2t_usr_dir", "",
    "Path to a Python module that will be imported. The registrations it "
    "performs (models, problems, hparams sets) become available to this "
    "script.")
flags.DEFINE_string("output_dir", "", "Base output directory for run.")

usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
FLAGS.schedule = 'train_and_evaluate'

hparams = utils.create_hparams(FLAGS.hparams_set, FLAGS.data_dir)
# SET EXTRA HYPER PARAMS HERE!
utils.add_problem_hparams(hparams, FLAGS.problems)

num_datashards = utils.devices.data_parallelism().n

# Build an EVAL-mode input pipeline and pull one (inputs, target) pair.
mode = tf.estimator.ModeKeys.EVAL
input_fn = utils.input_fn_builder.build_input_fn(
    mode=mode,
    hparams=hparams,
    data_dir=FLAGS.data_dir,
    num_datashards=num_datashards,
    worker_replicas=FLAGS.worker_replicas,
    worker_id=FLAGS.worker_id,
    batch_size=32)
inputs, target = input_fn()
def main(_):
  """Score (source, firstP, target) triples read from files.

  Writes one score per input line to FLAGS.scoreFile, restoring the
  original (pre-sort) order via sorted_keys.
  """
  # Set the logging level.
  tf.logging.set_verbosity(tf.logging.INFO)
  # Import module at usr_dir, if provided.
  if FLAGS.t2t_usr_dir is not None:
    usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  # Get inputs (list formatted) from file; all three files are required.
  assert FLAGS.srcFile is not None
  assert FLAGS.firstPFile is not None
  assert FLAGS.tgtFile is not None
  [sorted_inputs, sorted_firstP, sorted_targets], sorted_keys = \
      get_sorted_inputs(FLAGS.srcFile, FLAGS.firstPFile, FLAGS.tgtFile)
  # Ceiling division: number of batches needed to cover every input.
  num_decode_batches = (len(sorted_inputs) - 1) // FLAGS.eval_batch + 1
  assert len(sorted_inputs) == len(sorted_firstP) == len(sorted_targets)
  tf.logging.info("Writing decodes into %s" % FLAGS.scoreFile)
  outfile = tf.gfile.Open(FLAGS.scoreFile, "w")
  # Generate hyper-parameters.
  hparams = utils.create_hparams(FLAGS.hparams_set,
                                 FLAGS.data_dir,
                                 passed_hparams=FLAGS.hparams)
  utils.add_problem_hparams(hparams, FLAGS.problems)
  # Create input function.
  num_datashards = utils.devices.data_parallelism().n
  mode = tf.estimator.ModeKeys.EVAL
  input_fn = utils.input_fn_builder.build_input_fn(
      mode,
      hparams,
      data_dir=FLAGS.data_dir,
      num_datashards=num_datashards,
      worker_replicas=FLAGS.worker_replicas,
      worker_id=FLAGS.worker_id,
      batch_size=FLAGS.eval_batch)
  # Get wrappers for feeding data into models.
  inputs, target = input_fn()
  features = inputs
  features['targets'] = target
  inputs_vocab = hparams.problems[0].vocabulary["inputs"]
  targets_vocab = hparams.problems[0].vocabulary["targets"]
  # Iterator over numpy feed batches built from the sorted file contents.
  feed_iters = input_iter(0, num_decode_batches, sorted_inputs, sorted_firstP,
                          sorted_targets, inputs_vocab, targets_vocab)
  model_fn = utils.model_builder.build_model_fn(
      FLAGS.model,
      problem_names=[FLAGS.problems],
      train_steps=FLAGS.train_steps,
      worker_id=FLAGS.worker_id,
      worker_replicas=FLAGS.worker_replicas,
      eval_run_autoregressive=FLAGS.eval_run_autoregressive,
      decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams))
  est_spec = model_fn(features, target, mode, hparams)
  # Per-position negative log perplexity, summed over axes 1,2,3 to yield
  # one score per sequence in the batch.
  score, _ = metrics.padded_neg_log_perplexity(
      est_spec.predictions['predictions'], target)
  score = tf.reduce_sum(score, axis=[1,2,3])
  # Create session (Supervisor restores/initializes from output_dir).
  sv = tf.train.Supervisor(
      logdir=FLAGS.output_dir,
      global_step=tf.Variable(0, dtype=tf.int64, trainable=False,
                              name='global_step'))
  sess = sv.PrepareSession(config=tf.ConfigProto(allow_soft_placement=True))
  sv.StartQueueRunners(
      sess,
      tf.get_default_graph().get_collection(tf.GraphKeys.QUEUE_RUNNERS))
  sumt = 0
  scores_list = []
  # Loop for batched translation scoring.
  for i, features in enumerate(feed_iters):
    t = time.time()
    inputs_ = features["inputs"]
    firstP_ = features["firstP"]
    targets_ = features["targets"]
    # Expand trailing dims until rank 4 — assumes the model consumes the
    # 4-D [batch, length, 1, 1] layout; TODO confirm.
    while inputs_.ndim < 4:
      inputs_ = np.expand_dims(inputs_, axis=-1)
    while firstP_.ndim < 4:
      firstP_ = np.expand_dims(firstP_, axis=-1)
    while targets_.ndim < 4:
      targets_ = np.expand_dims(targets_, axis=-1)
    scores = sess.run(score,
                      feed_dict={inputs['inputs']: inputs_,
                                 inputs["firstP"]: firstP_,
                                 target: targets_})
    scores_list.extend(scores.tolist())
    # Progress / ETA bookkeeping.
    dt = time.time() - t
    sumt += dt
    avgt = sumt / (i+1)
    # NOTE(review): parses as (num_decode_batches - i) + 1; likely intended
    # num_decode_batches - (i + 1). Affects only the ETA display.
    needt = (num_decode_batches - i+1) * avgt
    print("Batch %d/%d worktime=(%s), lefttime=(%s)" %
          (i+1, num_decode_batches,
           time.strftime('%H:%M:%S',time.gmtime(sumt)),
           time.strftime('%H:%M:%S',time.gmtime(needt))))
  # NOTE(review): the reversal is presumably needed so sorted_keys indexes
  # line up with the scores — verify against get_sorted_inputs.
  scores_list.reverse()
  # Write to file with the original order.
  for index in range(len(sorted_inputs)):
    outfile.write("%.8f\n" % (scores_list[sorted_keys[index]]))