Example #1
import copy
import sys

import tensorflow as tf

# LSTMModel and logger are provided by the surrounding project.
class InferenceWrapper(object):
  """Model wrapper class for performing inference with a ShowAndTellModel."""

  def __init__(self, config, model_dir,
               ses_threads=2,
               length_normalization_factor=0.0,
               gpu_memory_fraction=0.3,
               gpu=1,
               with_image_embedding=True):
    self.config = copy.deepcopy(config)
    self.config.batch_size = 1
    self.flag_load_model = False
    self.model_dir = model_dir
    self.gpu = gpu
    self.gpu_memory_fraction = gpu_memory_fraction
    self.with_image_embedding = with_image_embedding

  def build_model(self):
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=self.gpu_memory_fraction)
    config_proto = tf.ConfigProto(gpu_options=gpu_options,
                                  allow_soft_placement=True)
    self.session = tf.Session(config=config_proto)
    with tf.device('/gpu:%d' % self.gpu):
      with tf.variable_scope("LSTMModel", reuse=None):
        if self.with_image_embedding:
          self.model = LSTMModel(config=self.config,
                                 mode="inference",
                                 model_dir=self.model_dir,
                                 flag_with_saver=True,
                                 num_steps=1,
                                 gpu=self.gpu)
        else:
          print('Please use with_image_embedding=1')
          sys.exit(-1)
        self.model.build()

  def load_model(self, model_path):
      self.model.saver.restore(self.session, model_path)
      self.flag_load_model = True
      self.model_path = model_path
      logger.info('Load model from %s', model_path)

  def feed_visual_feature(self, visual_feature):
    assert visual_feature.shape[0] == self.config.vf_size
    # assert self.flag_load_model, 'Must call load_model first'
    sess = self.session
    initial_state = sess.run(fetches="LSTMModel/lstm/initial_state:0",
                             feed_dict={"LSTMModel/visual_feature:0": visual_feature})
    return initial_state

  def inference_step(self, input_feed, state_feed):
    sess = self.session
    softmax_output, state_output = sess.run(
        fetches=["LSTMModel/softmax:0", "LSTMModel/lstm/state:0"],
        feed_dict={
            "LSTMModel/input_feed:0": input_feed,
            "LSTMModel/lstm/state_feed:0": state_feed,
        })
    return softmax_output, state_output, None
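
A minimal driver for this wrapper might look like the sketch below; config and model_dir are assumed to come from the project's utility helpers, and the checkpoint name and the <bos>=0 / <eos>=1 token ids are illustrative assumptions, not part of the original code.

import os
import numpy as np

wrapper = InferenceWrapper(config, model_dir, gpu=0)
wrapper.build_model()
wrapper.load_model(os.path.join(model_dir, 'variables', 'model_1000.ckpt'))  # assumed name

visual_feature = np.random.rand(wrapper.config.vf_size).astype(np.float32)
state = wrapper.feed_visual_feature(visual_feature)

caption, token = [], np.array([0])                # assumed <bos> id
for _ in range(20):                               # cap the caption length
    softmax, state, _ = wrapper.inference_step(token, state)
    token = np.array([int(softmax[0].argmax())])  # greedy next word
    if token[0] == 1:                             # assumed <eos> id
        break
    caption.append(int(token[0]))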
Example #2
def main(unused_args):
    train_collection = FLAGS.train_collection
    val_collection = FLAGS.val_collection
    overwrite = FLAGS.overwrite
    output_dir = utility.get_sim_dir(FLAGS)
    loss_info_file = os.path.join(output_dir, 'loss_info.txt')
    if os.path.exists(loss_info_file) and not overwrite:
        logger.info('%s exists. quit', loss_info_file)
        sys.exit(0)

    model_dir = utility.get_model_dir(FLAGS)
    config_path = os.path.join(os.path.dirname(__file__), 'model_conf',
                               FLAGS.model_name + '.py')
    config = utility.load_config(config_path)

    if FLAGS.fluency_method == 'None':
        FLAGS.fluency_method = None
    config.fluency_method = FLAGS.fluency_method
    if config.fluency_method == 'weighted':
        config.use_weighted_loss = True
    else:
        config.use_weighted_loss = False

    textbank = TextBank(utility.get_train_vocab_file(FLAGS))
    config.vocab_size = len(textbank.vocab)
    config.vf_size = int(
        open(os.path.join(utility.get_val_feat_dir(FLAGS),
                          'shape.txt')).read().split()[1])

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    config_proto = tf.ConfigProto(
        intra_op_parallelism_threads=FLAGS.ses_threads,
        gpu_options=gpu_options,
        allow_soft_placement=True)

    with_image_embedding = FLAGS.with_image_embedding > 0
    with tf.Graph().as_default(), tf.Session(config=config_proto) as session:

        assert len(config.buckets) >= 1
        assert config.buckets[-1] == config.max_num_steps
        with tf.device('/gpu:%d' % FLAGS.gpu):
            with tf.variable_scope("LSTMModel", reuse=None):
                if with_image_embedding:
                    model = LSTMModel(mode='eval',
                                      num_steps=config.buckets[-1],
                                      config=config,
                                      model_dir=model_dir,
                                      flag_with_saver=True)
                else:
                    # This option is deprecated.
                    print('Please use with_image_embedding=1')
                    sys.exit(-1)
                model.build()

        model_path_list = []
        _dir = os.path.join(model_dir, 'variables')
        for _file in os.listdir(_dir):
            if _file.startswith('model_') and _file.endswith('.ckpt.meta'):
                iter_n = int(_file[6:-10])  # strip 'model_' and '.ckpt.meta'
                model_path = os.path.join(_dir, 'model_%d.ckpt' % iter_n)
                model_path_list.append((iter_n, model_path))

        data_provider = BucketDataProvider(val_collection,
                                           utility.get_train_vocab_file(FLAGS),
                                           feature=FLAGS.vf_name,
                                           language=FLAGS.language,
                                           flag_shuffle=False,
                                           method=config.fluency_method,
                                           rootpath=FLAGS.rootpath)
        iter2loss = {}
        for iter_n, model_path in model_path_list:
            loss_file = os.path.join(output_dir, 'model_%d.ckpt' % iter_n,
                                     'loss.txt')
            if os.path.exists(loss_file) and not overwrite:
                logger.info('load loss from %s', loss_file)
                loss = float(open(loss_file).readline().strip())
                iter2loss[iter_n] = loss
                continue
            if not os.path.exists(os.path.split(loss_file)[0]):
                os.makedirs(os.path.split(loss_file)[0])

            model.saver.restore(session, model_path)
            # print([v.name for v in tf.trainable_variables()])
            logger.info('Restored model from %s', model_path)

            val_cost = run_epoch(session, config.batch_size,
                                 config.buckets[-1], config, model,
                                 data_provider)
            logger.info(
                "Validation cost for checkpoint model_%d.ckpt is %.3f" %
                (iter_n, val_cost))

            iter2loss[iter_n] = val_cost
            with open(loss_file, "w") as fw:
                fw.write('%g' % val_cost)

    sorted_iter2loss = sorted(iter2loss.items(), key=lambda x: x[1])
    with open(loss_info_file, 'w') as fw:
        fw.write('\n'.join(
            ['%d %g' % (iter_n, loss) for (iter_n, loss) in sorted_iter2loss]))
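
Since loss_info.txt is written in ascending order of loss, the best checkpoint can be read back in a few lines; a minimal sketch reusing loss_info_file and model_dir from above:

with open(loss_info_file) as fr:
    best_iter = int(fr.readline().split()[0])  # first line holds the lowest loss
best_model_path = os.path.join(model_dir, 'variables', 'model_%d.ckpt' % best_iter)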
Example #3
def main(unused_args):
    model_dir = utility.get_model_dir(FLAGS)
    if os.path.exists(model_dir) and not FLAGS.overwrite:
        logger.info('%s exists. quit', model_dir)
        sys.exit(0)

    # Load model configuration
    config_path = os.path.join(os.path.dirname(__file__), 'model_conf',
                               FLAGS.model_name + '.py')
    config = utility.load_config(config_path)

    FLAGS.vf_dir = os.path.join(FLAGS.rootpath, FLAGS.train_collection,
                                'FeatureData', FLAGS.vf_name)
    vocab_file = utility.get_vocab_file(FLAGS.train_collection,
                                        FLAGS.word_cnt_thr, FLAGS.rootpath)
    textbank = TextBank(vocab_file)
    config.vocab_size = len(textbank.vocab)
    config.vf_size = int(
        open(os.path.join(FLAGS.vf_dir, 'shape.txt')).read().split()[1])

    if hasattr(config, 'num_epoch_save'):
        num_epoch_save = config.num_epoch_save
    else:
        num_epoch_save = 1

    if FLAGS.fluency_method == 'None':
        FLAGS.fluency_method = None
    config.fluency_method = FLAGS.fluency_method
    if config.fluency_method == 'weighted':
        config.use_weighted_loss = True
    else:
        config.use_weighted_loss = False

    train_image_embedding = getattr(config, 'train_image_embedding', True)
    if not train_image_embedding:
        # Frozen-embedding configs are expected to carry 'freeze' in the name.
        assert 'freeze' in FLAGS.model_name
        logger.info('Not training image embedding')

    with_image_embedding = FLAGS.with_image_embedding != 0
    # Start model training
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    config_proto = tf.ConfigProto(
        intra_op_parallelism_threads=FLAGS.ses_threads,
        gpu_options=gpu_options,
        allow_soft_placement=True)

    with tf.Graph().as_default(), tf.Session(config=config_proto) as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        assert len(config.buckets) >= 1
        assert config.buckets[-1] == config.max_num_steps
        models = []
        with tf.device('/gpu:%s' % FLAGS.gpu):
            with tf.variable_scope("LSTMModel",
                                   reuse=None,
                                   initializer=initializer):
                if with_image_embedding:
                    m = LSTMModel(mode='train',
                                  num_steps=config.buckets[0],
                                  config=config,
                                  model_dir=model_dir,
                                  flag_with_saver=True,
                                  train_image_embedding=train_image_embedding)
                else:
                    # This option is deprecated.
                    logger.info('Please use with_image_embedding=1')
                    sys.exit(-1)
                m.build()
                models.append(m)

        pre_trained_iter = 0
        if FLAGS.pre_trained_model_path:
            pre_trained_iter = int(
                FLAGS.pre_trained_model_path.split('model_')[1].split('.')[0])
        hdlr = logging.FileHandler(
            os.path.join(m.model_dir, 'log%d.txt' % pre_trained_iter))
        hdlr.setLevel(logging.INFO)
        hdlr.setFormatter(logging.Formatter(formatter_log))
        logger.addHandler(hdlr)

        # Initialize all variables once, then optionally restore pre-trained
        # weights (full model, image embedding only, or language model only).
        if tf.__version__ < '1.0':
            tf.initialize_all_variables().run()
        else:
            tf.global_variables_initializer().run()
        if FLAGS.pre_trained_model_path:
            models[0].saver.restore(session, FLAGS.pre_trained_model_path)
            logger.info('Continue to train from %s',
                        FLAGS.pre_trained_model_path)
        elif FLAGS.pre_trained_imembedding_path:
            models[0].imemb_saver.restore(session,
                                          FLAGS.pre_trained_imembedding_path)
            logger.info('Init image-embedding from %s',
                        FLAGS.pre_trained_imembedding_path)
        elif FLAGS.pre_trained_lm_path:
            models[0].lm_saver.restore(session, FLAGS.pre_trained_lm_path)
            logger.info('Init language model from %s', FLAGS.pre_trained_lm_path)

        iters_done = 0
        data_provider = BucketDataProvider(FLAGS.train_collection,
                                           vocab_file,
                                           FLAGS.vf_name,
                                           language=FLAGS.language,
                                           method=config.fluency_method,
                                           rootpath=FLAGS.rootpath)

        for i in range(config.num_epoch):
            logger.info('epoch %d', i)
            data_provider.shuffle_data_queue()
            train_cost, iters_done = run_epoch(session,
                                               iters_done,
                                               config,
                                               models,
                                               data_provider,
                                               verbose=True)
            logger.info("Train cost for epoch %d is %.3f" % (i, train_cost))

            # save the current model if necessary
            if (i + 1) % num_epoch_save == 0:
                models[0].saver.save(
                    session,
                    os.path.join(
                        m.variable_dir,
                        'model_%d.ckpt' % (iters_done + pre_trained_iter)))
                if with_image_embedding:
                    models[0].imemb_saver.save(
                        session,
                        os.path.join(m.variable_dir,
                                     'imembedding_model_%d.ckpt' % iters_done))
                logger.info("Model saved at iteration %d", iters_done)

    # Copy the configuration file into the checkpoint directory.
    os.system("cp %s %s" % (config_path, model_dir))
    if FLAGS.pre_trained_model_path:
        os.system("echo %s > %s" %
                  (FLAGS.pre_trained_model_path,
                   os.path.join(model_dir, 'pre_trained_model_path.txt')))
    if FLAGS.pre_trained_imembedding_path:
        os.system(
            "echo %s > %s" %
            (FLAGS.pre_trained_imembedding_path,
             os.path.join(model_dir, 'pre_trained_imembedding_path.txt')))
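
For reference, the model_conf/<model_name>.py module loaded by utility.load_config only needs to define plain module-level attributes. Below is a sketch with the names this script reads; the values are illustrative assumptions.

# model_conf/<model_name>.py (sketch; values are assumptions)
init_scale = 0.08             # range of the uniform initializer
buckets = [16, 32]            # length buckets; the last must equal max_num_steps
max_num_steps = 32
batch_size = 64
num_epoch = 20
num_epoch_save = 1            # optional; the script falls back to 1
train_image_embedding = True  # optional; False only for 'freeze' models
# vocab_size and vf_size are filled in at runtime by the script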
Example #4
def main(unused_args):
  model_dir = utility.get_model_dir(FLAGS)
  if os.path.exists(model_dir) and not FLAGS.overwrite:
    logger.info('%s exists. quit', model_dir)
    sys.exit(0)

  # Load model configuration
  config_path = os.path.join(os.path.dirname(__file__), 'model_conf', FLAGS.model_name + '.py')
  config = utility.load_config(config_path)

  FLAGS.vf_dir = os.path.join(FLAGS.rootpath, FLAGS.train_collection, 'FeatureData', FLAGS.vf_name)
  vocab_file = utility.get_vocab_file(FLAGS.train_collection, FLAGS.word_cnt_thr, FLAGS.rootpath)
  textbank = TextBank(vocab_file)
  config.vocab_size = len(textbank.vocab)
  config.vf_size = int(open(os.path.join(FLAGS.vf_dir, 'shape.txt')).read().split()[1])

  if hasattr(config, 'num_epoch_save'):
    num_epoch_save = config.num_epoch_save
  else:
    num_epoch_save = 1

  # if FLAGS.fluency_method == 'None':
  #     FLAGS.fluency_method = None
  # config.fluency_method = FLAGS.fluency_method
  # if config.fluency_method == 'weighted':
  #   config.use_weighted_loss = True
  # else:
  #   config.use_weighted_loss = False

  train_image_embedding = getattr(config, 'train_image_embedding', True)
  if not train_image_embedding:
    # Frozen-embedding configs are expected to carry 'freeze' in the name.
    assert 'freeze' in FLAGS.model_name
    logger.info('Not training image embedding')

  with_image_embedding = FLAGS.with_image_embedding != 0
  # Start model training
  gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
  config_proto = tf.ConfigProto(
      intra_op_parallelism_threads=FLAGS.ses_threads, gpu_options=gpu_options, allow_soft_placement=True)
 
  with tf.Graph().as_default(), tf.Session(config=config_proto) as session:


    # tf.train.SummaryWriter was renamed to tf.summary.FileWriter in TF 1.0.
    if tf.__version__ < '1.0':
      writer = tf.train.SummaryWriter("logs/", session.graph)
    else:
      writer = tf.summary.FileWriter("logs/", session.graph)

    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)


    assert len(config.buckets) >= 1
    assert config.buckets[-1] == config.max_num_steps
    models = []
    with tf.device('/gpu:%s' % FLAGS.gpu):
      with tf.variable_scope("LSTMModel", reuse=None, initializer=initializer):
        if with_image_embedding:
          m = LSTMModel(mode='train',
                        num_steps=config.buckets[0],
                        config=config,
                        model_dir=model_dir,
                        flag_with_saver=True,
                        train_image_embedding=train_image_embedding)
        else:
          # This option is deprecated.
          logger.info('Please use with_image_embedding=1')
          sys.exit(-1)

        m.build()
        models.append(m)

    pre_trained_iter = 0
    if FLAGS.pre_trained_model_path:
      pre_trained_iter = int(FLAGS.pre_trained_model_path.split('model_')[1].split('.')[0])
    hdlr = logging.FileHandler(os.path.join(m.model_dir, 'log%d.txt'%pre_trained_iter))
    hdlr.setLevel(logging.INFO)
    hdlr.setFormatter(logging.Formatter(formatter_log))
    logger.addHandler(hdlr)

    # Initialize all variables once, then optionally restore pre-trained
    # weights (full model, image embedding only, or language model only).
    if tf.__version__ < '1.0':
      tf.initialize_all_variables().run()
    else:
      tf.global_variables_initializer().run()
    if FLAGS.pre_trained_model_path:
      models[0].saver.restore(session, FLAGS.pre_trained_model_path)
      logger.info('Continue to train from %s', FLAGS.pre_trained_model_path)
    elif FLAGS.pre_trained_imembedding_path:
      models[0].imemb_saver.restore(session, FLAGS.pre_trained_imembedding_path)
      logger.info('Init image-embedding from %s', FLAGS.pre_trained_imembedding_path)
    elif FLAGS.pre_trained_lm_path:
      models[0].lm_saver.restore(session, FLAGS.pre_trained_lm_path)
      logger.info('Init language model from %s', FLAGS.pre_trained_lm_path)

    iters_done = 0
    data_provider = BucketDataProvider(FLAGS.train_collection, vocab_file,
                                       FLAGS.vf_name,
                                       language=FLAGS.language,
                                       # method=config.fluency_method,
                                       rootpath=FLAGS.rootpath)
    
    for i in range(config.num_epoch):
      logger.info('epoch %d', i)
      data_provider.shuffle_data_queue()
      train_cost, iters_done, result = run_epoch(session, iters_done, config,
                                                 models, data_provider,
                                                 verbose=True)
      logger.info("Train cost for epoch %d is %.3f" % (i, train_cost))
      writer.add_summary(result, i)

      # save the current model if necessary
      if (i + 1) % num_epoch_save == 0:
        models[0].saver.save(session, os.path.join(
            m.variable_dir, 'model_%d.ckpt' % (iters_done + pre_trained_iter)))
        if with_image_embedding:
          models[0].imemb_saver.save(session, os.path.join(
              m.variable_dir, 'imembedding_model_%d.ckpt' % iters_done))
        logger.info("Model saved at iteration %d", iters_done)


  # Copy the configuration file into the checkpoint directory.
  os.system("cp %s %s" % (config_path, model_dir))
  if FLAGS.pre_trained_model_path:
    os.system("echo %s > %s" % (FLAGS.pre_trained_model_path, os.path.join(model_dir, 'pre_trained_model_path.txt')))
  if FLAGS.pre_trained_imembedding_path:
    os.system("echo %s > %s" % (FLAGS.pre_trained_imembedding_path, os.path.join(model_dir, 'pre_trained_imembedding_path.txt')))
Example #5
def main(unused_args):
  train_collection = FLAGS.train_collection
  val_collection = FLAGS.val_collection
  overwrite = FLAGS.overwrite
  output_dir = utility.get_sim_dir(FLAGS)
  loss_info_file = os.path.join(output_dir, 'loss_info.txt')
  if os.path.exists(loss_info_file) and not overwrite:
      logger.info('%s exists. quit', loss_info_file)
      sys.exit(0)

  model_dir = utility.get_model_dir(FLAGS)
  config_path = os.path.join(os.path.dirname(__file__), 'model_conf', FLAGS.model_name + '.py')
  config = utility.load_config(config_path)

  # if FLAGS.fluency_method == 'None':
  #     FLAGS.fluency_method = None
  # config.fluency_method = FLAGS.fluency_method
  # if config.fluency_method == 'weighted':
  #   config.use_weighted_loss = True
  # else:
  #   config.use_weighted_loss = False

  textbank = TextBank(utility.get_train_vocab_file(FLAGS))
  config.vocab_size = len(textbank.vocab)
  config.vf_size = int(open(os.path.join(utility.get_val_feat_dir(FLAGS), 'shape.txt')).read().split()[1])

  gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
  config_proto = tf.ConfigProto(
      intra_op_parallelism_threads=FLAGS.ses_threads, gpu_options=gpu_options, allow_soft_placement=True)

  with_image_embedding = FLAGS.with_image_embedding > 0
  with tf.Graph().as_default(), tf.Session(config=config_proto) as session:

    assert len(config.buckets) >= 1
    assert config.buckets[-1] == config.max_num_steps
    with tf.device('/gpu:%d' % FLAGS.gpu):
      with tf.variable_scope("LSTMModel", reuse=None):
        if with_image_embedding:
          model = LSTMModel(mode='eval',
                            num_steps=config.buckets[-1],
                            config=config,
                            model_dir=model_dir,
                            flag_with_saver=True)
        else:
          # This option is deprecated.
          print('Please use with_image_embedding=1')
          sys.exit(-1)
        model.build()


    model_path_list = []
    _dir = os.path.join(model_dir, 'variables')
    for _file in os.listdir(_dir):
      if _file.startswith('model_') and _file.endswith('.ckpt.meta'):
        iter_n = int(_file[6:-10])  # strip 'model_' and '.ckpt.meta'
        model_path = os.path.join(_dir, 'model_%d.ckpt' % iter_n)
        model_path_list.append((iter_n, model_path))

    data_provider = BucketDataProvider(val_collection,
                                       utility.get_train_vocab_file(FLAGS),
                                       feature=FLAGS.vf_name,
                                       language=FLAGS.language,
                                       flag_shuffle=False,
                                       # method=config.fluency_method,
                                       rootpath=FLAGS.rootpath)
    iter2loss = {}
    for iter_n, model_path in model_path_list:
      loss_file = os.path.join(output_dir, 'model_%d.ckpt' % iter_n, 'loss.txt')
      if os.path.exists(loss_file) and not overwrite:
          logger.info('load loss from %s', loss_file)
          loss = float(open(loss_file).readline().strip())
          iter2loss[iter_n] = loss
          continue
      if not os.path.exists(os.path.split(loss_file)[0]):
          os.makedirs(os.path.split(loss_file)[0])

      model.saver.restore(session, model_path)
      # print([v.name for v in tf.trainable_variables()])
      logger.info('Restored model from %s', model_path)

      val_cost = run_epoch(session, config.batch_size, config.buckets[-1],
                           config, model, data_provider)
      logger.info("Validation cost for checkpoint model_%d.ckpt is %.3f" % (iter_n, val_cost))

      iter2loss[iter_n] = val_cost
      with open(loss_file, "w") as fw:
        fw.write('%g' % val_cost)

  sorted_iter2loss = sorted(iter2loss.items(), key=lambda x: x[1])
  with open(loss_info_file, 'w') as fw:
    fw.write('\n'.join(['%d %g' % (iter_n, loss)
                        for (iter_n, loss) in sorted_iter2loss]))
Example #6
class InferenceWrapper(object):
    """Model wrapper class for performing inference with a ShowAndTellModel."""
    def __init__(self,
                 config,
                 model_dir,
                 ses_threads=2,
                 length_normalization_factor=0.0,
                 gpu_memory_fraction=0.3,
                 gpu=1,
                 with_image_embedding=True):
        self.config = copy.deepcopy(config)
        self.config.batch_size = 1
        self.flag_load_model = False
        self.model_dir = model_dir
        self.gpu = gpu
        self.gpu_memory_fraction = gpu_memory_fraction
        self.with_image_embedding = with_image_embedding

    def build_model(self):
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=self.gpu_memory_fraction)
        config_proto = tf.ConfigProto(gpu_options=gpu_options,
                                      allow_soft_placement=True)
        self.session = session = tf.Session(config=config_proto)
        with tf.device('/gpu:%d' % self.gpu):
            with tf.variable_scope("LSTMModel", reuse=None):
                if self.with_image_embedding:
                    self.model = LSTMModel(config=self.config,
                                           mode="inference",
                                           model_dir=self.model_dir,
                                           flag_with_saver=True,
                                           num_steps=1,
                                           gpu=self.gpu)
                else:
                    print('Please use with_image_embedding=1')
                    sys.exit(-1)
                self.model.build()

    def load_model(self, model_path):
        self.model.saver.restore(self.session, model_path)
        self.flag_load_model = True
        self.model_path = model_path
        logger.info('Load model from %s', model_path)

    def feed_visual_feature(self, visual_feature):
        assert visual_feature.shape[0] == self.config.vf_size
        # assert self.flag_load_model, 'Must call load_model first'
        sess = self.session
        initial_state = sess.run(
            fetches="LSTMModel/lstm/initial_state:0",
            feed_dict={"LSTMModel/visual_feature:0": visual_feature})
        return initial_state

    def inference_step(self, input_feed, state_feed):
        sess = self.session
        softmax_output, state_output = sess.run(
            fetches=["LSTMModel/softmax:0", "LSTMModel/lstm/state:0"],
            feed_dict={
                "LSTMModel/input_feed:0": input_feed,
                "LSTMModel/lstm/state_feed:0": state_feed,
            })
        return softmax_output, state_output, None