Example #1
def everySenPre(flags_input_pipeline, model, hooks, model_dir, sess):
    flags_input_pipeline = _maybe_load_yaml(flags_input_pipeline)

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        flags_input_pipeline, mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False, num_epochs=1)
    tf.reset_default_graph()
    # Create the graph used for inference
    predictions, _, _ = create_inference_graph(
        model=model,
        input_pipeline=input_pipeline_infer,
        batch_size=flags_batch_size)

    saver = tf.train.Saver()
    checkpoint_path = flags_checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(model_dir)

    def session_init_op(_scaffold, sess):
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

    scaffold = tf.train.Scaffold(init_fn=session_init_op)
    session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)

    # logging.basicConfig(filename='new.log',level=logging.DEBUG)

    sess = tf.train.MonitoredSession(session_creator=session_creator, hooks=hooks)
    # Fetch the prediction tensors; sess.run([]) would return an empty list
    # and could not be unpacked into two values.
    result = sess.run(predictions)

    # with tf.train.MonitoredSession(
    #         session_creator=session_creator,
    #         hooks=hooks) as sess:
    #     # sess.run(tf.global_variables_initializer())
    #     # Run until the inputs are exhausted
    #     while True:
    #         outputs, result = sess.run([])

    return result, sess
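A minimal, hypothetical call to this helper could look like the sketch below; the pipeline definition, model object and paths are placeholders, not values taken from the snippet above.

pipeline_def = {"class": "ParallelTextInputPipeline",
                "params": {"source_files": ["test.sources.txt"]}}
result, sess = everySenPre(
    flags_input_pipeline=pipeline_def,
    model=my_model,          # placeholder: a seq2seq model built in INFER mode
    hooks=[],                # optional inference hooks (e.g. DecodeText)
    model_dir="/tmp/model",  # directory containing the checkpoints
    sess=None)               # the helper creates its own MonitoredSession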
Example #2
    if str(tdict["class"]) == "DecodeText":
        task = task_cls(
            tdict["params"], callback_func=_save_prediction_to_dict)
    elif str(tdict["class"]) == "DumpAttention":
        task = task_cls(tdict["params"], callback_func=_handle_attention)

    hooks.append(task)

input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
    fl_input_pipeline,
    mode=tf.contrib.learn.ModeKeys.INFER,
    shuffle=False,
    num_epochs=1)

# Create the graph used for inference
predictions, _, _ = create_inference_graph(
    model=model, input_pipeline=input_pipeline_infer, batch_size=batch_size)

graph = tf.get_default_graph()

# Function to run inference.


def run_inference():
    # tf.reset_default_graph()
    with graph.as_default():
        saver = tf.train.Saver()
        checkpoint_path = loaded_checkpoint_path
        if not checkpoint_path:
            checkpoint_path = tf.train.latest_checkpoint(model_dir_input)

        def session_init_op(_scaffold, sess):
            saver.restore(sess, checkpoint_path)
            tf.logging.info("Restored model from %s", checkpoint_path)
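For reference, each task definition consumed by branches like the ones above is a dict with a "class" key and optional "params"; an illustrative list of that shape (all values are placeholders, typically parsed from YAML by _maybe_load_yaml):

tasks_def = [
    {"class": "DecodeText", "params": {"delimiter": " "}},
    {"class": "DumpAttention", "params": {"output_dir": "/tmp/attention"}},
]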
Example #3
def main(_argv):
    """Program entry point.
  """

    # Load flags from config file
    if FLAGS.config_path:
        with gfile.GFile(FLAGS.config_path) as config_file:
            config_flags = yaml.load(config_file)
            for flag_key, flag_value in config_flags.items():
                setattr(FLAGS, flag_key, flag_value)

    if isinstance(FLAGS.tasks, string_types):
        FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

    if isinstance(FLAGS.input_pipeline, string_types):
        FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        FLAGS.input_pipeline,
        mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False,
        num_epochs=1)

    # Create output dir
    if not os.path.exists(FLAGS.model_dir + '/pred'):
        os.makedirs(FLAGS.model_dir + '/pred')

    # Load saved training options
    train_options = training_utils.TrainOptions.load(FLAGS.model_dir)

    # Create the model
    model_cls = locate(train_options.model_class) or \
      getattr(models, train_options.model_class)
    model_params = train_options.model_params
    model_params = _deep_merge_dict(model_params,
                                    _maybe_load_yaml(FLAGS.model_params))
    model = model_cls(params=model_params,
                      mode=tf.contrib.learn.ModeKeys.INFER)

    # Load inference tasks
    hooks = []
    for tdict in FLAGS.tasks:
        if not "params" in tdict:
            tdict["params"] = {}
        task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
        print("******", task_cls)
        task = task_cls(tdict["params"],
                        callback_func=_save_prediction_to_dict)

        hooks.append(task)

    # Create the graph used for inference
    predictions, _, _ = create_inference_graph(
        model=model,
        input_pipeline=input_pipeline_infer,
        batch_size=FLAGS.batch_size)

    saver = tf.train.Saver()
    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    def session_init_op(_scaffold, sess):
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

    scaffold = tf.train.Scaffold(init_fn=session_init_op)
    session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
    with tf.train.MonitoredSession(session_creator=session_creator,
                                   hooks=hooks) as sess:

        # Run until the inputs are exhausted
        while not sess.should_stop():
            sess.run([])
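The empty sess.run([]) loop works because each task is a tf.train.SessionRunHook that requests its own fetches. A minimal hook of that shape, illustrative only and not part of the original code, could look like this:

import tensorflow as tf

class CountPredictionsHook(tf.train.SessionRunHook):
    """Illustrative hook: fetches the prediction dict on every step."""

    def __init__(self, predictions):
        self._predictions = predictions
        self._count = 0

    def before_run(self, run_context):
        # Ask the MonitoredSession to also fetch our tensors on this step.
        return tf.train.SessionRunArgs(self._predictions)

    def after_run(self, run_context, run_values):
        self._count += len(run_values.results["predicted_tokens"])

    def end(self, session):
        tf.logging.info("Decoded %d examples", self._count)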
Example #4
def main(_argv):
    """Program entry point.
  """

    # Load flags from config file
    if FLAGS.config_path:
        with gfile.GFile(FLAGS.config_path) as config_file:
            config_flags = yaml.load(config_file)
            for flag_key, flag_value in config_flags.items():
                setattr(FLAGS, flag_key, flag_value)

    if isinstance(FLAGS.tasks, string_types):
        FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

    if isinstance(FLAGS.input_pipeline, string_types):
        FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        FLAGS.input_pipeline,
        mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False,
        num_epochs=1)

    # Load saved training options
    train_options = training_utils.TrainOptions.load(FLAGS.model_dir)

    # Create the model
    model_cls = locate(train_options.model_class) or \
      getattr(models, train_options.model_class)
    model_params = train_options.model_params
    model_params = _deep_merge_dict(model_params,
                                    _maybe_load_yaml(FLAGS.model_params))
    model = model_cls(params=model_params,
                      mode=tf.contrib.learn.ModeKeys.INFER)

    # Load inference tasks
    hooks = []
    for tdict in FLAGS.tasks:
        if not "params" in tdict:
            tdict["params"] = {}
        task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
        task = task_cls(tdict["params"])
        hooks.append(task)

    # Create the graph used for inference
    predictions, _, _ = create_inference_graph(
        model=model,
        input_pipeline=input_pipeline_infer,
        batch_size=FLAGS.batch_size)

    saver = tf.train.Saver(tf.all_variables())
    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    def session_init_op(_scaffold, sess):
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

        if FLAGS.save_pb_during_infer:
            save_vars = {}
            for v in tf.trainable_variables():
                save_vars[v.value().name] = sess.run(v)
            g2 = tf.Graph()
            with g2.as_default():
                consts = {}
                for k in save_vars.keys():
                    consts[k] = tf.constant(save_vars[k])
                tf.import_graph_def(
                    sess.graph_def,
                    input_map={name: consts[name]
                               for name in consts.keys()})
                tf.train.write_graph(g2.as_graph_def(), FLAGS.save_pb_dir,
                                     'rnn.pb', False)
                tf.train.write_graph(g2.as_graph_def(), FLAGS.save_pb_dir,
                                     'rnn.txt')
            tf.logging.info("Save pb down! %s", FLAGS.save_pb_dir)

    scaffold = tf.train.Scaffold(init_fn=session_init_op)
    session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
    with tf.train.MonitoredSession(session_creator=session_creator,
                                   hooks=hooks) as sess:

        # Run until the inputs are exhausted
        while not sess.should_stop():
            sess.run([])
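A frozen graph written this way can be re-imported later. The sketch below is illustrative: only the file name rnn.pb comes from the snippet above; the directory and tensor name are placeholders.

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("/tmp/save_pb_dir/rnn.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    # Tensors can now be looked up by name, e.g.
    # graph.get_tensor_by_name("some/prediction/tensor:0")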
Example #5
def main(_argv):
    """Program entry point.
  """

    # Load flags from config file
    if FLAGS.config_path:
        with gfile.GFile(FLAGS.config_path) as config_file:
            config_flags = yaml.load(config_file)
            for flag_key, flag_value in config_flags.items():
                setattr(FLAGS, flag_key, flag_value)

    if isinstance(FLAGS.tasks, string_types):
        FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

    if isinstance(FLAGS.input_pipeline, string_types):
        FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        FLAGS.input_pipeline,
        mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False,
        num_epochs=1)

    # Load saved training options
    train_options = training_utils.TrainOptions.load(FLAGS.model_dir)

    # Create the model
    model_cls = locate(train_options.model_class) or \
      getattr(models, train_options.model_class)
    model_params = train_options.model_params
    model_params = _deep_merge_dict(model_params,
                                    _maybe_load_yaml(FLAGS.model_params))
    model = model_cls(params=model_params,
                      mode=tf.contrib.learn.ModeKeys.INFER)

    # Load inference tasks
    hooks = []
    for tdict in FLAGS.tasks:
        if not "params" in tdict:
            tdict["params"] = {}
        task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
        task = task_cls(tdict["params"])
        hooks.append(task)

    # Create the graph used for inference
    predictions, _, _ = create_inference_graph(
        model=model,
        input_pipeline=input_pipeline_infer,
        batch_size=FLAGS.batch_size)

    # moving_average_variables = []
    #
    # for var in slim.get_model_variables():
    #     if 'resnet_v1_50' in var.name and 'fully_connected' not in var.name:
    #         moving_average_variables.append(var)
    #
    #
    # tf_global_step = slim.get_or_create_global_step()
    #
    # variable_averages = tf.train.ExponentialMovingAverage(
    #     0.99, tf_global_step)
    #
    # #get variable of the models and apply average to the concerned variables
    # variables_to_restore = variable_averages.variables_to_restore(
    #     moving_average_variables)
    #
    #
    # #TODO
    # #current checkpoint have model/att_seq2seq/model/att_seq2seq/encode/.... for the exp moving
    # #instead of model/att_seq2seq/
    # #so need to rename these:
    #
    # def name_in_checkpoint(var):
    #     if 'ExponentialMovingAverage' in var:
    #         return var.replace('model/att_seq2seq/', 'model/att_seq2seq/model/att_seq2seq/')
    #     return var
    #
    #
    # variables_to_restore = {name_in_checkpoint(k): v
    #                         for k,v in variables_to_restore.items()}
    #
    #
    # variables_to_restore[tf_global_step.op.name] = tf_global_step
    #
    # for k,v in variables_to_restore.items():
    #     print(k)
    #     print(v)
    #     print("#############")
    # import sys
    # sys.exit()

    # saver = tf.train.Saver(var_list=variables_to_restore)
    saver = tf.train.Saver()
    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    def session_init_op(_scaffold, sess):
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

    scaffold = tf.train.Scaffold(init_fn=session_init_op)
    session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
    with tf.train.MonitoredSession(session_creator=session_creator,
                                   hooks=hooks) as sess:

        # Run until the inputs are exhausted
        while not sess.should_stop():
            sess.run([])
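The commented-out block above hints at restoring exponential-moving-average weights instead of the raw variables. If that path were enabled, the standard pattern with tf.train.ExponentialMovingAverage (an illustrative sketch, not the author's code) would be:

import tensorflow as tf

# Illustrative: restore the EMA shadow variables instead of the raw weights.
variable_averages = tf.train.ExponentialMovingAverage(0.99)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(var_list=variables_to_restore)
# saver.restore(sess, checkpoint_path) then proceeds exactly as above.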
Example #6
def main(_argv):
    """Program entrypoint.
  """

    predictions, _, _ = create_inference_graph(model_dir=FLAGS.model_dir,
                                               input_file=FLAGS.source,
                                               batch_size=FLAGS.batch_size,
                                               beam_width=FLAGS.beam_width)

    # Filter fetched predictions to save memory
    prediction_keys = set([
        "predicted_tokens", "features.source_len", "features.source_tokens",
        "attention_scores"
    ])

    # Optional UNK token replacement
    unk_replace_fn = None
    if FLAGS.unk_replace:
        if "attention_scores" not in predictions.keys():
            raise ValueError("""To perform UNK replacement you must use a model
                       class that outputs attention scores.""")
        prediction_keys.add("attention_scores")
        mapping = None
        if FLAGS.unk_mapping is not None:
            mapping = get_unk_mapping(FLAGS.unk_mapping)
        # FLAGS.unk_replace has already been checked above
        unk_replace_fn = functools.partial(unk_replace, mapping=mapping)

    predictions = {
        k: v
        for k, v in predictions.items() if k in prediction_keys
    }

    saver = tf.train.Saver()

    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    with tf.Session() as sess:
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.tables_initializer())

        # Restore checkpoint
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

        # Accumulate attention scores in this array.
        # Shape: [num_examples, target_length, input_length]
        attention_scores_accum = []
        if FLAGS.dump_attention_dir is not None:
            gfile.MakeDirs(FLAGS.dump_attention_dir)

        # Output predictions
        predictions_iter = create_predictions_iter(predictions, sess)
        for idx, predictions_dict in enumerate(predictions_iter):
            # Convert to unicode
            predictions_dict["predicted_tokens"] = np.char.decode(
                predictions_dict["predicted_tokens"].astype("S"), "utf-8")
            predicted_tokens = predictions_dict["predicted_tokens"]

            if FLAGS.beam_width is not None:
                # If we're using beam search we take the first beam
                predicted_tokens = predicted_tokens[:, 0]

            predictions_dict["features.source_tokens"] = np.char.decode(
                predictions_dict["features.source_tokens"].astype("S"),
                "utf-8")
            source_tokens = predictions_dict["features.source_tokens"]
            source_len = predictions_dict["features.source_len"]

            if unk_replace_fn is not None:
                # We slice the attention scores so that we do not
                # accidentally replace UNK with a SEQUENCE_END token
                attention_scores = predictions_dict["attention_scores"]
                attention_scores = attention_scores[:, :source_len - 1]
                predicted_tokens = unk_replace_fn(
                    source_tokens=source_tokens,
                    predicted_tokens=predicted_tokens,
                    attention_scores=attention_scores)

            # Optionally Dump attention
            if FLAGS.dump_attention_dir is not None:
                if not FLAGS.dump_attention_no_plot:
                    output_path = os.path.join(FLAGS.dump_attention_dir,
                                               "{:05d}.png".format(idx))
                    create_figure(predictions_dict)
                    plt.savefig(output_path)
                    plt.close()
                    tf.logging.info("Wrote %s", output_path)
                attention_scores_accum.append(get_scores(predictions_dict))

            sent = FLAGS.delimiter.join(predicted_tokens).split(
                "SEQUENCE_END")[0]
            # Replace special BPE tokens
            sent = sent.replace("@@ ", "")
            sent = sent.strip()

            print(sent)

        # Write attention scores
        if FLAGS.dump_attention_dir is not None:
            scores_path = os.path.join(FLAGS.dump_attention_dir,
                                       "attention_scores.npz")
            np.savez(scores_path, *attention_scores_accum)
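Because np.savez is called with positional arrays, the scores come back under the keys arr_0, arr_1, ... in accumulation order; an illustrative way to reload them (the path is a placeholder):

import numpy as np

data = np.load("/tmp/dump_attention_dir/attention_scores.npz")
# One entry per decoded example, in the order they were accumulated.
scores_per_example = [data[key] for key in
                      sorted(data.files, key=lambda k: int(k.split("_")[1]))]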
Example #7
def main(_argv):
    """Program entry point.
  """

    # Load flags from config file
    if FLAGS.config_path:
        with gfile.GFile(FLAGS.config_path) as config_file:
            config_flags = yaml.load(config_file)
            for flag_key, flag_value in config_flags.items():
                setattr(FLAGS, flag_key, flag_value)

    if isinstance(FLAGS.tasks, string_types):
        FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

    if isinstance(FLAGS.input_pipeline, string_types):
        FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        FLAGS.input_pipeline,
        mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False,
        num_epochs=1)

    # Load saved training options
    train_options = training_utils.TrainOptions.load(FLAGS.model_dir)

    # Create the model
    model_cls = locate(train_options.model_class) or \
      getattr(models, train_options.model_class)
    model_params = train_options.model_params
    model_params = _deep_merge_dict(model_params,
                                    _maybe_load_yaml(FLAGS.model_params))
    model = model_cls(params=model_params,
                      mode=tf.contrib.learn.ModeKeys.INFER)

    # Load inference tasks
    hooks = []
    for tdict in FLAGS.tasks:
        if not "params" in tdict:
            tdict["params"] = {}
        task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
        task = task_cls(tdict["params"])
        hooks.append(task)

    # Create the graph used for inference
    predictions, _, _ = create_inference_graph(
        model=model,
        input_pipeline=input_pipeline_infer,
        batch_size=FLAGS.batch_size)

    saver = tf.train.Saver()
    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    #def session_init_op(_scaffold, sess):
    #  saver.restore(sess, checkpoint_path)
    #  tf.logging.info("Restored model from %s", checkpoint_path)

    #scaffold = tf.train.Scaffold(init_fn=session_init_op)
    #session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
    #with tf.train.MonitoredSession(
    #    session_creator=session_creator,
    #    hooks=hooks) as sess:

    #  # Run until the inputs are exhausted
    #  while not sess.should_stop():
    #    sess.run([])

    with tf.Session() as sess:
        print("start to restore checkpoint:{} into session".format(
            checkpoint_path))
        saver.restore(sess, checkpoint_path)
        saved_model_path = os.path.join(FLAGS.model_dir, FLAGS.export_dir)
        if os.path.exists(saved_model_path):
            print("remove old directory:{}".format(saved_model_path))
            shutil.rmtree(saved_model_path)
        print("start to export SavedModel")
        builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)
        builder.add_meta_graph_and_variables(sess, ["fin_biseq2seq"])
        builder.save()
        print("finish exporting SavedModel")
Example #8
def main(_argv):
  """Program entry point.
  """

  # Load flags from config file
  if FLAGS.config_path:
    with gfile.GFile(FLAGS.config_path) as config_file:
      config_flags = yaml.load(config_file)
      for flag_key, flag_value in config_flags.items():
        setattr(FLAGS, flag_key, flag_value)

  if isinstance(FLAGS.tasks, string_types):
    FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

  if isinstance(FLAGS.input_pipeline, string_types):
    FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

  input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
      FLAGS.input_pipeline, mode=tf.contrib.learn.ModeKeys.INFER,
      shuffle=False, num_epochs=1)

  # Load saved training options
  train_options = training_utils.TrainOptions.load(FLAGS.model_dir)

  # Create the model
  model_cls = locate(train_options.model_class) or \
    getattr(models, train_options.model_class)
  model_params = train_options.model_params
  model_params = _deep_merge_dict(
      model_params, _maybe_load_yaml(FLAGS.model_params))
  model = model_cls(
      params=model_params,
      mode=tf.contrib.learn.ModeKeys.INFER)

  # Load inference tasks
  hooks = []
  for tdict in FLAGS.tasks:
    if not "params" in tdict:
      tdict["params"] = {}
    task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
    task = task_cls(tdict["params"])
    hooks.append(task)

  # Create the graph used for inference
  predictions, _, _ = create_inference_graph(
      model=model,
      input_pipeline=input_pipeline_infer,
      batch_size=FLAGS.batch_size)

  saver = tf.train.Saver()
  checkpoint_path = FLAGS.checkpoint_path
  if not checkpoint_path:
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

  def session_init_op(_scaffold, sess):
    saver.restore(sess, checkpoint_path)
    tf.logging.info("Restored model from %s", checkpoint_path)

  scaffold = tf.train.Scaffold(init_fn=session_init_op)
  session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
  with tf.train.MonitoredSession(
      session_creator=session_creator,
      hooks=hooks) as sess:

    # Run until the inputs are exhausted
    while not sess.should_stop():
      sess.run([])
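The flags used above are usually supplied through the YAML file named by FLAGS.config_path. The sketch below shows a config of that shape parsed with yaml; every value is a placeholder, not taken from the original code.

import yaml

config_flags = yaml.safe_load("""
model_dir: /tmp/nmt_model
batch_size: 32
tasks:
  - class: DecodeText
input_pipeline:
  class: ParallelTextInputPipeline
  params:
    source_files:
      - test.sources.txt
""")
print(config_flags["tasks"])   # [{'class': 'DecodeText'}]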
Example #9
def main(_argv):
    """Program entry point.
  """

    # Load flags from config file
    if FLAGS.config_path:
        with gfile.GFile(FLAGS.config_path) as config_file:
            config_flags = yaml.load(config_file)
            for flag_key, flag_value in config_flags.items():
                setattr(FLAGS, flag_key, flag_value)

    if isinstance(FLAGS.tasks, string_types):
        FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)

    if isinstance(FLAGS.input_pipeline, string_types):
        FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)

    if isinstance(FLAGS.model_params, string_types):
        FLAGS.model_params = _maybe_load_yaml(FLAGS.model_params)

    if isinstance(FLAGS.models, string_types):
        FLAGS.models = _maybe_load_yaml(FLAGS.models)
        for mdict in FLAGS.models:
            if 'params' not in mdict:
                mdict['params'] = {}

    input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
        FLAGS.input_pipeline,
        mode=tf.contrib.learn.ModeKeys.INFER,
        shuffle=False,
        num_epochs=1)

    # ---------- Load Models First to Load Model Parameters ----------
    model_variables = []

    for mdict in FLAGS.models:
        # Load saved training options
        train_options = training_utils.TrainOptions.load(mdict['dir'])

        # Get the model class
        model_cls = locate(train_options.model_class) or getattr(
            model_clsses, train_options.model_class)

        # Load model params
        model_params = train_options.model_params
        model_params = _deep_merge_dict(model_params, mdict['params'])
        model_params = _deep_merge_dict(model_params, FLAGS.model_params)

        # Create model
        model = model_cls(params=model_params,
                          mode=tf.contrib.learn.ModeKeys.INFER)

        # Create computation graph
        predictions, _, _ = create_inference_graph(
            model=model,
            input_pipeline=input_pipeline_infer,
            batch_size=FLAGS.batch_size)

        # Get path to the checkpoint
        if 'checkpoint_path' in mdict:
            checkpoint_path = mdict['checkpoint_path']
        else:
            checkpoint_path = tf.train.latest_checkpoint(mdict['dir'])

        # Get Saver
        saver = tf.train.Saver()

        # Create session to load values
        with tf.Session() as sess:
            # Load model values from checkpoint
            saver.restore(sess, checkpoint_path)

            # List all variables
            variables = {}
            for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
                variables[var.name] = var.eval()

            model_variables.append(variables)

        # Reset graph
        tf.reset_default_graph()

    # Create computation graph for ensemble
    models = []
    vocab_source = None
    vocab_target = None

    for n, (mdict, variables) in enumerate(zip(FLAGS.models, model_variables)):
        # Load saved training options
        train_options = training_utils.TrainOptions.load(mdict['dir'])

        # Get the model class
        model_cls = locate(train_options.model_class) or getattr(
            model_clsses, train_options.model_class)

        # Load model params
        model_params = train_options.model_params
        model_params = _deep_merge_dict(model_params, mdict['params'])
        model_params = _deep_merge_dict(model_params, FLAGS.model_params)

        # Create model
        model = model_cls(params=model_params,
                          mode=tf.contrib.learn.ModeKeys.INFER)

        models.append(model)

        # Predefine variables
        with tf.variable_scope('model{}'.format(n)):
            for name, value in variables.items():
                varname = name.split(':')[0]
                tf.get_variable(varname,
                                shape=value.shape,
                                initializer=tf.constant_initializer(value))

        # Create computation graph
        with tf.variable_scope('model{}'.format(n), reuse=True):
            predictions, _, _ = create_inference_graph(
                model=model,
                input_pipeline=input_pipeline_infer,
                batch_size=FLAGS.batch_size)

        # Get vocab information and check it matches across ensemble members
        if 'vocab_source' in model_params:
            vocab_source = vocab_source or model_params['vocab_source']
            assert vocab_source == model_params['vocab_source'], \
                'Source vocab does not match across models'
        if 'vocab_target' in model_params:
            vocab_target = vocab_target or model_params['vocab_target']
            assert vocab_target == model_params['vocab_target'], \
                'Target vocab does not match across models'

    # Fill vocab info of model_params
    if vocab_source:
        FLAGS.model_params['vocab_source'] = vocab_source
    if vocab_target:
        FLAGS.model_params['vocab_target'] = vocab_target

    # Create Ensemble Models
    ensemble_model = EnsembleModel(models=models, params=FLAGS.model_params)

    # Create Computation Graph
    predictions, _, _ = create_inference_graph(ensemble_model,
                                               input_pipeline_infer,
                                               FLAGS.batch_size)

    # DEBUG
    #for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
    #  print(var.name)

    #exit();

    # Load inference tasks
    hooks = []
    for tdict in FLAGS.tasks:
        if not "params" in tdict:
            tdict["params"] = {}
        task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
        task = task_cls(tdict["params"])
        hooks.append(task)

    with tf.train.MonitoredSession(hooks=hooks) as sess:

        # Run until the inputs are exhausted
        while not sess.should_stop():
            sess.run([])
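For reference, the ensemble loop reads each entry of FLAGS.models through mdict['dir'], mdict['params'] and an optional mdict['checkpoint_path']; after YAML parsing it therefore has a shape like the following (all paths and parameter names are placeholders):

models_def = [
    {"dir": "/tmp/model_a", "params": {}},
    {"dir": "/tmp/model_b",
     "params": {"inference.beam_search.beam_width": 5},
     "checkpoint_path": "/tmp/model_b/model.ckpt-100000"},
]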