def main(unused_argv):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    hyper_params = create_hparams()

    model_fn = model.create_model_fn(hyper_params,
                                     model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        model_dir=MODEL_DIR,
        config=tf.contrib.learn.RunConfig(session_config=config))

    input_fn_train = inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hyper_params.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hyper_params.eval_batch_size,
        num_epochs=1)

    eval_metrics = metrics.create_evaluation_metrics()

    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
def main(unused_arg):
    hparams = hp.create_hparams()

    model_fn = model.create_model_fn(hparams)

    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        config=tf.contrib.learn.RunConfig(
            save_checkpoints_steps=FLAGS.eval_every,
            save_summary_steps=10000,
            log_step_count_steps=10000,
            model_dir=MODEL_DIR))

    input_fn_train = input.create_input_fn(
        input_files=[TRAIN_FILE_PATH],
        batch_size=hparams.batch_size,
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        num_epochs=hparams.num_epochs)

    monitors_list = []

    input_fn_validation = input.create_input_fn([VALIDATION_FILE_PATH],
                                                tf.contrib.learn.ModeKeys.EVAL,
                                                hparams.eval_batch_size, 1)
    validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_validation,
        every_n_steps=FLAGS.eval_every,
        metrics=metrics.create_evaluation_metrics('validation'))
    monitors_list.append(validation_monitor)

    input_fn_test = input.create_input_fn([TEST_FILE_PATH],
                                          tf.contrib.learn.ModeKeys.EVAL,
                                          hparams.eval_batch_size, 1)
    test_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_test,
        every_n_steps=FLAGS.eval_every,
        metrics=metrics.create_evaluation_metrics('test'))
    monitors_list.append(test_monitor)

    if FLAGS.debug:
        debugger = tf_debug.LocalCLIDebugHook()
        monitors_list.append(debugger)

    input_fn_train_eval = input.create_input_fn([TRAIN_FILE_PATH],
                                                tf.contrib.learn.ModeKeys.EVAL,
                                                hparams.batch_size, 1)
    train_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_train_eval,
        every_n_steps=FLAGS.train_eval_every,
        metrics={
            'train_accuracy':
            metrics.create_metric_spec(tf.contrib.metrics.streaming_accuracy,
                                       'predictions', None)
        })
    monitors_list.append(train_monitor)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=monitors_list)

    hp.write_hparams_to_file(hparams, MODEL_DIR)
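
# A hedged sketch of what the `metrics.create_metric_spec` helper used above might wrap
# (hypothetical; the project's real helper may differ): tf.contrib.learn.MetricSpec binds
# a streaming metric function to a prediction key and a label key.
def create_metric_spec(metric_fn, prediction_key, label_key):
    return tf.contrib.learn.MetricSpec(metric_fn=metric_fn,
                                       prediction_key=prediction_key,
                                       label_key=label_key)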
Example 3
def main(unused_arg):
    model_fn = model.create_model_fn(hp.create_hparams())

    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=FLAGS.model_dir,
                                           config=tf.contrib.learn.RunConfig())

    input_fn = input.create_input_fn([TEST_FILE_PATH],
                                     tf.contrib.learn.ModeKeys.EVAL,
                                     FLAGS.test_batch_size, 1)

    eval_metrics = metrics.create_evaluation_metrics()

    estimator.evaluate(input_fn=input_fn,
                       metrics=eval_metrics,
                       steps=None)
  def _assert_outputs_for_train_eval(self, configs, mode, class_agnostic=False):
    model_config = configs['model']
    train_config = configs['train_config']
    with tf.Graph().as_default():
      if mode == tf.estimator.ModeKeys.TRAIN:
        features, labels = inputs.create_train_input_fn(
            configs['train_config'],
            configs['train_input_config'],
            configs['model'])()
        batch_size = train_config.batch_size
      else:
        features, labels = inputs.create_eval_input_fn(
            configs['eval_config'],
            configs['eval_input_config'],
            configs['model'])()
        batch_size = 1

      detection_model_fn = functools.partial(
          model_builder.build, model_config=model_config, is_training=True)

      hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')

      model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
      estimator_spec = model_fn(features, labels, mode)

      self.assertIsNotNone(estimator_spec.loss)
      self.assertIsNotNone(estimator_spec.predictions)
      if class_agnostic:
        self.assertNotIn('detection_classes', estimator_spec.predictions)
      else:
        detection_classes = estimator_spec.predictions['detection_classes']
        self.assertEqual(batch_size, detection_classes.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_classes.dtype)
      detection_boxes = estimator_spec.predictions['detection_boxes']
      detection_scores = estimator_spec.predictions['detection_scores']
      num_detections = estimator_spec.predictions['num_detections']
      self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
      self.assertEqual(tf.float32, detection_boxes.dtype)
      self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
      self.assertEqual(tf.float32, detection_scores.dtype)
      self.assertEqual(tf.float32, num_detections.dtype)
      if mode == tf.estimator.ModeKeys.TRAIN:
        self.assertIsNotNone(estimator_spec.train_op)
      return estimator_spec
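
  # Hedged usage sketch: hypothetical test cases exercising the helper above. The
  # `_get_configs_for_model` helper and the model name are assumptions and may be
  # named differently in the actual test suite.
  def test_outputs_for_train_mode(self):
    configs = self._get_configs_for_model('ssd_inception_v2_pets')
    self._assert_outputs_for_train_eval(configs, tf.estimator.ModeKeys.TRAIN)

  def test_outputs_for_eval_mode(self):
    configs = self._get_configs_for_model('ssd_inception_v2_pets')
    self._assert_outputs_for_train_eval(configs, tf.estimator.ModeKeys.EVAL)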
Example 5
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('label_map_path')
    flags.mark_flag_as_required('train_file_pattern')
    flags.mark_flag_as_required('eval_file_pattern')

    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
    run_config = {"label_map_path": FLAGS.label_map_path,
                  "num_classes": FLAGS.num_classes}
    if FLAGS.finetune_ckpt:
        run_config["finetune_ckpt"] = FLAGS.finetune_ckpt
    model_fn = create_model_fn(run_config)
    estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
    train_input_fn = create_input_fn(FLAGS.train_file_pattern, True,
                                     FLAGS.image_size, FLAGS.batch_size)
    eval_input_fn = create_input_fn(FLAGS.eval_file_pattern, False, FLAGS.image_size)
    prediction_fn = create_prediction_input_fn()
    train_spec, eval_spec = create_train_and_eval_specs(
        train_input_fn, eval_input_fn, prediction_fn, FLAGS.num_train_steps)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
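
# Hypothetical flag definitions assumed by the example above; the flag names come from
# the FLAGS references in main(), but the defaults and help strings are illustrative only.
from absl import flags

flags.DEFINE_string('model_dir', None, 'Directory for checkpoints and summaries')
flags.DEFINE_string('label_map_path', None, 'Path to the label map file')
flags.DEFINE_string('train_file_pattern', None, 'File pattern for training TFRecords')
flags.DEFINE_string('eval_file_pattern', None, 'File pattern for evaluation TFRecords')
flags.DEFINE_string('finetune_ckpt', '', 'Optional checkpoint to fine-tune from')
flags.DEFINE_integer('num_classes', 2, 'Number of object classes')
flags.DEFINE_integer('image_size', 640, 'Input image size')
flags.DEFINE_integer('batch_size', 8, 'Training batch size')
flags.DEFINE_integer('num_train_steps', 100000, 'Total number of training steps')
FLAGS = flags.FLAGS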
Example 6
def main(_):
    """Launch training"""

    # Load hyperparameters
    params = config.create_config()

    # Prepare the model function that will be applied to context/utterance batches
    model_fn = model.create_model_fn(params, model_impl=selected_model)

    # Prepare estimator
    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=MODEL_DIR,
                                           config=tf.contrib.learn.RunConfig(
                                               gpu_memory_fraction=0.25,
                                               save_checkpoints_secs=60 * 2,
                                               keep_checkpoint_max=1,
                                               log_device_placement=False))

    # Prepare input training examples
    input_fn_train = inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=params.batch_size,
        num_epochs=FLAGS.num_epochs,
        params=params)

    # Prepare input validation examples
    input_fn_eval = inputs.create_input_fn(mode=tf.contrib.learn.ModeKeys.EVAL,
                                           input_files=[VALIDATION_FILE],
                                           batch_size=params.eval_batch_size,
                                           num_epochs=1,
                                           params=params)

    # Load recall metrics for validation
    eval_metrics = metrics.create_evaluation_metrics()

    # Prepare monitor for validation
    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    # Launch training
    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example 7
def main(unused_argv):

  # To resume training from a previous run, point MODEL_DIR at that run's folder, e.g.:
  # MODEL_DIR = '/Users/eduardolitonjua/Desktop/Retrieval-System/runs/1472130056' 
  hparams = hyperparameters.create_hparams()

  model_fn = model.create_model_fn(
    hparams,
    model_impl=dual_encoder_model)

  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=MODEL_DIR,
    config=tf.contrib.learn.RunConfig())

  input_fn_train = inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.TRAIN,
    input_files=[TRAIN_FILE],
    batch_size=hparams.batch_size,
    num_epochs=FLAGS.num_epochs)

  input_fn_eval = inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[VALIDATION_FILE],
    batch_size=hparams.eval_batch_size,
    num_epochs=1)

  eval_metrics = metrics.create_evaluation_metrics()

  class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
    def every_n_step_end(self, step, outputs):
      self._estimator.evaluate(
        input_fn=input_fn_eval,
        metrics=eval_metrics,
        steps=None)

  eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every, first_n_steps=-1)
  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example 8
def main(unused_argv):
    hparams = new_hparams.create_hparams()
    model_fn = model.create_model_fn(
            hparams, model_impl=model.birnn_att_model)

    estimator = tf.contrib.learn.Estimator(
            model_fn=model_fn,
            model_dir=MODEL_DIR,
            config=tf.contrib.learn.RunConfig())

    input_fn_train = read_data.create_input_fn(
            mode=tf.contrib.learn.ModeKeys.TRAIN,
            input_files=[TRAIN_FILE],
            batch_size=hparams.batch_size,
            num_epochs=FLAGS.num_epochs)

#    features, target= input_fn_train()
#
#    # TODO
#    probs, loss, train_op = model_fn()


    input_fn_eval = read_data.create_input_fn(
            mode=tf.contrib.learn.ModeKeys.EVAL,
            input_files=[VALIDATION_FILE],
            batch_size=hparams.eval_batch_size,
            num_epochs=1)

    eval_metrics = create_evaluation_metrics()
    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
            input_fn=input_fn_eval,
            every_n_steps=FLAGS.eval_every,
            metrics=eval_metrics)
    estimator.fit(
            input_fn=input_fn_train,
            steps=None,
            monitors=[eval_monitor])
  def _assert_outputs_for_predict(self, configs):
    model_config = configs['model']

    with tf.Graph().as_default():
      features, _ = inputs.create_eval_input_fn(
          configs['eval_config'],
          configs['eval_input_config'],
          configs['model'])()
      detection_model_fn = functools.partial(
          model_builder.build, model_config=model_config, is_training=False)

      hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')

      model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
      estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)

      self.assertIsNone(estimator_spec.loss)
      self.assertIsNone(estimator_spec.train_op)
      self.assertIsNotNone(estimator_spec.predictions)
      self.assertIsNotNone(estimator_spec.export_outputs)
      self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                    estimator_spec.export_outputs)
Example 10
def main():
    hparams = network_params.create_hparams()
    if hparams.eval_batch_size % 10:
        raise ArithmeticError(
            'eval_batch_size must be a multiple of 10 to maintain the '
            'utterance-to-distractors ratio')

    model_fn = model.create_model_fn(  # create the model
        hparams, model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(  # create the estimator for our model_fn
        model_fn=model_fn,  # model function
        model_dir=FLAGS.model_dir,  # directory to save the model parameters, graphs, etc.
        config=tf.contrib.learn.RunConfig(  # specify the amount of GPU memory to use
            gpu_memory_fraction=FLAGS.memory_fraction,
            save_summary_steps=50))

    input_fn_train = IO_data.create_input_fn(  # ensure the model receives the data in the correct format for training
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs  # number of times to read through the dataset
    )

    input_fn_eval = IO_data.create_input_fn(  # ensure the model receives the data in the correct format for evaluation
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = create_evaluation_metrics()

    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(  # monitor that evaluates the model every eval_every steps during training
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example 11
def main(unused_arg):
    hyper_parameters = hparam.create_hparam()

    train_config = tf.contrib.learn.RunConfig(
        gpu_memory_fraction=1,
        save_summary_steps=hyper_parameters.eval_step,
        save_checkpoints_steps=hyper_parameters.eval_step,
        log_step_count_steps=1000)

    estimator = tf.estimator.Estimator(model_fn=model.create_model_fn(),
                                       model_dir=MODEL_DIR,
                                       config=train_config,
                                       params=hyper_parameters)

    monitors_list = []
    if FLAGS.debug:
        debugger = tf_debug.LocalCLIDebugHook()
        monitors_list.append(debugger)

    valid_input_fn = input.create_input_fn(tf.estimator.ModeKeys.EVAL,
                                           [VALID_FILE],
                                           hyper_parameters.eval_batch_size, 1,
                                           False)
    valid_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=valid_input_fn, every_n_steps=hyper_parameters.eval_step)
    monitors_list.append(valid_monitor)

    hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
        monitors_list, estimator)
    train_input_fn = input.create_input_fn(tf.estimator.ModeKeys.TRAIN,
                                           [TRAIN_FILE],
                                           hyper_parameters.batch_size,
                                           hyper_parameters.num_epochs,
                                           hyper_parameters.shuffle_batch)
    estimator.train(input_fn=train_input_fn, hooks=hooks)

    hparam.write_hparams_to_file(hyper_parameters, MODEL_DIR)
Example 12
def main(unused_arg):
    eval_model_fn = model.create_model_fn()
    if not FLAGS.model_dir:
        raise KeyError('FLAGS.model_dir must be specified')
    else:
        MODEL_DIR = FLAGS.model_dir

    hyper_parameters = hparam.create_hparam()

    estimator = tf.estimator.Estimator(eval_model_fn, model_dir=MODEL_DIR, params=hyper_parameters)

    EVAL_FILE = './data/validation.tfrecords'
    eval_input_fn = input.create_input_fn(tf.estimator.ModeKeys.EVAL, [EVAL_FILE],
                                          hyper_parameters.eval_batch_size, 1, False)

    monitors_list = []
    if FLAGS.debug:
        debugger = tf_debug.LocalCLIDebugHook()
        monitors_list.append(debugger)
    hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(monitors_list, estimator)

    eval_result = estimator.evaluate(input_fn=eval_input_fn, hooks=hooks)

    print('precision: {}'.format(eval_result['precision']))
    print('recall: {}'.format(eval_result['recall']))
Example 13
tf.flags.DEFINE_string("test_file", "./data/test.tfrecords",
                       "Path of test data in TFRecords format")
tf.flags.DEFINE_string("model_dir", None,
                       "Directory to load model checkpoints from")
tf.flags.DEFINE_integer("loglevel", 20, "Tensorflow log level")
tf.flags.DEFINE_integer("test_batch_size", 16, "Batch size for testing")
FLAGS = tf.flags.FLAGS

if not FLAGS.model_dir:
    print("You must specify a model directory")
    sys.exit(1)

tf.logging.set_verbosity(FLAGS.loglevel)

if __name__ == "__main__":
    hparams = hparams.create_hparams()
    model_fn = model.create_model_fn(hparams, model_impl=dual_encoder_model)
    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=FLAGS.model_dir,
                                           config=tf.contrib.learn.RunConfig())

    input_fn_test = inputs.create_input_fn(mode=tf.contrib.learn.ModeKeys.EVAL,
                                           input_files=[FLAGS.test_file],
                                           batch_size=FLAGS.test_batch_size,
                                           num_epochs=1)

    eval_metrics = metrics.create_evaluation_metrics()
    estimator.evaluate(input_fn=input_fn_test,
                       steps=None,
                       metrics=eval_metrics)
Example 14
if FLAGS.network == "LSTM":
    selected_model = lstm
elif FLAGS.network == "DSSM":
    selected_model = dssm
else:
    selected_model = mn

tf.logging.set_verbosity(tf.logging.INFO)

if __name__ == "__main__":

    # Load hyperparameters
    hparams = config.create_config()

    # Prepare function that will be used for loading context/utterance
    model_fn = model.create_model_fn(hparams, model_impl=selected_model)

    # Prepare estimator
    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        model_dir=FLAGS.model_dir,
        config=tf.contrib.learn.RunConfig(gpu_memory_fraction=0.25))

    # Prepare input testing examples
    input_fn_test = inputs.create_input_fn(mode=tf.contrib.learn.ModeKeys.EVAL,
                                           input_files=[FLAGS.test_file],
                                           batch_size=FLAGS.test_batch_size,
                                           num_epochs=1,
                                           params=hparams)

    eval_metrics = metrics.create_evaluation_metrics()
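
    # A hedged sketch of the evaluation call that would typically follow (assuming the
    # same evaluate pattern used in the other examples above):
    estimator.evaluate(input_fn=input_fn_test,
                       steps=None,
                       metrics=eval_metrics)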
Example 15
def main(unused_argv):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    hyper_params = create_hparams()
    print("\n\nModel hyperparameters", hyper_params)

    model_fn = model.create_model_fn(hyper_params,
                                     model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        model_dir=MODEL_DIR,
        config=tf.contrib.learn.RunConfig(session_config=config))

    # Training mode
    if not FLAGS.infer_mode:
        input_fn_train = inputs.create_input_fn(
            mode=tf.contrib.learn.ModeKeys.TRAIN,
            input_files=[TRAIN_FILE],
            batch_size=hyper_params.batch_size,
            num_epochs=FLAGS.num_epochs,
            has_dssm=hyper_params.dssm,
            has_lcs=hyper_params.lcs,
        )

        input_fn_eval = inputs.create_input_fn(
            mode=tf.contrib.learn.ModeKeys.EVAL,
            input_files=[VALIDATION_FILE],
            batch_size=hyper_params.eval_batch_size,
            num_epochs=1,
            has_dssm=hyper_params.dssm,
            has_lcs=hyper_params.lcs,
        )

        eval_metrics = metrics.create_evaluation_metrics()

        eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
            input_fn=input_fn_eval,
            every_n_steps=FLAGS.eval_every,
            metrics=eval_metrics,
        )

        estimator.fit(input_fn=input_fn_train,
                      steps=None,
                      monitors=[eval_monitor])
    # Testing mode
    else:
        input_fn_infer = inputs.create_input_fn(
            mode=tf.contrib.learn.ModeKeys.INFER,
            input_files=[TEST_FILE],
            batch_size=hyper_params.eval_batch_size,
            num_epochs=1,
            has_dssm=hyper_params.dssm,
            has_lcs=hyper_params.lcs,
            randomize=False)

        preds = estimator.predict(input_fn=input_fn_infer)
        i = 0
        with open(FLAGS.test_out, 'w') as f:
            for pred in preds:
                i += 1
                output_string = ",".join([("%.15f" % indi) for indi in pred])
                f.write(output_string + "\n")
                print(i)