def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    tf.config.set_soft_device_placement(True)

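    # With a checkpoint directory set, run continuous evaluation; otherwise train.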
    if FLAGS.checkpoint_dir:
        model_lib_v2.eval_continuously(
            hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
            pipeline_config_path=FLAGS.pipeline_config_path,
            model_dir=FLAGS.model_dir,
            train_steps=FLAGS.num_train_steps,
            sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
            sample_1_of_n_eval_on_train_examples=(
                FLAGS.sample_1_of_n_eval_on_train_examples),
            checkpoint_dir=FLAGS.checkpoint_dir,
            wait_interval=300,
            timeout=FLAGS.eval_timeout)
    else:
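        # Pick a distribution strategy: TPU if one is visible, multi-worker
        # collective if num_workers > 1, otherwise local MirroredStrategy.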
        if tf.config.get_visible_devices('TPU'):
            resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
            tf.config.experimental_connect_to_cluster(resolver)
            tf.tpu.experimental.initialize_tpu_system(resolver)
            strategy = tf.distribute.experimental.TPUStrategy(resolver)
        elif FLAGS.num_workers > 1:
            strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
        else:
            strategy = tf.compat.v2.distribute.MirroredStrategy()

        with strategy.scope():
            model_lib_v2.train_loop(
                hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
                pipeline_config_path=FLAGS.pipeline_config_path,
                model_dir=FLAGS.model_dir,
                train_steps=FLAGS.num_train_steps,
                use_tpu=FLAGS.use_tpu)
Example #2
def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

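  # create_estimator_and_inputs returns the Estimator plus all input functions.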
  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      eval_steps=FLAGS.num_eval_steps)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fn = train_and_eval_dict['eval_input_fn']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  predict_input_fn = train_and_eval_dict['predict_input_fn']
  train_steps = train_and_eval_dict['train_steps']
  eval_steps = train_and_eval_dict['eval_steps']

  train_spec, eval_specs = model_lib.create_train_and_eval_specs(
      train_input_fn,
      eval_input_fn,
      eval_on_train_input_fn,
      predict_input_fn,
      train_steps,
      eval_steps,
      eval_on_train_data=False)

  # Currently only a single Eval Spec is allowed.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #3
    def _assert_model_fn_for_predict(self, configs):
        model_config = configs['model']

        with tf.Graph().as_default():
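            # Draw one batch of eval features and run the model_fn in PREDICT mode.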
            features, _ = _make_initializable_iterator(
                inputs.create_eval_input_fn(configs['eval_config'],
                                            configs['eval_input_config'],
                                            configs['model'])()).get_next()
            detection_model_fn = functools.partial(model_builder.build,
                                                   model_config=model_config,
                                                   is_training=False)

            hparams = model_hparams.create_hparams(
                hparams_overrides='load_pretrained=false')

            model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                                 hparams)
            estimator_spec = model_fn(features, None,
                                      tf.estimator.ModeKeys.PREDICT)

            self.assertIsNone(estimator_spec.loss)
            self.assertIsNone(estimator_spec.train_op)
            self.assertIsNotNone(estimator_spec.predictions)
            self.assertIsNotNone(estimator_spec.export_outputs)
            self.assertIn(
                tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                estimator_spec.export_outputs)
Example #4
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        eval_steps=FLAGS.num_eval_steps)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fn,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_steps,
        eval_on_train_data=False)

    # Currently only a single Eval Spec is allowed.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #5
def train_model(model_name, num_train_steps=50000, num_eval_steps=2000):
    model_dir = os.path.join(BASE_MODEL_DIR, model_name)
    hparams_overrides = ""
    pipeline_config_path = os.path.join(model_dir, 'pipeline.config')
    eval_training_data = True

    config = tf.estimator.RunConfig(model_dir=model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(hparams_overrides),
        pipeline_config_path=pipeline_config_path,
        train_steps=num_train_steps,
        eval_steps=num_eval_steps)

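    # Unpack the Estimator and input functions needed to build the specs.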
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fn,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_steps,
        eval_on_train_data=eval_training_data)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #6
def train_model(model, pipeline_path, training_steps=10000):

    config = tf.estimator.RunConfig(model_dir=model)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(),
        pipeline_config_path=pipeline_path,
        train_steps=training_steps,
        sample_1_of_n_eval_examples=1,
        sample_1_of_n_eval_on_train_examples=(5))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fns,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_on_train_data=False)
    # Currently only a single Eval Spec is allowed.
    print("Training starts!")
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
    print("Training ends!")
Example #7
    def train_new_model(self,
                        image_set,
                        hparam_overrides='',
                        num_train_steps=100,
                        num_eval_steps=30):
        if any(x is None for x in
               [self.ckpt_data_file, self.ckpt_index_file, self.ckpt_meta_file]):
            raise UntrainableModelException()

        model_dir = object_detector_upload_to(self, '')
        config = tf.estimator.RunConfig(model_dir=model_dir)
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config=config,
            hparams=model_hparams.create_hparams(hparam_overrides),
            pipeline_config_path=FLAGS.pipeline_config_path,
            train_steps=num_train_steps,
            eval_steps=num_eval_steps)
        estimator = train_and_eval_dict['estimator']
        train_input_fn = train_and_eval_dict['train_input_fn']
        eval_input_fn = train_and_eval_dict['eval_input_fn']
        eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
        predict_input_fn = train_and_eval_dict['predict_input_fn']
        train_steps = train_and_eval_dict['train_steps']
        eval_steps = train_and_eval_dict['eval_steps']
Example #8
    def test_checkpoint_max_to_keep(self):
        """Test that only the most recent checkpoints are kept."""

        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = SimpleModel()

            hparams = model_hparams.create_hparams(
                hparams_overrides='load_pretrained=false')
            pipeline_config_path = get_pipeline_config_path(
                MODEL_NAME_FOR_TEST)
            config_kwarg_overrides = _get_config_kwarg_overrides()
            model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())

            model_lib_v2.train_loop(hparams,
                                    pipeline_config_path,
                                    model_dir=model_dir,
                                    train_steps=20,
                                    checkpoint_every_n=2,
                                    checkpoint_max_to_keep=3,
                                    **config_kwarg_overrides)
            ckpt_files = tf.io.gfile.glob(
                os.path.join(model_dir, 'ckpt-*.index'))
            self.assertEqual(len(ckpt_files), 3,
                             '{} not of length 3.'.format(ckpt_files))
Example #9
  def test_create_train_and_eval_specs(self):
    """Tests that `TrainSpec` and `EvalSpec` is created correctly."""
    run_config = tf.estimator.RunConfig()
    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')
    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
    train_steps = 20
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config,
        hparams,
        pipeline_config_path,
        train_steps=train_steps)
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fns,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_on_train_data=True,
        final_exporter_name='exporter',
        eval_spec_names=['holdout'])
    self.assertEqual(train_steps, train_spec.max_steps)
    self.assertEqual(2, len(eval_specs))
    self.assertEqual(None, eval_specs[0].steps)
    self.assertEqual('holdout', eval_specs[0].name)
    self.assertEqual('exporter_holdout', eval_specs[0].exporters[0].name)
    self.assertEqual(None, eval_specs[1].steps)
    self.assertEqual('eval_on_train', eval_specs[1].name)
Example #10
def train(unused_argv, model_dir, pipeline_config_path, num_train_steps,
          num_eval_steps, network_arch):

    config = tf.estimator.RunConfig(model_dir=model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(None),
        pipeline_config_path=pipeline_config_path,
        train_steps=num_train_steps,
        eval_steps=num_eval_steps)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fn,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_steps,
        eval_on_train_data=False)
    # Currently only a single Eval Spec is allowed.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #11
def test():
    ckpt_dir = '/sdb/tmp/users/yizt/data/20191005.80000.150000.5/output_model'
    pipe_line_config = '/sdb/tmp/users/yizt/data/20191005.80000.150000.5/pipeline.config'
    config = tf.estimator.RunConfig(model_dir=ckpt_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(None),
        sample_1_of_n_eval_examples=1,
        pipeline_config_path=pipe_line_config)
    estimator = train_and_eval_dict['estimator']

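    # Collect every image under image_dir for batched prediction.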
    image_dir = '/sdb/tmp/truck_crane/part1'
    image_path_list = [os.path.join(image_dir, f) for f in os.listdir(image_dir)]

    def input_fn():
        """Created batched dataset of encoded inputs."""
        ds = tf.data.Dataset.from_generator(
            input_generator, {"image": tf.float32, "true_image_shape": tf.int32},
            output_shapes={"image": tf.TensorShape([None, 480, 848, 3]), "true_image_shape": tf.TensorShape([None, 3])},
            args=[image_path_list])
        return ds

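    # predict() returns a generator; iterating it drives inference batch by batch.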
    rs = estimator.predict(input_fn, yield_single_examples=False)

    import datetime
    for i, x in enumerate(rs):
        if i % 100 == 0:
            print("============={}==============={:06d}=============".format(datetime.datetime.now(), i))
            print(x)
Example #12
def train(job):
    config = tf.estimator.RunConfig(job.tempdir + '/model')
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(hparams_overrides),
        pipeline_config_path=job.pipeline,
        train_steps=job.num_train_steps,
        sample_1_of_n_eval_examples=sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            sample_1_of_n_eval_on_train_examples))
    logger.debug(train_and_eval_dict)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fns,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_on_train_data=False)

    # Currently only a single Eval Spec is allowed.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #13
 def _train_and_eval(self):
     gpu_available = tf.test.is_gpu_available()
     session_config = tf.ConfigProto()
     if gpu_available:
         run_config = tf.estimator.RunConfig(
             model_dir=str(self._out_folder))
     else:
         session_config.gpu_options.allow_growth = True
         run_config = tf.estimator.RunConfig(
             model_dir=str(self._out_folder),
             session_config=session_config)
     train_and_eval_dict = model_lib.create_estimator_and_inputs(
         run_config=run_config,
         hparams=model_hparams.create_hparams(None),
         pipeline_config_path=str(self._pipeline_file),
         train_steps=self.num_steps,
         sample_1_of_n_eval_examples=1,
         sample_1_of_n_eval_on_train_examples=5)
     estimator = train_and_eval_dict['estimator']
     train_input_fn = train_and_eval_dict['train_input_fn']
     eval_input_fns = train_and_eval_dict['eval_input_fns']
     eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
     predict_input_fn = train_and_eval_dict['predict_input_fn']
     train_steps = train_and_eval_dict['train_steps']
     train_spec, eval_specs = model_lib.create_train_and_eval_specs(
         train_input_fn,
         eval_input_fns,
         eval_on_train_input_fn,
         predict_input_fn,
         train_steps,
         eval_on_train_data=False)
     tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #14
    def test_create_train_and_eval_specs(self):
        """Tests that `TrainSpec` and `EvalSpec` is created correctly."""
        run_config = tf.estimator.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        train_steps = 20
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config, hparams, pipeline_config_path, train_steps=train_steps)
        train_input_fn = train_and_eval_dict['train_input_fn']
        eval_input_fns = train_and_eval_dict['eval_input_fns']
        eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
        predict_input_fn = train_and_eval_dict['predict_input_fn']
        train_steps = train_and_eval_dict['train_steps']

        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=True,
            final_exporter_name='exporter',
            eval_spec_names=['holdout'])
        self.assertEqual(train_steps, train_spec.max_steps)
        self.assertEqual(2, len(eval_specs))
        self.assertEqual(None, eval_specs[0].steps)
        self.assertEqual('holdout', eval_specs[0].name)
        self.assertEqual('exporter_holdout', eval_specs[0].exporters[0].name)
        self.assertEqual(None, eval_specs[1].steps)
        self.assertEqual('eval_on_train', eval_specs[1].name)
Example #15
  def test_train_loop_then_eval_loop(self):
    """Tests that Estimator and input function are constructed correctly."""
    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')
    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
    config_kwarg_overrides = _get_config_kwarg_overrides()
    model_dir = tf.test.get_temp_dir()

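    # Train for a few steps, then continuously evaluate the checkpoints just written.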
    train_steps = 2
    model_lib_v2.train_loop(
        hparams,
        pipeline_config_path,
        model_dir=model_dir,
        train_steps=train_steps,
        checkpoint_every_n=1,
        **config_kwarg_overrides)

    model_lib_v2.eval_continuously(
        hparams,
        pipeline_config_path,
        model_dir=model_dir,
        checkpoint_dir=model_dir,
        train_steps=train_steps,
        wait_interval=10,
        **config_kwarg_overrides)
Example #16
def main(unused_argv):
    params = {}
    params["PIPELINE_FILE"] = 'pipeline.config'
    params["MODEL_DIR"] = 'model.ckpt'  #output directory
    params["NUM_STEPS"] = int(os.getenv("CK_NUM_STEPS", '1'))
    params["EVAL_TRAIN_DATA"] = os.getenv("CK_EVAL_TRAIN_DATA", False)
    params["SAMPLE_1_OF_N_EVAL_EXAMPLES"] = int(
        os.getenv("CK_SAMPLE_1_OF_N_EVAL_EXAMPLES", 1))
    params["SAMPLE_1_OF_N_TRAIN_EXAMPLES"] = int(
        os.getenv("CK_SAMPLE_1_OF_N_TRAIN_EXAMPLES", 5))
    params["HYPERPARAMS_OVERRIDE"] = os.getenv("CK_HYPERPARAMS_OVERRIDE", None)
    params["CHECKPOINT_DIR"] = os.getenv("CK_CHECKPOINT_DIR", None)
    params["RUN_ONCE"] = os.getenv("CK_RUN_ONCE", None)
    #flags.mark_flag_as_required('model_dir')
    #flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(params["MODEL_DIR"])

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(params["HYPERPARAMS_OVERRIDE"]),
        pipeline_config_path=params["PIPELINE_FILE"],
        train_steps=params["NUM_STEPS"],
        sample_1_of_n_eval_examples=params["SAMPLE_1_OF_N_EVAL_EXAMPLES"],
        sample_1_of_n_eval_on_train_examples=(
            params["SAMPLE_1_OF_N_TRAIN_EXAMPLES"]))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

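    # Evaluate from an existing checkpoint directory, or else train and evaluate.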
    if params["CHECKPOINT_DIR"]:
        if params["EVAL_TRAIN_DATA"]:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if params["RUN_ONCE"]:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   params["CHECKPOINT_DIR"]))
        else:
            model_lib.continuous_eval(estimator, params["CHECKPOINT_DIR"],
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #17
    def _assert_model_fn_for_train_eval(self,
                                        configs,
                                        mode,
                                        class_agnostic=False):
        model_config = configs['model']
        train_config = configs['train_config']
        with tf.Graph().as_default():
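            # Build features and labels for the requested mode from the input pipelines.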
            if mode == 'train_bkp':
                features, labels = inputs.create_train_input_fn(
                    configs['train_config'], configs['train_input_config'],
                    configs['model'])()
                model_mode = tf.estimator.ModeKeys.TRAIN
                batch_size = train_config.batch_size
            elif mode == 'eval_bkp':
                features, labels = inputs.create_eval_input_fn(
                    configs['eval_config'], configs['eval_input_config'],
                    configs['model'])()
                model_mode = tf.estimator.ModeKeys.EVAL
                batch_size = 1
            elif mode == 'eval_on_train':
                features, labels = inputs.create_eval_input_fn(
                    configs['eval_config'], configs['train_input_config'],
                    configs['model'])()
                model_mode = tf.estimator.ModeKeys.EVAL
                batch_size = 1

            detection_model_fn = functools.partial(model_builder.build,
                                                   model_config=model_config,
                                                   is_training=True)

            hparams = model_hparams.create_hparams(
                hparams_overrides='load_pretrained=false')

            model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                                 hparams)
            estimator_spec = model_fn(features, labels, model_mode)

            self.assertIsNotNone(estimator_spec.loss)
            self.assertIsNotNone(estimator_spec.predictions)
            if class_agnostic:
                self.assertNotIn('detection_classes',
                                 estimator_spec.predictions)
            else:
                detection_classes = estimator_spec.predictions[
                    'detection_classes']
                self.assertEqual(batch_size,
                                 detection_classes.shape.as_list()[0])
                self.assertEqual(tf.float32, detection_classes.dtype)
            detection_boxes = estimator_spec.predictions['detection_boxes']
            detection_scores = estimator_spec.predictions['detection_scores']
            num_detections = estimator_spec.predictions['num_detections']
            self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
            self.assertEqual(tf.float32, detection_boxes.dtype)
            self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
            self.assertEqual(tf.float32, detection_scores.dtype)
            self.assertEqual(tf.float32, num_detections.dtype)
            if model_mode == tf.estimator.ModeKeys.TRAIN:
                self.assertIsNotNone(estimator_spec.train_op)
            return estimator_spec
Example #18
  def _assert_model_fn_for_train_eval(self, configs, mode,
                                      class_agnostic=False):
    model_config = configs['model']
    train_config = configs['train_config']
    with tf.Graph().as_default():
      if mode == 'train':
        features, labels = _make_initializable_iterator(
            inputs.create_train_input_fn(configs['train_config'],
                                         configs['train_input_config'],
                                         configs['model'])()).get_next()
        model_mode = tf.estimator.ModeKeys.TRAIN
        batch_size = train_config.batch_size
      elif mode == 'eval':
        features, labels = _make_initializable_iterator(
            inputs.create_eval_input_fn(configs['eval_config'],
                                        configs['eval_input_config'],
                                        configs['model'])()).get_next()
        model_mode = tf.estimator.ModeKeys.EVAL
        batch_size = 1
      elif mode == 'eval_on_train':
        features, labels = _make_initializable_iterator(
            inputs.create_eval_input_fn(configs['eval_config'],
                                        configs['train_input_config'],
                                        configs['model'])()).get_next()
        model_mode = tf.estimator.ModeKeys.EVAL
        batch_size = 1

      detection_model_fn = functools.partial(
          model_builder.build, model_config=model_config, is_training=True)

      hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')

      model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
      estimator_spec = model_fn(features, labels, model_mode)

      self.assertIsNotNone(estimator_spec.loss)
      self.assertIsNotNone(estimator_spec.predictions)
      if mode == 'eval' or mode == 'eval_on_train':
        if class_agnostic:
          self.assertNotIn('detection_classes', estimator_spec.predictions)
        else:
          detection_classes = estimator_spec.predictions['detection_classes']
          self.assertEqual(batch_size, detection_classes.shape.as_list()[0])
          self.assertEqual(tf.float32, detection_classes.dtype)
        detection_boxes = estimator_spec.predictions['detection_boxes']
        detection_scores = estimator_spec.predictions['detection_scores']
        num_detections = estimator_spec.predictions['num_detections']
        self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_boxes.dtype)
        self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_scores.dtype)
        self.assertEqual(tf.float32, num_detections.dtype)
        if mode == 'eval':
          self.assertIn('Detections_Left_Groundtruth_Right/0',
                        estimator_spec.eval_metric_ops)
      if model_mode == tf.estimator.ModeKeys.TRAIN:
        self.assertIsNotNone(estimator_spec.train_op)
      return estimator_spec
Example #19
def main(unused_argv):
    tf.flags.mark_flag_as_required('model_dir')
    tf.flags.mark_flag_as_required('pipeline_config_path')
    config = tf.contrib.learn.RunConfig(model_dir=FLAGS.model_dir)
    learn_runner.run(experiment_fn=build_experiment_fn(FLAGS.num_train_steps,
                                                       FLAGS.num_eval_steps),
                     run_config=config,
                     hparams=model_hparams.create_hparams())
Example #20
def main(unused_argv):
  tf.flags.mark_flag_as_required('model_dir')
  tf.flags.mark_flag_as_required('pipeline_config_path')
  config = tf.contrib.learn.RunConfig(model_dir=FLAGS.model_dir)
  learn_runner.run(
      experiment_fn=build_experiment_fn(FLAGS.num_train_steps,
                                        FLAGS.num_eval_steps),
      run_config=config,
      hparams=model_hparams.create_hparams())
Example #21
def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')

  tpu_cluster_resolver = (
      tf.contrib.cluster_resolver.TPUClusterResolver(
          tpu=[FLAGS.tpu_name],
          zone=FLAGS.tpu_zone,
          project=FLAGS.gcp_project))
  tpu_grpc_url = tpu_cluster_resolver.get_master()

  config = tf.contrib.tpu.RunConfig(
      master=tpu_grpc_url,
      evaluation_master=tpu_grpc_url,
      model_dir=FLAGS.model_dir,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_shards))

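  # Let the command line override the training batch size.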
  kwargs = {}
  if FLAGS.train_batch_size:
    kwargs['batch_size'] = FLAGS.train_batch_size

  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
      sample_1_of_n_eval_on_train_examples=(
          FLAGS.sample_1_of_n_eval_on_train_examples),
      use_tpu_estimator=True,
      use_tpu=FLAGS.use_tpu,
      num_shards=FLAGS.num_shards,
      save_final_config=FLAGS.mode == 'train',
      **kwargs)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  if FLAGS.mode == 'train':
    estimator.train(input_fn=train_input_fn, max_steps=train_steps)

  # Continuously evaluating.
  if FLAGS.mode == 'eval':
    if FLAGS.eval_training_data:
      name = 'training_data'
      input_fn = eval_on_train_input_fn
    else:
      name = 'validation_data'
      # Currently only a single eval input is allowed.
      input_fn = eval_input_fns[0]
    model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
                              name)
Example #22
def train_and_eval(rpn_type=None,
                   filter_fn_arg=None,
                   replace_rpn_arg=None,
                   number_of_stages=None):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir,
                                    save_checkpoints_steps=10000)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples),
        rpn_type=rpn_type,
        filter_fn_arg=filter_fn_arg,
        replace_rpn_arg=replace_rpn_arg,
        number_of_stages=number_of_stages)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

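    # With --checkpoint_dir set, only evaluate; otherwise train and evaluate.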
    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #23
def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')

  tpu_cluster_resolver = (
      tf.contrib.cluster_resolver.TPUClusterResolver(
          tpu=[FLAGS.tpu_name],
          zone=FLAGS.tpu_zone,
          project=FLAGS.gcp_project))
  tpu_grpc_url = tpu_cluster_resolver.get_master()

  config = tf.contrib.tpu.RunConfig(
      master=tpu_grpc_url,
      evaluation_master=tpu_grpc_url,
      model_dir=FLAGS.model_dir,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_shards))

  kwargs = {}
  if FLAGS.train_batch_size:
    kwargs['batch_size'] = FLAGS.train_batch_size

  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
      sample_1_of_n_eval_on_train_examples=(
          FLAGS.sample_1_of_n_eval_on_train_examples),
      use_tpu_estimator=True,
      use_tpu=FLAGS.use_tpu,
      num_shards=FLAGS.num_shards,
      save_final_config=FLAGS.mode == 'train',
      **kwargs)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  if FLAGS.mode == 'train':
    estimator.train(input_fn=train_input_fn, max_steps=train_steps)

  # Continuously evaluating.
  if FLAGS.mode == 'eval':
    if FLAGS.eval_training_data:
      name = 'training_data'
      input_fn = eval_on_train_input_fn
    else:
      name = 'validation_data'
      # Currently only a single eval input is allowed.
      input_fn = eval_input_fns[0]
    model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
                              name)
Example #24
def BuildExperiment():
  """Builds an Experiment object for testing purposes."""
  run_config = tf.contrib.learn.RunConfig()
  hparams = model_hparams.create_hparams(
      hparams_overrides='load_pretrained=false')

  # pylint: disable=protected-access
  experiment_fn = model._build_experiment_fn(10, 10)
  # pylint: enable=protected-access
  return experiment_fn(run_config, hparams)
Example #25
def BuildExperiment():
    """Builds an Experiment object for testing purposes."""
    run_config = tf.contrib.learn.RunConfig()
    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')

    # pylint: disable=protected-access
    experiment_fn = model.build_experiment_fn(10, 10)
    # pylint: enable=protected-access
    return experiment_fn(run_config, hparams)
Example #26
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(
        model_dir=FLAGS.model_dir,
        keep_checkpoint_max=FLAGS.keep_checkpoint_max,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        log_step_count_steps=FLAGS.log_step_count_steps)

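    # Build the Estimator and input functions, applying any hparams overrides.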
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name,
                                      FLAGS.max_eval_retries)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            num_eval_steps=FLAGS.num_eval_steps,
            eval_throttle_secs=FLAGS.eval_throttle_secs,
            eval_start_delay_secs=FLAGS.eval_start_delay_secs,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #27
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        eval_steps=FLAGS.num_eval_steps)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    print('train_and_eval_dict: ' + str(train_and_eval_dict))

    if FLAGS.checkpoint_dir:
        print('enter into "FLAGS.checkpoint_dir:"')
        if FLAGS.eval_training_data:
            print('enter into "if FLAGS.eval_training_data:"')
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            print('enter into else of "if FLAGS.eval_training_data:"')
            name = 'validation_data'
            input_fn = eval_input_fn
        if FLAGS.run_once:
            print('enter into "if FLAGS.run_once:"')
            estimator.evaluate(input_fn,
                               eval_steps,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            print('enter into else of "if FLAGS.run_once:"')
            model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn,
                                      eval_steps, train_steps, name)
    else:
        print('enter into else of "FLAGS.checkpoint_dir:"')
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fn,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.logging.set_verbosity(tf.logging.INFO)
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #28
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')

    tf.logging.set_verbosity(tf.logging.INFO)

    #session_config = tf.ConfigProto()
    #session_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    #session_config.gpu_options.allow_growth = True

    #config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir, session_config=session_config)
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #29
def main(unused_argv):
    # There are two arguments that must be provided.
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    # The variable 'config' contains instructions on saving checkpoints and
    # general training info.
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir,
                                    log_step_count_steps=10,
                                    save_checkpoints_steps=300)

    # Build the Estimator and inputs via 'create_estimator_and_inputs'.
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        # hparams_overrides is worth paying attention to when tuning the model.
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #30
def main(unused_argv):
    flags.mark_flag_as_required("model_dir")
    flags.mark_flag_as_required("pipeline_config_path")
    config = tf.estimator.RunConfig(
        model_dir=FLAGS.model_dir, log_step_count_steps=FLAGS.log_step_count_steps
    )

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples
        ),
    )
    estimator = train_and_eval_dict["estimator"]
    train_input_fn = train_and_eval_dict["train_input_fn"]
    eval_input_fns = train_and_eval_dict["eval_input_fns"]
    eval_on_train_input_fn = train_and_eval_dict["eval_on_train_input_fn"]
    predict_input_fn = train_and_eval_dict["predict_input_fn"]
    train_steps = train_and_eval_dict["train_steps"]

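    # Evaluation-only path when a checkpoint directory is supplied.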
    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = "training_data"
            input_fn = eval_on_train_input_fn
        else:
            name = "validation_data"
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(
                input_fn,
                steps=None,
                checkpoint_path=tf.train.latest_checkpoint(FLAGS.checkpoint_dir),
            )
        else:
            model_lib.continuous_eval(
                estimator, FLAGS.checkpoint_dir, input_fn, train_steps, name
            )
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False,
        )

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #31
def build_estimator():
    session_config = tf.ConfigProto()
    config = tf.estimator.RunConfig(session_config=session_config)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(None),
        pipeline_config_path=FLAGS.pipeline_config_path)
    estimator = train_and_eval_dict['estimator']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    return estimator, eval_input_fns[0]
Example #32
def main(unused_argv):
    pipeline_config_path = "D:\\project3_faster_rcnn\\models-master\\research\\hat_dataset\\hat_resnet50_config.config"
    model_dir = "D:\\project3_faster_rcnn\\models-master\\research\\hat_dataset\\checkpoints\\"
    config = tf.estimator.RunConfig(model_dir=model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

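    # Cap per-process GPU memory at 90% and allow growth for the session.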
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        per_process_gpu_memory_fraction=0.9))
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    set_session(session)

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #33
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    #config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
    config = config_util.get_configs_from_pipeline_file(
        pipeline_config_path=FLAGS.pipeline_config_path, config_override=None)
    train_config = config['train_config']

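    # create_run_config (a project-specific helper) derives the multi-GPU RunConfig.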
    run_config = create_run_config(train_config=train_config)
    train_and_eval_dict = model_lib_multiGPU.create_estimator_and_inputs(
        run_config=run_config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib_multiGPU.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                               input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib_multiGPU.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.logging.info('begin to train and evaluate')
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #34
def main(unused_argv):
    flags.mark_flag_as_required('train_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    #config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
    config = tf.estimator.RunConfig(
        'path where the training output files will be generated')

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        #pipeline_config_path=FLAGS.pipeline_config_path,
        pipeline_config_path='path of the model config file',
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        #if ('C:/Users/zansh/Anaconda3/Lib/site-packages/object_detection1/training/'):
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))

        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #35
 def test_experiment(self):
     """Tests that the `Experiment` object is constructed correctly."""
     run_config = tf.estimator.RunConfig()
     hparams = model_hparams.create_hparams(
         hparams_overrides='load_pretrained=false')
     pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
     experiment = model_lib.populate_experiment(run_config,
                                                hparams,
                                                pipeline_config_path,
                                                train_steps=10,
                                                eval_steps=20)
     self.assertEqual(10, experiment.train_steps)
     self.assertEqual(20, experiment.eval_steps)
Example #36
def main(unused_argv):
    #flags.mark_flag_as_required('model_dir')
    #flags.mark_flag_as_required('pipeline_config_path')
    if not FLAGS.model_dir:
        raise ValueError('You must supply the model_dir')
    if not FLAGS.pipeline_config_path:
        raise ValueError('You must supply the pipeline_config_path')

    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        eval_steps=FLAGS.num_eval_steps)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            input_fn = eval_input_fn
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               eval_steps,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
                                      eval_steps, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fn,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #37
 def test_experiment(self):
   """Tests that the `Experiment` object is constructed correctly."""
   run_config = tf.estimator.RunConfig()
   hparams = model_hparams.create_hparams(
       hparams_overrides='load_pretrained=false')
   pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
   experiment = model_lib.populate_experiment(
       run_config,
       hparams,
       pipeline_config_path,
       train_steps=10,
       eval_steps=20)
   self.assertEqual(10, experiment.train_steps)
   self.assertEqual(20, experiment.eval_steps)
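
A hedged usage sketch (assuming the objects constructed in the test above): the tf.contrib.learn Experiment returned by populate_experiment can be driven directly.

# experiment = model_lib.populate_experiment(run_config,
#                                            hparams,
#                                            pipeline_config_path,
#                                            train_steps=10,
#                                            eval_steps=20)
# experiment.train_and_evaluate()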
Example #38
  def test_create_estimator_with_default_train_eval_steps(self):
    """Tests that number of train/eval defaults to config values."""
    run_config = tf.estimator.RunConfig()
    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')
    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    config_train_steps = configs['train_config'].num_steps
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config, hparams, pipeline_config_path)
    estimator = train_and_eval_dict['estimator']
    train_steps = train_and_eval_dict['train_steps']

    self.assertIsInstance(estimator, tf.estimator.Estimator)
    self.assertEqual(config_train_steps, train_steps)
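
For reference, the num_steps this test reads comes from the train_config block of the pipeline file; a minimal hypothetical fragment (values are examples only):

# train_config: {
#   batch_size: 24
#   num_steps: 200000
# }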
Example #39
def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
      sample_1_of_n_eval_on_train_examples=(
          FLAGS.sample_1_of_n_eval_on_train_examples))
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  predict_input_fn = train_and_eval_dict['predict_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  if FLAGS.checkpoint_dir:
    if FLAGS.eval_training_data:
      name = 'training_data'
      input_fn = eval_on_train_input_fn
    else:
      name = 'validation_data'
      # The first eval input will be evaluated.
      input_fn = eval_input_fns[0]
    if FLAGS.run_once:
      estimator.evaluate(input_fn,
                         steps=None,
                         checkpoint_path=tf.train.latest_checkpoint(
                             FLAGS.checkpoint_dir))
    else:
      model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
                                train_steps, name)
  else:
    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_input_fn,
        eval_input_fns,
        eval_on_train_input_fn,
        predict_input_fn,
        train_steps,
        eval_on_train_data=False)

    # Currently only a single Eval Spec is allowed.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
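
The run-once branch above evaluates only eval_input_fns[0]. A minimal sketch, reusing the estimator, eval_input_fns, and FLAGS from this example (the eval names are hypothetical), that evaluates every configured eval input instead:

# Evaluate each eval input against the latest checkpoint.
for i, fn in enumerate(eval_input_fns):
  estimator.evaluate(fn,
                     steps=None,
                     checkpoint_path=tf.train.latest_checkpoint(
                         FLAGS.checkpoint_dir),
                     name='validation_data_%d' % i)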
Example #40
  def _assert_outputs_for_train_eval(self, configs, mode, class_agnostic=False):
    model_config = configs['model']
    train_config = configs['train_config']
    with tf.Graph().as_default():
      if mode == tf.estimator.ModeKeys.TRAIN:
        features, labels = inputs.create_train_input_fn(
            configs['train_config'],
            configs['train_input_config'],
            configs['model'])()
        batch_size = train_config.batch_size
      else:
        features, labels = inputs.create_eval_input_fn(
            configs['eval_config'],
            configs['eval_input_config'],
            configs['model'])()
        batch_size = 1

      detection_model_fn = functools.partial(
          model_builder.build, model_config=model_config, is_training=True)

      hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')

      model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
      estimator_spec = model_fn(features, labels, mode)

      self.assertIsNotNone(estimator_spec.loss)
      self.assertIsNotNone(estimator_spec.predictions)
      if class_agnostic:
        self.assertNotIn('detection_classes', estimator_spec.predictions)
      else:
        detection_classes = estimator_spec.predictions['detection_classes']
        self.assertEqual(batch_size, detection_classes.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_classes.dtype)
      detection_boxes = estimator_spec.predictions['detection_boxes']
      detection_scores = estimator_spec.predictions['detection_scores']
      num_detections = estimator_spec.predictions['num_detections']
      self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
      self.assertEqual(tf.float32, detection_boxes.dtype)
      self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
      self.assertEqual(tf.float32, detection_scores.dtype)
      self.assertEqual(tf.float32, num_detections.dtype)
      if mode == tf.estimator.ModeKeys.TRAIN:
        self.assertIsNotNone(estimator_spec.train_op)
      return estimator_spec
Example #41
  def test_create_tpu_estimator_and_inputs(self):
    """Tests that number of train/eval defaults to config values."""

    run_config = tpu_config.RunConfig()
    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')
    pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
    train_steps = 20
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config,
        hparams,
        pipeline_config_path,
        train_steps=train_steps,
        use_tpu_estimator=True)
    estimator = train_and_eval_dict['estimator']
    train_steps = train_and_eval_dict['train_steps']

    self.assertIsInstance(estimator, tpu_estimator.TPUEstimator)
    self.assertEqual(20, train_steps)
Example #42
 def test_create_estimator_and_inputs(self):
   """Tests that Estimator and input function are constructed correctly."""
   run_config = tf.estimator.RunConfig()
   hparams = model_hparams.create_hparams(
       hparams_overrides='load_pretrained=false')
   pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
   train_steps = 20
   train_and_eval_dict = model_lib.create_estimator_and_inputs(
       run_config,
       hparams,
       pipeline_config_path,
       train_steps=train_steps)
   estimator = train_and_eval_dict['estimator']
   train_steps = train_and_eval_dict['train_steps']
   self.assertIsInstance(estimator, tf.estimator.Estimator)
   self.assertEqual(20, train_steps)
   self.assertIn('train_input_fn', train_and_eval_dict)
   self.assertIn('eval_input_fns', train_and_eval_dict)
   self.assertIn('eval_on_train_input_fn', train_and_eval_dict)
Example #43
  def _assert_model_fn_for_predict(self, configs):
    model_config = configs['model']

    with tf.Graph().as_default():
      features, _ = _make_initializable_iterator(
          inputs.create_eval_input_fn(configs['eval_config'],
                                      configs['eval_input_config'],
                                      configs['model'])()).get_next()
      detection_model_fn = functools.partial(
          model_builder.build, model_config=model_config, is_training=False)

      hparams = model_hparams.create_hparams(
          hparams_overrides='load_pretrained=false')

      model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams)
      estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)

      self.assertIsNone(estimator_spec.loss)
      self.assertIsNone(estimator_spec.train_op)
      self.assertIsNotNone(estimator_spec.predictions)
      self.assertIsNotNone(estimator_spec.export_outputs)
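      # PREDICT_METHOD_NAME is the serving signature method name,
      # 'tensorflow/serving/predict'.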
      self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                    estimator_spec.export_outputs)
Example #44
def main(unused_argv):
  tf.flags.mark_flag_as_required('model_dir')
  tf.flags.mark_flag_as_required('pipeline_config_path')

  if FLAGS.master is None and FLAGS.tpu_name is None:
    raise RuntimeError('You must specify either --master or --tpu_name.')

  if FLAGS.master is not None:
    if FLAGS.tpu_name is not None:
      tf.logging.warn('Both --master and --tpu_name are set. Ignoring '
                      '--tpu_name and using --master.')
    tpu_grpc_url = FLAGS.master
  else:
    tpu_cluster_resolver = (
        tf.contrib.cluster_resolver.python.training.TPUClusterResolver(
            tpu_names=[FLAGS.tpu_name],
            zone=FLAGS.tpu_zone,
            project=FLAGS.gcp_project))
    tpu_grpc_url = tpu_cluster_resolver.get_master()

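  # iterations_per_loop controls how many training steps run on the TPU per
  # infeed loop; num_shards is the number of TPU cores to shard across.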
  config = tpu_config.RunConfig(
      master=tpu_grpc_url,
      evaluation_master=tpu_grpc_url,
      model_dir=FLAGS.model_dir,
      tpu_config=tpu_config.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_shards))
  params = {}
  (estimator, train_input_fn, eval_validation_input_fn, eval_training_input_fn,
   train_steps, eval_steps) = (
       create_estimator(
           config,
           model_hparams.create_hparams(
               hparams_overrides=FLAGS.hparams_overrides),
           FLAGS.pipeline_config_path,
           train_steps=FLAGS.num_train_steps,
           eval_steps=FLAGS.num_eval_steps,
           train_batch_size=FLAGS.train_batch_size,
           use_tpu=FLAGS.use_tpu,
           num_shards=FLAGS.num_shards,
           params=params))

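  # Dispatch on --mode: 'train' and 'train_and_eval' run training first;
  # 'eval' (below) instead polls the model directory for new checkpoints.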
  if FLAGS.mode in ['train', 'train_and_eval']:
    estimator.train(input_fn=train_input_fn, max_steps=train_steps)

  if FLAGS.mode == 'train_and_eval':
    # Eval one time.
    eval_results = estimator.evaluate(
        input_fn=eval_validation_input_fn, steps=eval_steps)
    tf.logging.info('Eval results: %s' % eval_results)

  # Continuously evaluating.
  if FLAGS.mode == 'eval':
    def terminate_eval():
      tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
                      FLAGS.eval_timeout_secs)
      return True

    # Run evaluation when there's a new checkpoint.
    for ckpt in evaluation.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval_secs,
        timeout=FLAGS.eval_timeout_secs,
        timeout_fn=terminate_eval):

      tf.logging.info('Starting to evaluate.')
      if FLAGS.eval_training_data:
        name = 'training_data'
        input_fn = eval_training_input_fn
      else:
        name = 'validation_data'
        input_fn = eval_validation_input_fn
      try:
        eval_results = estimator.evaluate(
            input_fn=input_fn,
            steps=eval_steps,
            checkpoint_path=ckpt,
            name=name)
        tf.logging.info('Eval results: %s' % eval_results)

        # Terminate eval job when final checkpoint is reached
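        # Checkpoint basenames look like 'model.ckpt-12345'; the number after
        # the dash is the global step at which the checkpoint was written.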
        current_step = int(os.path.basename(ckpt).split('-')[1])
        if current_step >= train_steps:
          tf.logging.info(
              'Evaluation finished after training step %d' % current_step)
          break

      except tf.errors.NotFoundError:
        tf.logging.info(
            'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)