Example #1
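A test helper that builds a `model_fn` via `model_lib.create_model_fn` and checks the `EstimatorSpec` it returns in PREDICT mode: no loss or train op, but predictions and export outputs (keyed by the predict signature) are present. `_make_initializable_iterator` is a helper defined elsewhere in the same test module.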
    def _assert_model_fn_for_predict(self, configs):
        model_config = configs['model']

        with tf.Graph().as_default():
            features, _ = _make_initializable_iterator(
                inputs.create_eval_input_fn(configs['eval_config'],
                                            configs['eval_input_config'],
                                            configs['model'])()).get_next()
            detection_model_fn = functools.partial(model_builder.build,
                                                   model_config=model_config,
                                                   is_training=False)

            hparams = model_hparams.create_hparams(
                hparams_overrides='load_pretrained=false')

            model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                                 hparams)
            estimator_spec = model_fn(features, None,
                                      tf.estimator.ModeKeys.PREDICT)

            self.assertIsNone(estimator_spec.loss)
            self.assertIsNone(estimator_spec.train_op)
            self.assertIsNotNone(estimator_spec.predictions)
            self.assertIsNotNone(estimator_spec.export_outputs)
            self.assertIn(
                tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                estimator_spec.export_outputs)
Example #2
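Verifies that `model_lib.create_train_and_eval_specs` returns a `TrainSpec` honoring `train_steps` and, with `eval_on_train_data=True`, two `EvalSpec`s: the named 'holdout' spec, whose exporter name is derived from `final_exporter_name`, plus an extra 'eval_on_train' spec.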
    def test_create_train_and_eval_specs(self):
        """Tests that `TrainSpec` and `EvalSpec` is created correctly."""
        run_config = tf.estimator.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        train_steps = 20
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config, hparams, pipeline_config_path, train_steps=train_steps)
        train_input_fn = train_and_eval_dict['train_input_fn']
        eval_input_fns = train_and_eval_dict['eval_input_fns']
        eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
        predict_input_fn = train_and_eval_dict['predict_input_fn']
        train_steps = train_and_eval_dict['train_steps']

        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=True,
            final_exporter_name='exporter',
            eval_spec_names=['holdout'])
        self.assertEqual(train_steps, train_spec.max_steps)
        self.assertEqual(2, len(eval_specs))
        self.assertIsNone(eval_specs[0].steps)
        self.assertEqual('holdout', eval_specs[0].name)
        self.assertEqual('exporter_holdout', eval_specs[0].exporters[0].name)
        self.assertIsNone(eval_specs[1].steps)
        self.assertEqual('eval_on_train', eval_specs[1].name)
Example #3
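A TPU entry point: it resolves the TPU cluster, builds a `tf.contrib.tpu.RunConfig`, creates a `TPUEstimator` through `model_lib.create_estimator_and_inputs(use_tpu_estimator=True, ...)`, and then either trains or continuously evaluates depending on `FLAGS.mode`.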
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')

    tpu_cluster_resolver = (tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
    tpu_grpc_url = tpu_cluster_resolver.get_master()

    config = tf.contrib.tpu.RunConfig(
        master=tpu_grpc_url,
        evaluation_master=tpu_grpc_url,
        model_dir=FLAGS.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_shards))

    kwargs = {}
    if FLAGS.train_batch_size:
        kwargs['batch_size'] = FLAGS.train_batch_size

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples),
        use_tpu_estimator=True,
        use_tpu=FLAGS.use_tpu,
        num_shards=FLAGS.num_shards,
        **kwargs)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.mode == 'train':
        estimator.train(input_fn=train_input_fn, max_steps=train_steps)

    # Continuously evaluate.
    if FLAGS.mode == 'eval':
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # Currently only a single eval input is allowed.
            input_fn = eval_input_fns[0]
        model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn,
                                  train_steps, name)
Example #4
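Exercises the legacy `Experiment` path: `model_lib.populate_experiment` honors `train_steps`, while the `eval_steps` argument is ignored (it comes back as `None`).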
    def test_experiment(self):
        """Tests that the `Experiment` object is constructed correctly."""
        run_config = tf.estimator.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        experiment = model_lib.populate_experiment(run_config,
                                                   hparams,
                                                   pipeline_config_path,
                                                   train_steps=10,
                                                   eval_steps=20)
        self.assertEqual(10, experiment.train_steps)
        self.assertIsNone(experiment.eval_steps)
Example #5
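Checks the fallback behavior of `create_estimator_and_inputs`: when `train_steps` is not supplied, the number of train steps is taken from `train_config.num_steps` in the pipeline config.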
    def test_create_estimator_with_default_train_eval_steps(self):
        """Tests that number of train/eval defaults to config values."""
        run_config = tf.estimator.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        config_train_steps = configs['train_config'].num_steps
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config, hparams, pipeline_config_path)
        estimator = train_and_eval_dict['estimator']
        train_steps = train_and_eval_dict['train_steps']

        self.assertIsInstance(estimator, tf.estimator.Estimator)
        self.assertEqual(config_train_steps, train_steps)
Example #6
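Smoke-tests `create_estimator_and_inputs` itself: the returned dictionary holds a plain `tf.estimator.Estimator`, the requested number of train steps, and the expected input-function entries.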
    def test_create_estimator_and_inputs(self):
        """Tests that the Estimator and input functions are constructed correctly."""
        run_config = tf.estimator.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        train_steps = 20
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config, hparams, pipeline_config_path, train_steps=train_steps)
        estimator = train_and_eval_dict['estimator']
        train_steps = train_and_eval_dict['train_steps']
        self.assertIsInstance(estimator, tf.estimator.Estimator)
        self.assertEqual(20, train_steps)
        self.assertIn('train_input_fn', train_and_eval_dict)
        self.assertIn('eval_input_fns', train_and_eval_dict)
        self.assertIn('eval_on_train_input_fn', train_and_eval_dict)
Example #7
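A CPU/GPU entry point: with `--checkpoint_dir` set it evaluates against the latest checkpoint (once or continuously); otherwise it builds the train and eval specs and hands them to `tf.estimator.train_and_evaluate`.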
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']

    if FLAGS.checkpoint_dir:
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               steps=None,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir,
                                      input_fn, train_steps, name)
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #8
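The TPU counterpart of Example #6: passing `use_tpu_estimator=True` together with a `tpu_config.RunConfig` should yield a `tpu_estimator.TPUEstimator`.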
    def test_create_tpu_estimator_and_inputs(self):
        """Tests that number of train/eval defaults to config values."""

        run_config = tpu_config.RunConfig()
        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
        train_steps = 20
        train_and_eval_dict = model_lib.create_estimator_and_inputs(
            run_config,
            hparams,
            pipeline_config_path,
            train_steps=train_steps,
            use_tpu_estimator=True)
        estimator = train_and_eval_dict['estimator']
        train_steps = train_and_eval_dict['train_steps']

        self.assertIsInstance(estimator, tpu_estimator.TPUEstimator)
        self.assertEqual(20, train_steps)
Example #9
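An older variant of the entry point in Example #7, apparently from before eval inputs became a list: it still threads `eval_steps` through `create_estimator_and_inputs` and reads a single `eval_input_fn` from the returned dictionary.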
def main(unused_argv):
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        eval_steps=FLAGS.num_eval_steps)
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fn = train_and_eval_dict['eval_input_fn']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_steps = train_and_eval_dict['eval_steps']

    if FLAGS.checkpoint_dir:
        estimator.evaluate(eval_input_fn,
                           eval_steps,
                           checkpoint_path=tf.train.latest_checkpoint(
                               FLAGS.checkpoint_dir))
    else:
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fn,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_steps,
            eval_on_train_data=False)

        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
Example #10
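The train/eval counterpart of the helper in Example #1: it wires up the input pipeline matching the requested mode, runs the `model_fn`, and asserts on the loss, the detection tensors' batch dimensions and dtypes, the eval metric ops, and the train op.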
    def _assert_model_fn_for_train_eval(self,
                                        configs,
                                        mode,
                                        class_agnostic=False):
        features, labels, model_mode, batch_size = None, None, None, None
        model_config = configs['model']
        train_config = configs['train_config']
        with tf.Graph().as_default():
            if mode == 'train':
                features, labels = _make_initializable_iterator(
                    inputs.create_train_input_fn(
                        configs['train_config'], configs['train_input_config'],
                        configs['model'])()).get_next()
                model_mode = tf.estimator.ModeKeys.TRAIN
                batch_size = train_config.batch_size
            elif mode == 'eval':
                features, labels = _make_initializable_iterator(
                    inputs.create_eval_input_fn(
                        configs['eval_config'], configs['eval_input_config'],
                        configs['model'])()).get_next()
                model_mode = tf.estimator.ModeKeys.EVAL
                batch_size = 1
            elif mode == 'eval_on_train':
                features, labels = _make_initializable_iterator(
                    inputs.create_eval_input_fn(
                        configs['eval_config'], configs['train_input_config'],
                        configs['model'])()).get_next()
                model_mode = tf.estimator.ModeKeys.EVAL
                batch_size = 1

            detection_model_fn = functools.partial(model_builder.build,
                                                   model_config=model_config,
                                                   is_training=True)

            hparams = model_hparams.create_hparams(
                hparams_overrides='load_pretrained=false')

            model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                                 hparams)
            estimator_spec = model_fn(features, labels, model_mode)

            self.assertIsNotNone(estimator_spec.loss)
            self.assertIsNotNone(estimator_spec.predictions)
            if mode in ('eval', 'eval_on_train'):
                if class_agnostic:
                    self.assertNotIn('detection_classes',
                                     estimator_spec.predictions)
                else:
                    detection_classes = estimator_spec.predictions[
                        'detection_classes']
                    self.assertEqual(batch_size,
                                     detection_classes.shape.as_list()[0])
                    self.assertEqual(tf.float32, detection_classes.dtype)
                detection_boxes = estimator_spec.predictions['detection_boxes']
                detection_scores = estimator_spec.predictions[
                    'detection_scores']
                num_detections = estimator_spec.predictions['num_detections']
                self.assertEqual(batch_size,
                                 detection_boxes.shape.as_list()[0])
                self.assertEqual(tf.float32, detection_boxes.dtype)
                self.assertEqual(batch_size,
                                 detection_scores.shape.as_list()[0])
                self.assertEqual(tf.float32, detection_scores.dtype)
                self.assertEqual(tf.float32, num_detections.dtype)
                if mode == 'eval':
                    self.assertIn('Detections_Left_Groundtruth_Right/0',
                                  estimator_spec.eval_metric_ops)
            if model_mode == tf.estimator.ModeKeys.TRAIN:
                self.assertIsNotNone(estimator_spec.train_op)
            return estimator_spec
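
All of the snippets above are excerpts and rely on the imports of their enclosing modules. A minimal sketch of those imports, assuming the TensorFlow 1.x Object Detection API layout (helpers such as `_make_initializable_iterator`, `get_pipeline_config_path`, `MODEL_NAME_FOR_TEST`, and the flag definitions live elsewhere in the same source files and are not reproduced here):

import functools

import tensorflow as tf
from absl import flags

# TPU-specific imports used by Example #8 (TF 1.x contrib paths).
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator

from object_detection import inputs
from object_detection import model_hparams
from object_detection import model_lib
from object_detection.builders import model_builder  # Examples #1 and #10
from object_detection.utils import config_util       # Example #5

FLAGS = flags.FLAGS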