def test_error_with_bad_eval_model_config(self):
  """Tests that a TypeError is raised with improper eval model config."""
  configs = _get_configs_for_model('ssd_inception_v2_pets')
  configs['model'].ssd.num_classes = 37
  eval_input_fn = inputs.create_eval_input_fn(
      eval_config=configs['eval_config'],
      eval_input_config=configs['eval_input_config'],
      model_config=configs['eval_config'])  # Expecting `DetectionModel`.
  with self.assertRaises(TypeError):
    eval_input_fn()

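# Sketch (hypothetical test, not in the original file): for contrast with the
# error case above, passing the actual model config builds an eval input
# function without raising. Uses the same pets config and class-count override
# as the tests in this file.
def test_eval_input_fn_with_model_config_sketch(self):
  """Sketch: the correct `model_config` argument does not raise."""
  configs = _get_configs_for_model('ssd_inception_v2_pets')
  configs['model'].ssd.num_classes = 37
  eval_input_fn = inputs.create_eval_input_fn(
      eval_config=configs['eval_config'],
      eval_input_config=configs['eval_input_config'],
      model_config=configs['model'])
  features, _ = eval_input_fn()
  self.assertIn(fields.InputDataFields.image, features)
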
def _assert_outputs_for_train_eval(self, configs, mode, class_agnostic=False):
  """Builds a model_fn from `configs` and checks its TRAIN/EVAL outputs."""
  model_config = configs['model']
  train_config = configs['train_config']
  with tf.Graph().as_default():
    if mode == tf.estimator.ModeKeys.TRAIN:
      features, labels = inputs.create_train_input_fn(
          configs['train_config'],
          configs['train_input_config'],
          configs['model'])()
      batch_size = train_config.batch_size
    else:
      features, labels = inputs.create_eval_input_fn(
          configs['eval_config'],
          configs['eval_input_config'],
          configs['model'])()
      batch_size = 1

    detection_model_fn = functools.partial(
        model_builder.build, model_config=model_config, is_training=True)

    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')

    model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
    estimator_spec = model_fn(features, labels, mode)

    self.assertIsNotNone(estimator_spec.loss)
    self.assertIsNotNone(estimator_spec.predictions)
    if class_agnostic:
      self.assertNotIn('detection_classes', estimator_spec.predictions)
    else:
      detection_classes = estimator_spec.predictions['detection_classes']
      self.assertEqual(batch_size, detection_classes.shape.as_list()[0])
      self.assertEqual(tf.float32, detection_classes.dtype)
    detection_boxes = estimator_spec.predictions['detection_boxes']
    detection_scores = estimator_spec.predictions['detection_scores']
    num_detections = estimator_spec.predictions['num_detections']
    self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
    self.assertEqual(tf.float32, detection_boxes.dtype)
    self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
    self.assertEqual(tf.float32, detection_scores.dtype)
    self.assertEqual(tf.float32, num_detections.dtype)
    if mode == tf.estimator.ModeKeys.TRAIN:
      self.assertIsNotNone(estimator_spec.train_op)
    return estimator_spec

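# Usage sketch (hypothetical test, not in the original file): how the helper
# above might be exercised on the TRAIN path, using the SSD pets config and
# class-count override already used elsewhere in this file.
def test_model_fn_in_train_mode_sketch(self):
  """Sketch: checks model_fn outputs for an SSD model in TRAIN mode."""
  configs = _get_configs_for_model('ssd_inception_v2_pets')
  configs['model'].ssd.num_classes = 37
  self._assert_outputs_for_train_eval(configs, tf.estimator.ModeKeys.TRAIN)
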
def test_faster_rcnn_resnet50_eval_input(self):
  """Tests the eval input function for FasterRcnnResnet50."""
  configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
  model_config = configs['model']
  model_config.faster_rcnn.num_classes = 37
  eval_input_fn = inputs.create_eval_input_fn(
      configs['eval_config'], configs['eval_input_config'], model_config)
  features, labels = eval_input_fn()
  self.assertAllEqual([1, None, None, 3],
                      features[fields.InputDataFields.image].shape.as_list())
  self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
  self.assertAllEqual(
      [1, None, None, 3],
      features[fields.InputDataFields.original_image].shape.as_list())
  self.assertEqual(tf.uint8,
                   features[fields.InputDataFields.original_image].dtype)
  self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
  self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
  self.assertAllEqual(
      [1, None, 4],
      labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
  self.assertEqual(tf.float32,
                   labels[fields.InputDataFields.groundtruth_boxes].dtype)
  self.assertAllEqual(
      [1, None, model_config.faster_rcnn.num_classes],
      labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
  self.assertEqual(tf.float32,
                   labels[fields.InputDataFields.groundtruth_classes].dtype)
  self.assertAllEqual(
      [1, None],
      labels[fields.InputDataFields.groundtruth_area].shape.as_list())
  self.assertEqual(tf.float32,
                   labels[fields.InputDataFields.groundtruth_area].dtype)
  self.assertAllEqual(
      [1, None],
      labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
  self.assertEqual(
      tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
  self.assertAllEqual(
      [1, None],
      labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
  self.assertEqual(
      tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)

def _assert_outputs_for_predict(self, configs):
  """Builds a model_fn from `configs` and checks its PREDICT outputs."""
  model_config = configs['model']

  with tf.Graph().as_default():
    features, _ = inputs.create_eval_input_fn(
        configs['eval_config'],
        configs['eval_input_config'],
        configs['model'])()
    detection_model_fn = functools.partial(
        model_builder.build, model_config=model_config, is_training=False)

    hparams = model_hparams.create_hparams(
        hparams_overrides='load_pretrained=false')

    model_fn = model.create_model_fn(detection_model_fn, configs, hparams)
    estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)

    self.assertIsNone(estimator_spec.loss)
    self.assertIsNone(estimator_spec.train_op)
    self.assertIsNotNone(estimator_spec.predictions)
    self.assertIsNotNone(estimator_spec.export_outputs)
    self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                  estimator_spec.export_outputs)

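# Usage sketch (hypothetical test, not in the original file): how the helper
# above might be exercised, using the Faster R-CNN pets config and class-count
# override already used in this file.
def test_model_fn_in_predict_mode_sketch(self):
  """Sketch: checks model_fn export outputs for Faster R-CNN in PREDICT mode."""
  configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
  configs['model'].faster_rcnn.num_classes = 37
  self._assert_outputs_for_predict(configs)
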
def create_estimator(run_config,
                     hparams,
                     pipeline_config_path,
                     train_steps=None,
                     eval_steps=None,
                     train_batch_size=None,
                     model_fn_creator=model.create_model_fn,
                     use_tpu=False,
                     num_shards=1,
                     params=None,
                     **kwargs):
  """Creates an `Estimator` object.

  Args:
    run_config: A `RunConfig`.
    hparams: A `HParams`.
    pipeline_config_path: A path to a pipeline config file.
    train_steps: Number of training steps. If None, the number of training
      steps is set from the `TrainConfig` proto.
    eval_steps: Number of evaluation steps per evaluation cycle. If None, the
      number of evaluation steps is set from the `EvalConfig` proto.
    train_batch_size: Training batch size. If None, use the batch size from the
      `TrainConfig` proto.
    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
      Follows the signature:
      * Args:
        * `detection_model_fn`: Function that returns `DetectionModel` instance.
        * `configs`: Dictionary of pipeline config objects.
        * `hparams`: `HParams` object.
      * Returns:
        `model_fn` for `Estimator`.
    use_tpu: Boolean, whether training and evaluation should run on TPU.
    num_shards: Number of shards (TPU cores).
    params: Parameter dictionary passed from the estimator.
    **kwargs: Additional keyword arguments for configuration override.

  Returns:
    estimator: An `Estimator` object used for training and evaluation.
    train_input_fn: Input function for the training loop.
    eval_input_fn: Input function for the evaluation run.
    train_steps: Number of training steps either from arg `train_steps` or the
      `TrainConfig` proto.
    eval_steps: Number of evaluation steps either from arg `eval_steps` or the
      `EvalConfig` proto.
  """
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  configs = config_util.merge_external_params_with_configs(
      configs,
      hparams,
      train_steps=train_steps,
      eval_steps=eval_steps,
      batch_size=train_batch_size,
      **kwargs)
  model_config = configs['model']
  train_config = configs['train_config']
  train_input_config = configs['train_input_config']
  eval_config = configs['eval_config']
  eval_input_config = configs['eval_input_config']

  if params is None:
    params = {}

  if train_steps is None:
    train_steps = train_config.num_steps if train_config.num_steps else None

  if eval_steps is None:
    eval_steps = eval_config.num_examples if eval_config.num_examples else None

  detection_model_fn = functools.partial(
      model_builder.build, model_config=model_config)

  # Create the input functions for TRAIN/EVAL.
  train_input_fn = inputs.create_train_input_fn(
      train_config=train_config,
      train_input_config=train_input_config,
      model_config=model_config)
  eval_input_fn = inputs.create_eval_input_fn(
      eval_config=eval_config,
      eval_input_config=eval_input_config,
      model_config=model_config)

  estimator = tpu_estimator.TPUEstimator(
      model_fn=model_fn_creator(detection_model_fn, configs, hparams, use_tpu),
      train_batch_size=train_config.batch_size,
      # For each core, only batch size 1 is supported for eval.
      eval_batch_size=num_shards * 1 if use_tpu else 1,
      use_tpu=use_tpu,
      config=run_config,
      params=params)
  return estimator, train_input_fn, eval_input_fn, train_steps, eval_steps

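# Usage sketch (not part of the original module): one way `create_estimator`
# might be wired into a training loop. Assumptions: `run_config` is a
# `tf.contrib.tpu.RunConfig` (required by `TPUEstimator`), `hparams` comes from
# something like `model_hparams.create_hparams()`, and training runs on
# CPU/GPU (`use_tpu=False`). The helper name below is hypothetical.
def _example_train_with_estimator(run_config, hparams, pipeline_config_path):
  """Sketch: builds an estimator from a pipeline config and trains it."""
  estimator, train_input_fn, _, train_steps, _ = create_estimator(
      run_config, hparams, pipeline_config_path, use_tpu=False)
  # If the pipeline config does not set num_steps, train_steps is None and
  # training continues until the input function is exhausted.
  estimator.train(input_fn=train_input_fn, max_steps=train_steps)
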
def populate_experiment(run_config,
                        hparams,
                        pipeline_config_path,
                        train_steps=None,
                        eval_steps=None,
                        model_fn_creator=create_model_fn,
                        **kwargs):
  """Populates an `Experiment` object.

  Args:
    run_config: A `RunConfig`.
    hparams: A `HParams`.
    pipeline_config_path: A path to a pipeline config file.
    train_steps: Number of training steps. If None, the number of training
      steps is set from the `TrainConfig` proto.
    eval_steps: Number of evaluation steps per evaluation cycle. If None, the
      number of evaluation steps is set from the `EvalConfig` proto.
    model_fn_creator: A function that creates a `model_fn` for `Estimator`.
      Follows the signature:
      * Args:
        * `detection_model_fn`: Function that returns `DetectionModel` instance.
        * `configs`: Dictionary of pipeline config objects.
        * `hparams`: `HParams` object.
      * Returns:
        `model_fn` for `Estimator`.
    **kwargs: Additional keyword arguments for configuration override.

  Returns:
    An `Experiment` that defines all aspects of training, evaluation, and
    export.
  """
  configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
  configs = config_util.merge_external_params_with_configs(
      configs,
      hparams,
      train_steps=train_steps,
      eval_steps=eval_steps,
      **kwargs)
  model_config = configs['model']
  train_config = configs['train_config']
  train_input_config = configs['train_input_config']
  eval_config = configs['eval_config']
  eval_input_config = configs['eval_input_config']

  if train_steps is None:
    train_steps = train_config.num_steps if train_config.num_steps else None

  if eval_steps is None:
    eval_steps = eval_config.num_examples if eval_config.num_examples else None

  detection_model_fn = functools.partial(
      model_builder.build, model_config=model_config)

  # Create the input functions for TRAIN/EVAL.
  train_input_fn = inputs.create_train_input_fn(
      train_config=train_config,
      train_input_config=train_input_config,
      model_config=model_config)
  eval_input_fn = inputs.create_eval_input_fn(
      eval_config=eval_config,
      eval_input_config=eval_input_config,
      model_config=model_config)

  export_strategies = [
      tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
          serving_input_fn=inputs.create_predict_input_fn(
              model_config=model_config))
  ]

  estimator = tf.estimator.Estimator(
      model_fn=model_fn_creator(detection_model_fn, configs, hparams),
      config=run_config)

  if run_config.is_chief:
    # Store the final pipeline config for traceability.
    pipeline_config_final = config_util.create_pipeline_proto_from_configs(
        configs)
    pipeline_config_final_path = os.path.join(estimator.model_dir,
                                              'pipeline.config')
    config_text = text_format.MessageToString(pipeline_config_final)
    with tf.gfile.Open(pipeline_config_final_path, 'wb') as f:
      tf.logging.info('Writing as-run pipeline config file to %s',
                      pipeline_config_final_path)
      f.write(config_text)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=train_steps,
      eval_steps=eval_steps,
      export_strategies=export_strategies,
      eval_delay_secs=120,)

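# Usage sketch (not part of the original module): running the populated
# `Experiment` locally. Assumptions: `run_config` is a
# `tf.contrib.learn.RunConfig` whose model_dir points at the desired output
# directory, and `hparams` comes from something like
# `model_hparams.create_hparams()`. The helper name below is hypothetical.
def _example_run_experiment(run_config, hparams, pipeline_config_path):
  """Sketch: populates an Experiment and runs a local train/eval loop."""
  experiment = populate_experiment(run_config, hparams, pipeline_config_path)
  # Alternates training and evaluation according to the Experiment settings
  # (eval_delay_secs, train_steps, eval_steps) set above.
  experiment.train_and_evaluate()
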