def _assert_model_fn_for_predict(self, configs):
    """Build a model_fn in PREDICT mode and check its EstimatorSpec fields.

    Args:
      configs: dict of Object Detection API config protos, as produced by
        config_util (keys used here: 'model', 'eval_config',
        'eval_input_config').
    """
    with tf.Graph().as_default():
        # Draw one batch of features from the eval input pipeline; labels
        # are discarded because PREDICT mode takes none.
        eval_input_fn = inputs.create_eval_input_fn(
            configs['eval_config'],
            configs['eval_input_config'],
            configs['model'])
        features, _ = _make_initializable_iterator(eval_input_fn()).get_next()

        build_detection_model = functools.partial(
            model_builder.build,
            model_config=configs['model'],
            is_training=False)
        overrides = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        model_fn = model_lib.create_model_fn(
            build_detection_model, configs, overrides)
        spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)

        # PREDICT mode: no loss or train op, but predictions and a serving
        # signature must be present.
        self.assertIsNone(spec.loss)
        self.assertIsNone(spec.train_op)
        self.assertIsNotNone(spec.predictions)
        self.assertIsNotNone(spec.export_outputs)
        self.assertIn(
            tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
            spec.export_outputs)
def _assert_model_fn_for_train_eval(self, configs, mode, class_agnostic=False):
    """Build a model_fn in TRAIN/EVAL mode and check its EstimatorSpec.

    Args:
      configs: dict of Object Detection API config protos (keys used:
        'model', 'train_config', 'eval_config', 'train_input_config',
        'eval_input_config').
      mode: one of 'train', 'eval', or 'eval_on_train' ('eval_on_train'
        evaluates using the training input pipeline).
      class_agnostic: if True, expect no 'detection_classes' prediction.

    Returns:
      The tf.estimator.EstimatorSpec produced by the model_fn.

    Raises:
      ValueError: if `mode` is not one of the three supported values.
    """
    model_config = configs['model']
    train_config = configs['train_config']
    with tf.Graph().as_default():
        if mode == 'train':
            features, labels = _make_initializable_iterator(
                inputs.create_train_input_fn(configs['train_config'],
                                             configs['train_input_config'],
                                             configs['model'])()).get_next()
            model_mode = tf.estimator.ModeKeys.TRAIN
            batch_size = train_config.batch_size
        elif mode == 'eval':
            features, labels = _make_initializable_iterator(
                inputs.create_eval_input_fn(configs['eval_config'],
                                            configs['eval_input_config'],
                                            configs['model'])()).get_next()
            model_mode = tf.estimator.ModeKeys.EVAL
            batch_size = 1
        elif mode == 'eval_on_train':
            features, labels = _make_initializable_iterator(
                inputs.create_eval_input_fn(configs['eval_config'],
                                            configs['train_input_config'],
                                            configs['model'])()).get_next()
            model_mode = tf.estimator.ModeKeys.EVAL
            batch_size = 1
        else:
            # Fail fast on an unknown mode instead of crashing later with
            # an UnboundLocalError on features/labels/model_mode/batch_size.
            raise ValueError(
                'mode must be one of train, eval, eval_on_train; got %r'
                % (mode,))

        detection_model_fn = functools.partial(
            model_builder.build, model_config=model_config, is_training=True)

        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')

        model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                             hparams)
        estimator_spec = model_fn(features, labels, model_mode)

        self.assertIsNotNone(estimator_spec.loss)
        self.assertIsNotNone(estimator_spec.predictions)
        if mode == 'eval' or mode == 'eval_on_train':
            if class_agnostic:
                self.assertNotIn('detection_classes',
                                 estimator_spec.predictions)
            else:
                detection_classes = estimator_spec.predictions[
                    'detection_classes']
                self.assertEqual(batch_size,
                                 detection_classes.shape.as_list()[0])
                self.assertEqual(tf.float32, detection_classes.dtype)
            # Boxes/scores/num_detections are expected regardless of
            # class-agnostic mode.
            detection_boxes = estimator_spec.predictions['detection_boxes']
            detection_scores = estimator_spec.predictions['detection_scores']
            num_detections = estimator_spec.predictions['num_detections']
            self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
            self.assertEqual(tf.float32, detection_boxes.dtype)
            self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
            self.assertEqual(tf.float32, detection_scores.dtype)
            self.assertEqual(tf.float32, num_detections.dtype)
            if mode == 'eval':
                # Side-by-side visualization metric is only registered for
                # true eval (not eval_on_train).
                self.assertIn('Detections_Left_Groundtruth_Right/0',
                              estimator_spec.eval_metric_ops)
        if model_mode == tf.estimator.ModeKeys.TRAIN:
            self.assertIsNotNone(estimator_spec.train_op)
        return estimator_spec
def _assert_model_fn_for_train_eval(self, configs, mode, class_agnostic=False):
    """Build a model_fn in TRAIN/EVAL mode ('_bkp' variant) and check it.

    NOTE(review): this redefines a method of the same name declared earlier
    in the file; Python keeps only the last definition. Consider renaming
    one of the two (e.g. `_assert_model_fn_for_train_eval_bkp`).

    Args:
      configs: dict of Object Detection API config protos (keys used:
        'model', 'train_config', 'eval_config', 'train_input_config',
        'eval_input_config').
      mode: one of 'train_bkp', 'eval_bkp', or 'eval_on_train'.
      class_agnostic: if True, expect no 'detection_classes' prediction.

    Returns:
      The tf.estimator.EstimatorSpec produced by the model_fn.

    Raises:
      ValueError: if `mode` is not one of the three supported values.
    """
    model_config = configs['model']
    train_config = configs['train_config']
    with tf.Graph().as_default():
        if mode == 'train_bkp':
            features, labels = inputs.create_train_input_fn(
                configs['train_config'],
                configs['train_input_config'],
                configs['model'])()
            model_mode = tf.estimator.ModeKeys.TRAIN
            batch_size = train_config.batch_size
        elif mode == 'eval_bkp':
            features, labels = inputs.create_eval_input_fn(
                configs['eval_config'],
                configs['eval_input_config'],
                configs['model'])()
            model_mode = tf.estimator.ModeKeys.EVAL
            batch_size = 1
        elif mode == 'eval_on_train':
            features, labels = inputs.create_eval_input_fn(
                configs['eval_config'],
                configs['train_input_config'],
                configs['model'])()
            model_mode = tf.estimator.ModeKeys.EVAL
            batch_size = 1
        else:
            # Fail fast on an unknown mode (e.g. plain 'train') instead of
            # crashing later with an UnboundLocalError on features/labels.
            raise ValueError(
                'mode must be one of train_bkp, eval_bkp, eval_on_train; '
                'got %r' % (mode,))

        detection_model_fn = functools.partial(model_builder.build,
                                               model_config=model_config,
                                               is_training=True)

        hparams = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')

        model_fn = model_lib.create_model_fn(detection_model_fn, configs,
                                             hparams)
        estimator_spec = model_fn(features, labels, model_mode)

        self.assertIsNotNone(estimator_spec.loss)
        self.assertIsNotNone(estimator_spec.predictions)
        if class_agnostic:
            self.assertNotIn('detection_classes', estimator_spec.predictions)
        else:
            detection_classes = estimator_spec.predictions[
                'detection_classes']
            self.assertEqual(batch_size,
                             detection_classes.shape.as_list()[0])
            self.assertEqual(tf.float32, detection_classes.dtype)
        detection_boxes = estimator_spec.predictions['detection_boxes']
        detection_scores = estimator_spec.predictions['detection_scores']
        num_detections = estimator_spec.predictions['num_detections']
        self.assertEqual(batch_size, detection_boxes.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_boxes.dtype)
        self.assertEqual(batch_size, detection_scores.shape.as_list()[0])
        self.assertEqual(tf.float32, detection_scores.dtype)
        self.assertEqual(tf.float32, num_detections.dtype)
        if model_mode == tf.estimator.ModeKeys.TRAIN:
            self.assertIsNotNone(estimator_spec.train_op)
        return estimator_spec
def _assert_model_fn_for_predict(self, configs):
    """Check the EstimatorSpec produced by a model_fn in PREDICT mode.

    NOTE: a method of the same name is defined earlier in the file; the
    later definition is the one Python keeps.

    Args:
      configs: dict of Object Detection API config protos (keys used:
        'model', 'eval_config', 'eval_input_config').
    """
    with tf.Graph().as_default():
        # One batch of eval features; PREDICT mode ignores labels.
        input_fn = inputs.create_eval_input_fn(
            configs['eval_config'],
            configs['eval_input_config'],
            configs['model'])
        features, _ = _make_initializable_iterator(input_fn()).get_next()

        build_model = functools.partial(
            model_builder.build,
            model_config=configs['model'],
            is_training=False)
        hp = model_hparams.create_hparams(
            hparams_overrides='load_pretrained=false')
        model_fn = model_lib.create_model_fn(build_model, configs, hp)
        spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT)

        # Prediction specs carry no loss/train_op but must expose
        # predictions and a serving signature.
        self.assertIsNone(spec.loss)
        self.assertIsNone(spec.train_op)
        self.assertIsNotNone(spec.predictions)
        self.assertIsNotNone(spec.export_outputs)
        self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
                      spec.export_outputs)
def image_trainmodel(dataset, source, config_path, ip, port, model_name,
                     label_map_path=None, label=None, model_dir="model_dir",
                     export_dir="export_dir", data_dir="data_dir",
                     steps_per_epoch=-1, threshold=0.5, temp_files_num=5,
                     max_checkpoints_num=5, run_eval=False, eval_steps=50,
                     use_display_name=False, tf_logging_level=40, api=None,
                     exclude=None):
    """Prodigy recipe: annotate images while (re)training a TF Object
    Detection API model served over Tensorflow Serving.

    Builds an Estimator from the ODAPI pipeline config, runs one dummy
    training step if no checkpoint exists (so a SavedModel can be
    exported), exports the model, and returns the Prodigy recipe
    components dict (stream, update callback, UI config).

    Args:
      dataset: Prodigy dataset name annotations are saved to.
      source: image source passed to Prodigy's stream loader.
      config_path: path to the ODAPI pipeline .config file.
      ip, port, model_name: Tensorflow Serving endpoint details
        (presumably consumed by get_image_stream — confirm against that
        helper's signature).
      label_map_path: optional override for the label map declared in the
        pipeline config.
      label: optional list of labels; defaults to all label-map entries.
      model_dir / export_dir / data_dir: working directories, created if
        missing.
      steps_per_epoch: training steps per update; -1 presumably means
        "derive from data" — confirm in update_odapi_model.
      threshold: minimum detection score to show in the UI stream.
      temp_files_num, max_checkpoints_num: housekeeping limits.
      run_eval, eval_steps: optional evaluation during updates.
      use_display_name: use display_name field when reading the label map.
      tf_logging_level: TF logging verbosity (40 = ERROR).
      api, exclude: standard Prodigy stream options.

    Returns:
      dict of Prodigy recipe components ("view_id", "stream", "update", ...).
    """
    tf.logging.set_verbosity(tf_logging_level)
    _create_dir(model_dir)
    _create_dir(export_dir)
    _create_dir(data_dir)
    log("Building the Tensorflow Object Detection API model")
    run_config = tf.estimator.RunConfig(model_dir=model_dir,
                                        keep_checkpoint_max=max_checkpoints_num
                                        )
    odapi_configs = config_util.get_configs_from_pipeline_file(config_path)
    if label_map_path:
        log("Overriding label_map_path given in the odapi config file")
        # Keep train and eval pipelines pointed at the same label map.
        odapi_configs["train_input_config"].label_map_path = label_map_path
        odapi_configs["eval_input_config"].label_map_path = label_map_path
    else:
        label_map_path = odapi_configs["train_input_config"].label_map_path

    # Set input reader config low to make sure you don't hit memory errors
    train_input_config = odapi_configs["train_input_config"]
    train_input_config.shuffle = False
    train_input_config.num_readers = 1
    train_input_config.num_parallel_batches = 1
    train_input_config.num_prefetch_batches = -1  # autotune
    train_input_config.queue_capacity = 2
    train_input_config.min_after_dequeue = 1
    train_input_config.read_block_length = 10
    train_input_config.prefetch_size = 2
    train_input_config.num_parallel_map_calls = 2

    # key class names -> integer ids (as read from the label map)
    reverse_class_mapping_dict = label_map_util.get_label_map_dict(
        label_map_path=label_map_path,
        use_display_name=use_display_name)
    if label is None:
        # Default to annotating every class in the label map.
        label = [k for k in reverse_class_mapping_dict.keys()]
    # key int -> class name (inverse mapping, used to render detections)
    class_mapping_dict = {v: k for k, v in reverse_class_mapping_dict.items()}

    detection_model_fn = functools.partial(model_builder.build,
                                           model_config=odapi_configs["model"])
    model_func = create_model_fn(detection_model_fn,
                                 hparams=create_hparams(None),
                                 configs=odapi_configs,
                                 use_tpu=False, postprocess_on_cpu=False)
    estimator = tf.estimator.Estimator(model_fn=model_func, config=run_config)
    if estimator.latest_checkpoint() is None:
        # A checkpoint must exist before export; run one throwaway step.
        log(("Running a single dummy training step! "
             "Else saving SavedModel for Tensorflow Serving does not work"))
        train_input_config = odapi_configs["train_input_config"]
        train_input_fn = create_train_input_fn(
            train_config=odapi_configs["train_config"],
            model_config=odapi_configs["model"],
            train_input_config=train_input_config)
        estimator.train(input_fn=train_input_fn, steps=1)
    _export_saved_model(export_dir, estimator, odapi_configs)
    log("Make sure to start Tensorflow Serving before opening Prodigy")
    log(("Training and evaluation (if enabled) can be monitored by "
         "pointing Tensorboard to {} directory").format(model_dir))
    stream = get_stream(source, api=api, loader="images", input_key="image")
    stream = fetch_images(stream)
    # Callback invoked by Prodigy with accepted annotations: retrains the
    # estimator and re-exports the SavedModel.
    update_fn = functools.partial(
        update_odapi_model,
        estimator=estimator,
        data_dir=data_dir,
        reverse_class_mapping_dict=reverse_class_mapping_dict,
        odapi_configs=odapi_configs,
        steps_per_epoch=steps_per_epoch,
        export_dir=export_dir,
        run_eval=run_eval,
        eval_steps=eval_steps,
        temp_files_num=temp_files_num)
    return {
        "view_id": "image_manual",
        "dataset": dataset,
        "stream": get_image_stream(stream, class_mapping_dict, ip, port,
                                   model_name, float(threshold)),
        "exclude": exclude,
        "update": update_fn,
        # "progress": lambda *args, **kwargs: 0,
        'config': {
            'label': ', '.join(label) if label is not None else 'all',
            'labels': label,  # Selectable label options,
        }
    }