def model_fn(images, labels):
  cls_outputs, box_outputs = model(images, training=False)
  detections = postprocess.generate_detections(config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'])
  # Defer the pure-Python COCO metric update behind tf.numpy_function so
  # this function can run inside a tf.function graph.
  tf.numpy_function(evaluator.update_state, [
      labels['groundtruth_data'],
      postprocess.transform_detections(detections)
  ], [])
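# A hedged usage sketch for model_fn above: iterate an evaluation dataset,
# then read the accumulated COCO metrics. `ds` (a tf.data.Dataset yielding
# (images, labels) batches) and `evaluator` (the coco_metric.EvaluationMetric
# captured by model_fn) are assumptions; neither is defined in the snippet.
for images, labels in ds:
  model_fn(images, labels)  # updates evaluator state as a side effect
metrics = evaluator.result()  # flat array of COCO AP metrics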
def _get_detections(self, images, labels):
  # Cast mixed-precision (fp16) model outputs back to fp32 before decoding.
  cls_outputs, box_outputs = util_keras.fp16_to_fp32_nested(
      self.model(images, training=False))
  detections = postprocess.generate_detections(self.config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'])
  tf.numpy_function(self.evaluator.update_state,
                    [labels['groundtruth_data'],
                     postprocess.transform_detections(detections)], [])
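# A sketch of how a method like _get_detections could be driven from a Keras
# callback: loop over a held-out dataset at epoch end, then read the metric.
# The on_epoch_end hook, `self.dataset`, and the 'val_AP' key are hypothetical
# illustrations, not the repo's exact callback code.
def on_epoch_end(self, epoch, logs=None):
  self.evaluator.reset_states()
  for images, labels in self.dataset:
    self._get_detections(images, labels)
  metrics = self.evaluator.result()
  if logs is not None:
    logs['val_AP'] = metrics[0]  # first entry is mean AP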
def test_postprocess_per_class_tf_nms(self):
  """Test postprocess with per-class NMS using the TensorFlow NMS."""
  tf.random.set_seed(1111)
  cls_outputs = {
      1: tf.random.normal([2, 4, 4, 2]),
      2: tf.random.normal([2, 2, 2, 2])
  }
  box_outputs = {
      1: tf.random.normal([2, 4, 4, 4]),
      2: tf.random.normal([2, 2, 2, 4])
  }
  cls_outputs_list = [cls_outputs[1], cls_outputs[2]]
  box_outputs_list = [box_outputs[1], box_outputs[2]]
  scales = [1.0, 2.0]
  ids = [0, 1]
  self.params['max_detection_points'] = 10
  self.params['nms_configs']['pyfunc'] = False
  outputs = postprocess.generate_detections(self.params, cls_outputs_list,
                                            box_outputs_list, scales, ids)
  self.assertAllClose(
      outputs.numpy(),
      [[[0., -1.177383, 1.793507, 8.340945, 4.418388, 0.901576, 2.],
        [0., 5.676410, 6.102146, 7.785691, 8.537168, 0.888125, 1.]],
       [[1., 5.885427, 13.529362, 11.410081, 14.154047, 0.884544, 1.],
        [1., 8.145872, -9.660868, 14.173973, 10.41237, 0.815883, 2.]]])
  outputs_flipped = postprocess.generate_detections(
      self.params, cls_outputs_list, box_outputs_list, scales, ids, True)
  self.assertAllClose(
      outputs_flipped.numpy(),
      [[[0., -0.340945, 1.793507, 9.177383, 4.418388, 0.901576, 2.],
        [0., 0.214309, 6.102146, 2.32359, 8.537168, 0.888125, 1.]],
       [[1., 4.589919, 13.529362, 10.114573, 14.154047, 0.884544, 1.],
        [1., 1.826027, -9.660868, 7.854128, 10.41237, 0.815883, 2.]]])
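# Each detection row checked in the test above has the layout
# [image_id, ymin, xmin, ymax, xmax, score, class]. A small unpacking sketch
# (illustration only; `outputs` is the tensor from the test body):
image_id, ymin, xmin, ymax, xmax, score, cls = outputs.numpy()[0, 0]
# postprocess.transform_detections() later converts each
# [ymin, xmin, ymax, xmax] box to COCO-style [x, y, width, height] before it
# reaches evaluator.update_state.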
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    if not FLAGS.only_network:
      nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(images)
      nms_classes_bs += postprocess.CLASS_OFFSET

      height, width = utils.parse_image_size(config.image_size)
      normalize_factor = tf.constant([height, width, height, width],
                                     dtype=tf.float32)
      nms_boxes_bs *= normalize_factor
      if labels['image_scales'] is not None:
        scales = tf.expand_dims(tf.expand_dims(labels['image_scales'], -1), -1)
        nms_boxes_bs = nms_boxes_bs * tf.cast(scales, nms_boxes_bs.dtype)
      detections = postprocess.generate_detections_from_nms_output(
          nms_boxes_bs, nms_classes_bs, nms_scores_bs, labels['source_ids'])
    else:
      cls_outputs, box_outputs = lite_runner.run(images)
      detections = postprocess.generate_detections(
          config,
          cls_outputs,
          box_outputs,
          labels['image_scales'],
          labels['source_ids'],
          pre_class_nms=FLAGS.pre_class_nms)

    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
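# main() above depends on a LiteRunner class that is not shown here. Below is
# a minimal sketch of such a wrapper built on the standard tf.lite.Interpreter
# API; the repo's actual class may differ (e.g. signature runners, dtype
# handling for quantized models).
class LiteRunner:

  def __init__(self, tflite_path, only_network):
    self.interpreter = tf.lite.Interpreter(model_path=tflite_path)
    self.interpreter.allocate_tensors()
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()
    self.only_network = only_network

  def run(self, images):
    # Feed one batch and return all model outputs: four NMS tensors for an
    # end-to-end model, or (cls_outputs, box_outputs) when only_network=True.
    self.interpreter.set_tensor(self.input_details[0]['index'], images.numpy())
    self.interpreter.invoke()
    return [
        self.interpreter.get_tensor(detail['index'])
        for detail in self.output_details
    ]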