def set_model(self, model: tf.keras.Model):
  self.model = model
  config = model.config
  self.config = config
  label_map = label_util.get_label_map(config.label_map)
  log_dir = os.path.join(config.model_dir, 'coco')
  self.file_writer = tf.summary.create_file_writer(log_dir)
  self.evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)
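# Usage sketch (illustrative, not part of the original module): set_model only
# needs a Keras model whose `config` attribute exposes `label_map`, `model_dir`,
# and `val_json_file`. The class name `CocoEvalCallback` and all paths below are
# assumptions for illustration; Keras normally calls set_model itself when the
# callback is passed to `model.fit(..., callbacks=[...])`.
config = hparams_config.get_efficientdet_config('efficientdet-d0')
config.model_dir = '/tmp/efficientdet-d0'        # summaries land in <model_dir>/coco
config.val_json_file = 'instances_val2017.json'  # COCO groundtruth annotations

model = efficientdet_keras.EfficientDetNet(config=config)
callback = CocoEvalCallback()  # hypothetical name for the callback owning set_model
callback.set_model(model)      # builds the summary writer and the COCO evaluator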
def estimator_metric_fn(self, detections, groundtruth_data):
  """Constructs the metric function for tf.TPUEstimator.

  For each metric, we return the evaluation op and an update op; the update op
  is shared across all metrics and simply appends the set of detections to the
  `self.detections` list. The metric op is invoked after all examples have been
  seen and computes the aggregate COCO metrics. See the API details at:
  https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec

  Args:
    detections: Detection results in a tensor with each row representing
      [image_id, x, y, width, height, score, class].
    groundtruth_data: Groundtruth annotations in a tensor with each row
      representing [y1, x1, y2, x2, is_crowd, area, class].

  Returns:
    metrics_dict: A dictionary mapping from evaluation name to a tuple of
      operations (`metric_op`, `update_op`). `update_op` appends the detections
      for the metric to the `self.detections` list.
  """
  with tf.name_scope('coco_metric'):
    if self.testdev_dir:
      update_op = tf.numpy_function(self.update_state,
                                    [groundtruth_data, detections], [])
      metrics = tf.numpy_function(self.result, [], tf.float32)
      metrics_dict = {'AP': (metrics, update_op)}
      return metrics_dict
    else:
      update_op = tf.numpy_function(self.update_state,
                                    [groundtruth_data, detections], [])
      metrics = tf.numpy_function(self.result, [], tf.float32)
      metrics_dict = {}
      for i, name in enumerate(self.metric_names):
        metrics_dict[name] = (metrics[i], update_op)

      if self.label_map:
        # Process per-class AP.
        label_map = label_util.get_label_map(self.label_map)
        for i, cid in enumerate(sorted(label_map.keys())):
          name = 'AP_/%s' % label_map[cid]
          metrics_dict[name] = (metrics[i + len(self.metric_names)], update_op)
      return metrics_dict
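# Wiring sketch (an assumption, not code from this repo): estimator_metric_fn
# is meant to be the `metric_fn` half of a TPUEstimatorSpec `eval_metrics`
# pair. TPUEstimator calls it with the listed tensors and consumes the
# returned dict of (metric_op, update_op) tuples. The model_fn body and the
# `detections` / `total_loss` names below are placeholders.
def model_fn(features, labels, mode, params):
  # ... forward pass and post-processing producing `detections` and
  # `total_loss` are elided in this sketch ...
  evaluator = coco_metric.EvaluationMetric(
      filename=params['val_json_file'], label_map=params.get('label_map'))
  if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        eval_metrics=(evaluator.estimator_metric_fn,
                      [detections, labels['groundtruth_data']]))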
def visualize_image(image,
                    boxes,
                    classes,
                    scores,
                    label_map=None,
                    min_score_thresh=0.01,
                    max_boxes_to_draw=1000,
                    line_thickness=2,
                    **kwargs):
  """Visualizes a given image.

  Args:
    image: an image with shape [H, W, C].
    boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
    classes: a class prediction with shape [N].
    scores: a list of float values with shape [N].
    label_map: a dictionary from class id to name.
    min_score_thresh: minimal score for showing. If the class probability is
      below this threshold, the object will not be drawn.
    max_boxes_to_draw: maximum number of bounding boxes to draw.
    line_thickness: thickness of the bounding box lines.
    **kwargs: extra parameters.

  Returns:
    output_image: an output image with annotated boxes and classes.
  """
  label_map = label_util.get_label_map(label_map or 'coco')
  category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}
  img = np.array(image)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      img,
      boxes,
      classes,
      scores,
      category_index,
      min_score_thresh=min_score_thresh,
      max_boxes_to_draw=max_boxes_to_draw,
      line_thickness=line_thickness,
      **kwargs)
  return img
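# Usage sketch with synthetic data (illustrative only; in practice the image
# would come from a real file and the boxes/classes/scores from a detector).
import numpy as np
from PIL import Image

image = np.zeros((480, 640, 3), dtype=np.uint8)               # stand-in for a real [H, W, 3] image
boxes = np.array([[50., 30., 220., 180.]], dtype=np.float32)  # [ymin, xmin, ymax, xmax] in pixels
classes = np.array([1], dtype=np.int32)                       # COCO class id 1 = person
scores = np.array([0.87], dtype=np.float32)

annotated = visualize_image(image, boxes, classes, scores,
                            label_map='coco', min_score_thresh=0.4)
Image.fromarray(annotated).save('annotated.jpg')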
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    if not FLAGS.only_network:
      nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(images)
      nms_classes_bs += postprocess.CLASS_OFFSET

      height, width = utils.parse_image_size(config.image_size)
      normalize_factor = tf.constant([height, width, height, width],
                                     dtype=tf.float32)
      nms_boxes_bs *= normalize_factor
      if labels['image_scales'] is not None:
        scales = tf.expand_dims(tf.expand_dims(labels['image_scales'], -1), -1)
        nms_boxes_bs = nms_boxes_bs * tf.cast(scales, nms_boxes_bs.dtype)
      detections = postprocess.generate_detections_from_nms_output(
          nms_boxes_bs, nms_classes_bs, nms_scores_bs, labels['source_ids'])
    else:
      cls_outputs, box_outputs = lite_runner.run(images)
      detections = postprocess.generate_detections(
          config,
          cls_outputs,
          box_outputs,
          labels['image_scales'],
          labels['source_ids'],
          per_class_nms=FLAGS.per_class_nms)

    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
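# Illustrative sketch of the tf.lite.Interpreter pattern that a helper like
# LiteRunner presumably wraps (LiteRunner is defined elsewhere in this repo;
# this is an assumption about the underlying mechanism, not its actual code).
# The model path is a placeholder.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='efficientdet-lite0.tflite')
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()

# Feed one dummy batch with the shape and dtype the model expects.
dummy = np.zeros(input_detail['shape'], dtype=input_detail['dtype'])
interpreter.set_tensor(input_detail['index'], dummy)
interpreter.invoke()
outputs = [interpreter.get_tensor(d['index']) for d in output_details]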
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  if config.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
  elif config.strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

  with ds_strategy.scope():
    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((None, *config.image_size, 3))
    util_keras.restore_ckpt(model,
                            tf.train.latest_checkpoint(FLAGS.model_dir),
                            config.moving_average_decay,
                            skip_mismatch=False)

    @tf.function
    def model_fn(images, labels):
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config, cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state, [
          labels['groundtruth_data'],
          postprocess.transform_detections(detections)
      ], [])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)

    # dataset
    batch_size = FLAGS.batch_size  # global batch size.
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    if FLAGS.eval_samples:
      ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
    ds = ds_strategy.experimental_distribute_dataset(ds)

    # evaluate all images.
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
      ds_strategy.run(model_fn, (images, labels))
      pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
      metric_dict[name] = metrics[i]

    if label_map:
      for i, cid in enumerate(sorted(label_map.keys())):
        name = 'AP_/%s' % label_map[cid]
        metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
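# Optional follow-up sketch (not part of the original script): the final
# metric_dict could also be written to TensorBoard, mirroring the file writer
# that set_model creates under <model_dir>/coco. The log directory is a
# placeholder, and `metric_dict` is assumed to be the dict built above.
writer = tf.summary.create_file_writer('/tmp/efficientdet-d0/coco')
with writer.as_default():
  for name, value in metric_dict.items():
    tf.summary.scalar(name, value, step=0)
  writer.flush()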