Example #1
    def __init__(self, min_level, max_level, num_scales, aspect_ratios,
                 anchor_scale, image_size):
        """Constructs multiscale anchors.

    Args:
      min_level: integer number of minimum level of the output feature pyramid.
      max_level: integer number of maximum level of the output feature pyramid.
      num_scales: integer number representing intermediate scales added
        on each level. For instance, num_scales=2 adds two additional
        anchor scales [2^0, 2^0.5] on each level.
      aspect_ratios: list of float numbers representing the aspect ratios of
        anchors added on each level. For instance, aspect_ratios = [1.0, 2.0,
        0.5] adds three anchors on each level.
      anchor_scale: float number representing the scale of the base anchor
        size relative to the feature stride 2^level. Can also be a list with
        one value per level.
      image_size: integer or tuple of integers for the input image size.
    """
        self.min_level = min_level
        self.max_level = max_level
        self.num_scales = num_scales
        self.aspect_ratios = aspect_ratios
        if isinstance(anchor_scale, (list, tuple)):
            assert len(anchor_scale) == max_level - min_level + 1
            self.anchor_scales = anchor_scale
        else:
            self.anchor_scales = [anchor_scale] * (max_level - min_level + 1)
        self.image_size = utils.parse_image_size(image_size)
        self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
        self.config = self._generate_configs()
        self.boxes = self._generate_boxes()
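A hedged usage sketch: this constructor belongs to the anchors.Anchors class invoked in Example #11, and the values below are typical EfficientDet-style defaults assumed for illustration rather than taken from this snippet.

# Illustrative construction, mirroring the anchors.Anchors(...) call in
# Example #11; the concrete values are assumed defaults.
eval_anchors = anchors.Anchors(
    min_level=3,                     # finest pyramid level (P3)
    max_level=7,                     # coarsest pyramid level (P7)
    num_scales=3,                    # adds scales [2^0, 2^(1/3), 2^(2/3)]
    aspect_ratios=[1.0, 2.0, 0.5],   # three anchors per scale per level
    anchor_scale=4.0,                # base anchor = 4.0 * feature stride
    image_size=512)                  # parsed to (512, 512) internally
all_anchor_boxes = eval_anchors.boxes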
Example #2
 def _get_model_and_spec(self, tflite=None):
     """Get model instance and export spec."""
     if self.only_network or tflite:
         image_size = utils.parse_image_size(self.params['image_size'])
         spec = tf.TensorSpec(shape=[self.batch_size, *image_size, 3],
                              dtype=tf.float32,
                              name='images')
         if self.only_network:
             export_model = ExportNetwork(self.model)
         else:
             # If exporting to TFLite, we should remove preprocessing since
             # TFLite doesn't support dynamic shapes.
             logging.info('Export model without preprocessing.')
             # This section is only used for TFLite, so we use the applicable
             # pre_ & post_ modes.
             export_model = ExportModel(self.model,
                                        pre_mode=None,
                                        post_mode='tflite')
         return export_model, spec
     else:
         spec = tf.TensorSpec(shape=[self.batch_size, None, None, 3],
                              dtype=tf.uint8,
                              name='images')
         export_model = ExportModel(self.model)
         return export_model, spec
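The returned (export_model, spec) pair is later handed to get_concrete_function when saving (see the export() method in Example #16). A minimal self-contained sketch of that pattern, using a hypothetical wrapper module and an assumed 512x512 input size:

import tensorflow as tf

class ExportWrapper(tf.Module):  # hypothetical stand-in for ExportModel
    @tf.function
    def __call__(self, images):
        return tf.identity(images)

module = ExportWrapper()
spec = tf.TensorSpec(shape=[1, 512, 512, 3], dtype=tf.float32, name='images')
concrete_fn = module.__call__.get_concrete_function(spec)
tf.saved_model.save(module, '/tmp/export_demo', signatures=concrete_fn)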
Example #3
    def _preprocessing(self,
                       raw_images,
                       image_size,
                       mean_rgb,
                       stddev_rgb,
                       mode=None):
        """Preprocess images before feeding to the network."""
        if not mode:
            return raw_images, None

        image_size = utils.parse_image_size(image_size)
        if mode != 'infer':
            # We only support inference for now.
            raise ValueError("preprocessing mode must be 'infer' or empty")

        def map_fn(image):
            input_processor = dataloader.DetectionInputProcessor(
                image, image_size)
            input_processor.normalize_image(mean_rgb, stddev_rgb)
            input_processor.set_scale_factors_to_output_size()
            image = input_processor.resize_and_crop_image()
            image_scale = input_processor.image_scale_to_original
            return image, image_scale

        if raw_images.shape.as_list()[0]:  # fixed batch size.
            batch_size = raw_images.shape.as_list()[0]
            outputs = [map_fn(raw_images[i]) for i in range(batch_size)]
            return [tf.stack(y) for y in zip(*outputs)]

        # otherwise treat it as dynamic batch size.
        return tf.vectorized_map(map_fn, raw_images)
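The dynamic-batch branch relies on tf.vectorized_map accepting a function that returns a tuple of tensors. A tiny standalone demonstration of that behavior, independent of the detection pipeline:

import tensorflow as tf

images = tf.random.uniform([4, 8, 8, 3])

def demo_map_fn(image):
    # Per-image work returning a (tensor, scalar) tuple, like map_fn above.
    return image / 255.0, tf.reduce_max(image)

scaled, maxima = tf.vectorized_map(demo_map_fn, images)
print(scaled.shape, maxima.shape)  # (4, 8, 8, 3) (4,)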
Example #4
def generate_detections(params,
                        cls_outputs,
                        box_outputs,
                        image_scales,
                        image_ids,
                        flip=False,
                        pre_class_nms=True):
    """A legacy interface for generating [id, x, y, w, h, score, class]."""
    _, width = utils.parse_image_size(params['image_size'])

    original_image_widths = tf.expand_dims(image_scales, -1) * width

    if params['nms_configs'].get('pyfunc', True):
        # Numpy-based soft-NMS gives better accuracy than the TensorFlow
        # builtin, but the reason why is unknown.
        detections_bs = []
        boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
        for index in range(boxes.shape[0]):
            nms_configs = params['nms_configs']
            detections = tf.numpy_function(
                functools.partial(nms_np.per_class_nms,
                                  nms_configs=nms_configs), [
                                      boxes[index],
                                      scores[index],
                                      classes[index],
                                      tf.slice(image_ids, [index], [1]),
                                      tf.slice(image_scales, [index], [1]),
                                      params['num_classes'],
                                      nms_configs['max_output_size'],
                                  ], tf.float32)

            if flip:
                detections = tf.stack(
                    [
                        detections[:, 0],
                        # the mirrored location of the left edge is the image width
                        # minus the position of the right edge
                        original_image_widths[index] - detections[:, 3],
                        detections[:, 2],
                        # the mirrored location of the right edge is the image width
                        # minus the position of the left edge
                        original_image_widths[index] - detections[:, 1],
                        detections[:, 4],
                        detections[:, 5],
                        detections[:, 6],
                    ],
                    axis=-1)
            detections_bs.append(detections)
        return tf.stack(detections_bs, axis=0, name='detections')

    if pre_class_nms:
        postprocess = postprocess_per_class
    else:
        postprocess = postprocess_global
    nms_boxes_bs, nms_scores_bs, nms_classes_bs, _ = postprocess(
        params, cls_outputs, box_outputs, image_scales)

    return generate_detections_from_nms_output(nms_boxes_bs, nms_classes_bs,
                                               nms_scores_bs, image_ids,
                                               original_image_widths, flip)
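The flip branch's arithmetic is worth a worked check: a horizontal flip of an image of width W maps x to W - x, so the new left edge is W minus the old right edge and vice versa. A small numeric sketch, assuming the [id, x_min, y_min, x_max, y_max, score, class] column layout implied by indices 1 and 3 above:

# Worked check of the horizontal-flip box math (plain Python).
width = 100.0
x_min, x_max = 20.0, 50.0
flipped_x_min = width - x_max  # 50.0: old right edge becomes new left edge
flipped_x_max = width - x_min  # 80.0: old left edge becomes new right edge
assert flipped_x_min < flipped_x_max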
Example #5
    def evaluate_tflite(self,
                        tflite_filepath: str,
                        dataset: tf.data.Dataset,
                        steps: int,
                        json_file: Optional[str] = None) -> Dict[str, float]:
        """Evaluate the EfficientDet TFLite model.

    Args:
      tflite_filepath: File path to the TFLite model.
      dataset: tf.data.Dataset used for evaluation.
      steps: Number of steps to evaluate the model.
      json_file: JSON with COCO data format containing golden bounding boxes.
        Used for validation. If None, use the ground truth from the dataloader.
        Refer to
        https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
          for the description of COCO data format.

    Returns:
      A dict containing AP metrics.
    """
        # TODO(b/182441458): Use the task library for evaluation instead once it
        # supports python interface.
        evaluator, label_map = self._get_evaluator_and_label_map(json_file)
        dataset = dataset.take(steps)

        lite_runner = eval_tflite.LiteRunner(tflite_filepath,
                                             only_network=False)
        progbar = tf.keras.utils.Progbar(steps)
        for i, (images, labels) in enumerate(dataset):
            # Get the output result after post-processing NMS op.
            nms_boxes, nms_classes, nms_scores, _ = lite_runner.run(images)

            # CLASS_OFFSET is used since the label_id for `background` is 0 in
            # label_map while it's not actually included in the model. We don't
            # need to add the offset in the Android application.
            nms_classes += postprocess.CLASS_OFFSET

            height, width = utils.parse_image_size(self.config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes, nms_classes, nms_scores, labels['source_ids'])

            detections = postprocess.transform_detections(detections)
            evaluator.update_state(labels['groundtruth_data'].numpy(),
                                   detections.numpy())
            progbar.update(i + 1)
        print()

        metric_dict = self._get_metric_dict(evaluator, label_map)
        return metric_dict
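A hedged call-site sketch; the file path and dataset are placeholders, and `model` stands for whatever object exposes evaluate_tflite (its class is not shown in this snippet):

# Hypothetical call site; val_dataset is a tf.data.Dataset of (images, labels).
metrics = model.evaluate_tflite(
    tflite_filepath='/tmp/efficientdet.tflite',  # placeholder path
    dataset=val_dataset,
    steps=100,        # evaluate 100 batches
    json_file=None)   # fall back to the dataloader's ground truth
print(metrics)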
Example #6
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        cls_outputs, box_outputs = lite_runner.run(images)
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'])
        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
Example #7
def main(_):
    # Parse and override hparams
    config = hparams_config.get_detection_config(FLAGS.model_name)
    config.override(FLAGS.hparams)

    # Parse image size in case it is in string format.
    config.image_size = utils.parse_image_size(config.image_size)
    try:
        recordinspect = RecordInspect(config)
        recordinspect.visualize()
    except Exception as e:  # pylint: disable=broad-except
        logging.error(e)
    else:
        logging.info('Done, please find samples at %s', FLAGS.save_samples_dir)
Example #8
    def __init__(self,
                 model_name: Text,
                 logdir: Text,
                 tensorrt: Text = False,
                 use_xla: bool = False,
                 ckpt_path: Text = None,
                 export_ckpt: Text = None,
                 saved_model_dir: Text = None,
                 tflite_path: Text = None,
                 batch_size: int = 1,
                 hparams: Text = '',
                 **kwargs):  # pytype: disable=annotation-type-mismatch
        self.model_name = model_name
        self.logdir = logdir
        self.tensorrt = tensorrt
        self.use_xla = use_xla
        self.ckpt_path = ckpt_path
        self.export_ckpt = export_ckpt
        self.saved_model_dir = saved_model_dir
        self.tflite_path = tflite_path

        model_config = hparams_config.get_detection_config(model_name)
        model_config.override(hparams)  # Add custom overrides
        model_config.is_training_bn = False
        model_config.image_size = utils.parse_image_size(
            model_config.image_size)

        # If batch size is 0, then build a graph with dynamic batch size.
        self.batch_size = batch_size or None
        self.labels_shape = [batch_size, model_config.num_classes]

        # A hack to make flag consistent with nms configs.
        if kwargs.get('score_thresh', None):
            model_config.nms_configs.score_thresh = kwargs['score_thresh']
        if kwargs.get('nms_method', None):
            model_config.nms_configs.method = kwargs['nms_method']
        if kwargs.get('max_output_size', None):
            model_config.nms_configs.max_output_size = kwargs[
                'max_output_size']

        height, width = model_config.image_size
        if model_config.data_format == 'channels_first':
            self.inputs_shape = [batch_size, 3, height, width]
        else:
            self.inputs_shape = [batch_size, height, width, 3]

        self.model_config = model_config
Example #9
    def set_training_random_scale_factors(self,
                                          scale_min,
                                          scale_max,
                                          target_size=None):
        """Set the parameters for multiscale training.

    Notably, if train and eval use different sizes, then target_size should be
    set to the eval size to avoid a discrepancy between train and eval.

    Args:
      scale_min: minimal scale factor.
      scale_max: maximum scale factor.
      target_size: targeted size, usually same as eval. If None, use train size.
    """
        if not target_size:
            target_size = self._output_size
        target_size = utils.parse_image_size(target_size)
        logging.info('target_size = %s, output_size = %s', target_size,
                     self._output_size)

        # Select a random scale factor.
        random_scale_factor = tf.random.uniform([], scale_min, scale_max)
        scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
        scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)

        # Recompute the accurate scale_factor using rounded scaled image size.
        height = tf.cast(tf.shape(self._image)[0], tf.float32)
        width = tf.cast(tf.shape(self._image)[1], tf.float32)
        image_scale_y = tf.cast(scaled_y, tf.float32) / height
        image_scale_x = tf.cast(scaled_x, tf.float32) / width
        image_scale = tf.minimum(image_scale_x, image_scale_y)

        # Select non-zero random offset (x, y) if scaled image is larger than
        # self._output_size.
        scaled_height = tf.cast(height * image_scale, tf.int32)
        scaled_width = tf.cast(width * image_scale, tf.int32)
        offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
        offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
        offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
        offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
        offset_y = tf.cast(offset_y, tf.int32)
        offset_x = tf.cast(offset_x, tf.int32)
        self._image_scale = image_scale
        self._scaled_height = scaled_height
        self._scaled_width = scaled_width
        self._crop_offset_x = offset_x
        self._crop_offset_y = offset_y
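A numeric walkthrough of the scale math above may help. Assume target_size = (640, 640), a raw 480x640 (height x width) image, and a drawn random_scale_factor of 0.75; the following plain Python mirrors the tensor ops:

# Plain-Python mirror of the scale computation, with assumed example values.
target_size = (640, 640)        # (height, width)
height, width = 480.0, 640.0    # raw image size
random_scale_factor = 0.75

scaled_y = int(random_scale_factor * target_size[0])    # 480
scaled_x = int(random_scale_factor * target_size[1])    # 480
image_scale = min(scaled_x / width, scaled_y / height)  # min(0.75, 1.0) = 0.75
scaled_height = int(height * image_scale)               # 360
scaled_width = int(width * image_scale)                 # 480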
Example #10
 def build(self, params_override=None):
     """Build model and restore checkpoints."""
     params = copy.deepcopy(self.params)
     if params_override:
         params.update(params_override)
     config = hparams_config.get_efficientdet_config(self.model_name)
     config.override(params)
     if self.only_network:
         self.model = efficientdet_keras.EfficientDetNet(config=config)
     else:
         self.model = efficientdet_keras.EfficientDetModel(config=config)
     image_size = utils.parse_image_size(params['image_size'])
     self.model.build((self.batch_size, *image_size, 3))
     util_keras.restore_ckpt(self.model,
                             self.ckpt_path,
                             self.params['moving_average_decay'],
                             skip_mismatch=False)
Example #11
def tflite_pre_nms(params, cls_outputs, box_outputs):
    """Pre-NMS that is compatible with TFLite's custom NMS op.

  For details, see tensorflow/lite/kernels/detection_postprocess.cc

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [1, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [1, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].

  Returns:
    boxes: boxes encoded as {y_center, x_center, height, width}
    scores: scores converted from `cls_outputs` logits using sigmoid
    anchors: normalized anchors encoded as {y_center, x_center, height, width}
  """
    cls_outputs = to_list(cls_outputs)
    box_outputs = to_list(box_outputs)
    cls_outputs, box_outputs = merge_class_box_level_outputs(
        params, cls_outputs, box_outputs)
    eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                   params['num_scales'],
                                   params['aspect_ratios'],
                                   params['anchor_scale'],
                                   params['image_size'])

    # TODO(b/175166514): Consider computing Top-K boxes & anchors here. We don't
    # do this currently since the resultant graph does not support TFLite
    # delegates well. `topk_class_boxes` won't work as-is, since the outputs
    # will need to be modified appropriately for TFLite op's consumption.

    # TFLite's object detection APIs require normalized anchors.
    height, width = utils.parse_image_size(params['image_size'])
    normalize_factor = tf.constant([height, width, height, width],
                                   dtype=tf.float32)
    normalized_anchors = eval_anchors.boxes / normalize_factor
    decoded_anchors = anchors.decode_anchors_to_centersize(
        box_outputs, normalized_anchors)

    # convert logits to scores.
    scores = tf.math.sigmoid(cls_outputs)

    return box_outputs, scores, decoded_anchors
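A hedged call-site sketch for tflite_pre_nms; the params values are assumed EfficientDet-style defaults, and cls_outputs/box_outputs stand for the raw per-level head outputs described in the docstring:

# Hypothetical call; cls_outputs and box_outputs come from the detection
# heads (one tensor per level), and the params values are assumed defaults.
params = dict(min_level=3, max_level=7, num_scales=3,
              aspect_ratios=[1.0, 2.0, 0.5], anchor_scale=4.0,
              image_size=512, num_classes=90)
boxes, scores, anchors_cs = tflite_pre_nms(params, cls_outputs, box_outputs)
# Per the docstring: boxes/anchors_cs use {y_center, x_center, height, width},
# and scores are per-class sigmoid probabilities.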
Example #12
def main(_):
    if FLAGS.strategy == 'tpu':
        tf.disable_eager_execution()
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tpu_grpc_url = tpu_cluster_resolver.get_master()
        tf.Session.reset(tpu_grpc_url)
    else:
        tpu_cluster_resolver = None

    # Check data path
    if FLAGS.mode in ('train', 'train_and_eval'):
        if FLAGS.train_file_pattern is None:
            raise RuntimeError('Must specify --train_file_pattern for train.')
    if FLAGS.mode in ('eval', 'train_and_eval'):
        if FLAGS.val_file_pattern is None:
            raise RuntimeError('Must specify --val_file_pattern for eval.')

    # Parse and override hparams
    config = hparams_config.get_detection_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
        config.num_epochs = FLAGS.num_epochs

    # Parse image size in case it is in string format.
    config.image_size = utils.parse_image_size(config.image_size)

    # The following is for spatial partitioning. `features` has one tensor while
    # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
    # partition is performed on `features` and all partitionable tensors of
    # `labels`; see the partition logic below.
    # In the TPUEstimator context, `shard` and `replica` mean the same thing;
    # following the API, both terms are used here.
    if FLAGS.use_spatial_partition:
        # Checks input_partition_dims agrees with num_cores_per_replica.
        if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
            raise RuntimeError(
                '--num_cores_per_replica must be a product of array '
                'elements in --input_partition_dims.')

        labels_partition_dims = {
            'mean_num_positives': None,
            'source_ids': None,
            'groundtruth_data': None,
            'image_scales': None,
            'image_masks': None,
        }
        # The input partition logic: we partition only the partitionable tensors.
        feat_sizes = utils.get_feat_sizes(config.get('image_size'),
                                          config.get('max_level'))
        for level in range(config.get('min_level'),
                           config.get('max_level') + 1):

            def _can_partition(spatial_dim):
                partitionable_index = np.where(
                    spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
                return len(partitionable_index[0]) == len(
                    FLAGS.input_partition_dims)

            spatial_dim = feat_sizes[level]
            if _can_partition(spatial_dim['height']) and _can_partition(
                    spatial_dim['width']):
                labels_partition_dims['box_targets_%d' %
                                      level] = FLAGS.input_partition_dims
                labels_partition_dims['cls_targets_%d' %
                                      level] = FLAGS.input_partition_dims
            else:
                labels_partition_dims['box_targets_%d' % level] = None
                labels_partition_dims['cls_targets_%d' % level] = None
        num_cores_per_replica = FLAGS.num_cores_per_replica
        input_partition_dims = [
            FLAGS.input_partition_dims, labels_partition_dims
        ]
        num_shards = FLAGS.num_cores // num_cores_per_replica
    else:
        num_cores_per_replica = None
        input_partition_dims = None
        num_shards = FLAGS.num_cores

    params = dict(config.as_dict(),
                  model_name=FLAGS.model_name,
                  iterations_per_loop=FLAGS.iterations_per_loop,
                  model_dir=FLAGS.model_dir,
                  num_shards=num_shards,
                  num_examples_per_epoch=FLAGS.num_examples_per_epoch,
                  strategy=FLAGS.strategy,
                  backbone_ckpt=FLAGS.backbone_ckpt,
                  ckpt=FLAGS.ckpt,
                  val_json_file=FLAGS.val_json_file,
                  testdev_dir=FLAGS.testdev_dir,
                  profile=FLAGS.profile,
                  mode=FLAGS.mode)
    config_proto = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    if FLAGS.strategy != 'tpu':
        if FLAGS.use_xla:
            config_proto.graph_options.optimizer_options.global_jit_level = (
                tf.OptimizerOptions.ON_1)
            config_proto.gpu_options.allow_growth = True

    model_dir = FLAGS.model_dir
    model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
    max_instances_per_image = config.max_instances_per_image
    if FLAGS.eval_samples:
        eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                         FLAGS.eval_batch_size)
    else:
        eval_steps = None
    total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
    train_steps = total_examples // FLAGS.train_batch_size
    logging.info(params)

    if not tf.io.gfile.exists(model_dir):
        tf.io.gfile.makedirs(model_dir)

    config_file = os.path.join(model_dir, 'config.yaml')
    if not tf.io.gfile.exists(config_file):
        tf.io.gfile.GFile(config_file, 'w').write(str(config))

    train_input_fn = dataloader.InputReader(
        FLAGS.train_file_pattern,
        is_training=True,
        use_fake_data=FLAGS.use_fake_data,
        max_instances_per_image=max_instances_per_image)
    eval_input_fn = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=FLAGS.use_fake_data,
        max_instances_per_image=max_instances_per_image)

    if FLAGS.strategy == 'tpu':
        tpu_config = tf.estimator.tpu.TPUConfig(
            FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
            num_cores_per_replica=num_cores_per_replica,
            input_partition_dims=input_partition_dims,
            per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.
            PER_HOST_V2)
        run_config = tf.estimator.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            model_dir=model_dir,
            log_step_count_steps=FLAGS.iterations_per_loop,
            session_config=config_proto,
            tpu_config=tpu_config,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tf_random_seed=FLAGS.tf_random_seed,
        )
        # TPUEstimator can do both train and eval.
        train_est = tf.estimator.tpu.TPUEstimator(
            model_fn=model_fn_instance,
            train_batch_size=FLAGS.train_batch_size,
            eval_batch_size=FLAGS.eval_batch_size,
            config=run_config,
            params=params)
        eval_est = train_est
    else:
        strategy = None
        if FLAGS.strategy == 'gpus':
            strategy = tf.distribute.MirroredStrategy()
        run_config = tf.estimator.RunConfig(
            model_dir=model_dir,
            train_distribute=strategy,
            log_step_count_steps=FLAGS.iterations_per_loop,
            session_config=config_proto,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tf_random_seed=FLAGS.tf_random_seed,
        )

        def get_estimator(global_batch_size):
            params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
            params['batch_size'] = global_batch_size // params['num_shards']
            return tf.estimator.Estimator(model_fn=model_fn_instance,
                                          config=run_config,
                                          params=params)

        # train and eval need different estimator due to different batch size.
        train_est = get_estimator(FLAGS.train_batch_size)
        eval_est = get_estimator(FLAGS.eval_batch_size)

    # start train/eval flow.
    if FLAGS.mode == 'train':
        train_est.train(input_fn=train_input_fn, max_steps=train_steps)
        if FLAGS.eval_after_train:
            eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    elif FLAGS.mode == 'eval':
        # Run evaluation when there's a new checkpoint
        for ckpt in tf.train.checkpoints_iterator(
                FLAGS.model_dir,
                min_interval_secs=FLAGS.min_eval_interval,
                timeout=FLAGS.eval_timeout):

            logging.info('Starting to evaluate.')
            try:
                eval_results = eval_est.evaluate(eval_input_fn,
                                                 steps=eval_steps)
                # Terminate eval job when final checkpoint is reached.
                try:
                    current_step = int(os.path.basename(ckpt).split('-')[1])
                except IndexError:
                    logging.info('%s has no global step info: stop!', ckpt)
                    break

                utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
                if current_step >= train_steps:
                    logging.info('Eval finished step %d/%d', current_step,
                                 train_steps)
                    break

            except tf.errors.NotFoundError:
                # Checkpoint might have been deleted by the time eval finished.
                # We simply skip such cases.
                logging.info('Checkpoint %s no longer exists, skipping.', ckpt)

    elif FLAGS.mode == 'train_and_eval':
        ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
        try:
            step = int(os.path.basename(ckpt).split('-')[1])
            current_epoch = (step * FLAGS.train_batch_size //
                             FLAGS.num_examples_per_epoch)
            logging.info('found ckpt at step %d (epoch %d)', step,
                         current_epoch)
        except (IndexError, TypeError):
            logging.info('Folder %s has no ckpt with valid step.',
                         FLAGS.model_dir)
            current_epoch = 0

        def run_train_and_eval(e):
            print('\n   =====> Starting training, epoch: %d.' % e)
            train_est.train(input_fn=train_input_fn,
                            max_steps=e * FLAGS.num_examples_per_epoch //
                            FLAGS.train_batch_size)
            print('\n   =====> Starting evaluation, epoch: %d.' % e)
            eval_results = eval_est.evaluate(input_fn=eval_input_fn,
                                             steps=eval_steps)
            ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
            utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

        epochs_per_cycle = 1  # a higher number reduces graph construction overhead.
        for e in range(current_epoch + 1, config.num_epochs + 1,
                       epochs_per_cycle):
            if FLAGS.run_epoch_in_child_process:
                p = multiprocessing.Process(target=run_train_and_eval,
                                            args=(e, ))
                p.start()
                p.join()
                if p.exitcode != 0:
                    return p.exitcode
            else:
                tf.compat.v1.reset_default_graph()
                run_train_and_eval(e)

    else:
        logging.info('Invalid mode: %s', FLAGS.mode)
Example #13
 def __init__(self, model_name):
     self.model_name = model_name
     config = hparams_config.get_detection_config(model_name)
     config.image_size = utils.parse_image_size(config.image_size)
     config.update({'debug': False})
     self.config = config
Example #14
  def __init__(self,
               model_name: str,
               uri: str,
               hparams: str = '',
               model_dir: Optional[str] = None,
               epochs: int = 50,
               batch_size: int = 64,
               steps_per_execution: int = 1,
               moving_average_decay: int = 0,
               var_freeze_expr: str = '(efficientnet|fpn_cells|resample_p6)',
               tflite_max_detections: int = 25,
               strategy: Optional[str] = None,
               tpu: Optional[str] = None,
               gcp_project: Optional[str] = None,
               tpu_zone: Optional[str] = None,
               use_xla: bool = False,
               profile: bool = False,
               debug: bool = False,
               tf_random_seed: int = 111111,
               verbose: int = 0) -> None:
    """Initialze an instance with model paramaters.

    Args:
      model_name: Model name.
      uri: TF-Hub path/url to EfficientDet module.
      hparams: Hyperparameters used to overwrite default configuration. Can be
        1) a dict containing parameter names and values; 2) a string of comma
        separated k=v pairs of hyperparameters; or 3) a string naming a yaml
        file containing attributes to use as hyperparameters.
      model_dir: The location to save the model checkpoint files.
      epochs: Default training epochs.
      batch_size: Training & Evaluation batch size.
      steps_per_execution: Number of steps per training execution.
      moving_average_decay: Float. The decay to use for maintaining moving
        averages of the trained parameters.
      var_freeze_expr: Expression to freeze variables.
      tflite_max_detections: The max number of output detections in the TFLite
        model.
      strategy: A string specifying which distribution strategy to use.
        Accepted values are 'tpu', 'gpus', and None. 'tpu' means to use
        TPUStrategy. 'gpus' means to use MirroredStrategy for multi-gpus. If
        None, use TF default with OneDeviceStrategy.
      tpu: The Cloud TPU to use for training. This should be either the name
        used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470
          url.
      gcp_project: Project name for the Cloud TPU-enabled project. If not
        specified, we will attempt to automatically detect the GCE project from
        metadata.
      tpu_zone: GCE zone where the Cloud TPU is located in. If not specified, we
        will attempt to automatically detect the GCE zone from metadata.
      use_xla: Use XLA even if strategy is not tpu. If strategy is tpu, always
        use XLA, and this flag has no effect.
      profile: Enable profile mode.
      debug: Enable debug mode.
      tf_random_seed: Fixed random seed for deterministic execution across runs
        for debugging.
      verbose: verbosity mode for `tf.keras.callbacks.ModelCheckpoint`, 0 or 1.
    """
    self.model_name = model_name
    self.uri = uri
    self.batch_size = batch_size
    config = hparams_config.get_efficientdet_config(model_name)
    config.override(hparams)
    config.image_size = utils.parse_image_size(config.image_size)
    config.var_freeze_expr = var_freeze_expr
    config.moving_average_decay = moving_average_decay
    config.tflite_max_detections = tflite_max_detections
    if epochs:
      config.num_epochs = epochs

    if use_xla and strategy != 'tpu':
      tf.config.optimizer.set_jit(True)
      for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    if debug:
      tf.config.experimental_run_functions_eagerly(True)
      tf.debugging.set_log_device_placement(True)
      os.environ['TF_DETERMINISTIC_OPS'] = '1'
      tf.random.set_seed(tf_random_seed)
      logging.set_verbosity(logging.DEBUG)

    if strategy == 'tpu':
      tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
          tpu, zone=tpu_zone, project=gcp_project)
      tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
      tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
      ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
      logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
      tf.config.set_soft_device_placement(True)
    elif strategy == 'gpus':
      ds_strategy = tf.distribute.MirroredStrategy()
      logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
      if tf.config.list_physical_devices('GPU'):
        ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
      else:
        ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    self.ds_strategy = ds_strategy

    if model_dir is None:
      model_dir = tempfile.mkdtemp()
    params = dict(
        profile=profile,
        model_name=model_name,
        steps_per_execution=steps_per_execution,
        model_dir=model_dir,
        strategy=strategy,
        batch_size=batch_size,
        tf_random_seed=tf_random_seed,
        debug=debug,
        verbose=verbose)
    config.override(params, True)
    self.config = config

    # set mixed precision policy by keras api.
    precision = utils.get_precision(config.strategy, config.mixed_precision)
    policy = tf.keras.mixed_precision.experimental.Policy(precision)
    tf.keras.mixed_precision.experimental.set_policy(policy)
Example #15
def main(_):
    # Parse and override hparams
    config = hparams_config.get_detection_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
        config.num_epochs = FLAGS.num_epochs

    # Parse image size in case it is in string format.
    config.image_size = utils.parse_image_size(config.image_size)

    if FLAGS.use_xla and FLAGS.strategy != 'tpu':
        tf.config.optimizer.set_jit(True)
        for gpu in tf.config.list_physical_devices('GPU'):
            tf.config.experimental.set_memory_growth(gpu, True)

    if FLAGS.debug:
        tf.config.run_functions_eagerly(True)
        tf.debugging.set_log_device_placement(True)
        os.environ['TF_DETERMINISTIC_OPS'] = '1'
        tf.random.set_seed(FLAGS.tf_random_seed)
        logging.set_verbosity(logging.DEBUG)

    if FLAGS.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif FLAGS.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.batch_size
    params = dict(profile=FLAGS.profile,
                  model_name=FLAGS.model_name,
                  steps_per_execution=FLAGS.steps_per_execution,
                  model_dir=FLAGS.model_dir,
                  steps_per_epoch=steps_per_epoch,
                  strategy=FLAGS.strategy,
                  batch_size=FLAGS.batch_size,
                  tf_random_seed=FLAGS.tf_random_seed,
                  debug=FLAGS.debug,
                  val_json_file=FLAGS.val_json_file,
                  eval_samples=FLAGS.eval_samples,
                  num_shards=ds_strategy.num_replicas_in_sync)
    config.override(params, True)
    # set mixed precision policy by keras api.
    precision = utils.get_precision(config.strategy, config.mixed_precision)
    policy = tf.keras.mixed_precision.Policy(precision)
    tf.keras.mixed_precision.set_global_policy(policy)

    def get_dataset(is_training, config):
        file_pattern = (FLAGS.train_file_pattern
                        if is_training else FLAGS.val_file_pattern)
        if not file_pattern:
            raise ValueError('No matching files.')

        return dataloader.InputReader(
            file_pattern,
            is_training=is_training,
            use_fake_data=FLAGS.use_fake_data,
            max_instances_per_image=config.max_instances_per_image,
            debug=FLAGS.debug)(config.as_dict())

    with ds_strategy.scope():
        if config.model_optimizations:
            tfmot.set_config(config.model_optimizations.as_dict())
        if FLAGS.hub_module_url:
            model = train_lib.EfficientDetNetTrainHub(
                config=config, hub_module_url=FLAGS.hub_module_url)
        else:
            model = train_lib.EfficientDetNetTrain(config=config)
        model = setup_model(model, config)
        if FLAGS.pretrained_ckpt and not FLAGS.hub_module_url:
            ckpt_path = tf.train.latest_checkpoint(FLAGS.pretrained_ckpt)
            util_keras.restore_ckpt(model, ckpt_path,
                                    config.moving_average_decay)
        init_experimental(config)
        if 'train' in FLAGS.mode:
            val_dataset = get_dataset(False,
                                      config) if 'eval' in FLAGS.mode else None
            model.fit(
                get_dataset(True, config),
                epochs=config.num_epochs,
                steps_per_epoch=steps_per_epoch,
                callbacks=train_lib.get_callbacks(config.as_dict(),
                                                  val_dataset),
                validation_data=val_dataset,
                validation_steps=(FLAGS.eval_samples // FLAGS.batch_size))
        else:
            # Continuous eval.
            for ckpt in tf.train.checkpoints_iterator(FLAGS.model_dir,
                                                      min_interval_secs=180):
                logging.info('Starting to evaluate.')
                # Terminate eval job when final checkpoint is reached.
                try:
                    current_epoch = int(os.path.basename(ckpt).split('-')[1])
                except IndexError:
                    current_epoch = 0

                val_dataset = get_dataset(False, config)
                logging.info('start loading model.')
                model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))
                logging.info('finish loading model.')
                coco_eval = train_lib.COCOCallback(val_dataset, 1)
                coco_eval.set_model(model)
                eval_results = coco_eval.on_epoch_end(current_epoch)
                logging.info('eval results for %s: %s', ckpt, eval_results)

                try:
                    utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
                except tf.errors.NotFoundError:
                    # Checkpoint might have been deleted by the time eval finished.
                    logging.info('Checkpoint %s no longer exists, skipping.',
                                 ckpt)

                if current_epoch >= config.num_epochs or not current_epoch:
                    logging.info('Eval epoch %d / %d', current_epoch,
                                 config.num_epochs)
                    break
Example #16
    def export(self,
               output_dir: Optional[Text] = None,
               tensorrt: Optional[Text] = None,
               tflite: Optional[Text] = None,
               file_pattern: Optional[Text] = None,
               num_calibration_steps: int = 2000):
        """Export a saved model, frozen graph, and potential tflite/tensorrt model.

    Args:
      output_dir: the output folder for saved model.
      tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.
      tflite: Type for post-training quantization.
      file_pattern: Glob for tfrecords, e.g. coco/val-*.tfrecord.
      num_calibration_steps: Number of post-training quantization calibration
        steps to run.
    """
        export_model, input_spec = self._get_model_and_spec(tflite)
        image_size = utils.parse_image_size(self.params['image_size'])
        if output_dir:
            tf.saved_model.save(
                export_model,
                output_dir,
                signatures=export_model.__call__.get_concrete_function(
                    input_spec))
            logging.info('Model saved at %s', output_dir)

            # also save freeze pb file.
            graphdef = self.freeze(
                export_model.__call__.get_concrete_function(input_spec))
            proto_path = tf.io.write_graph(graphdef,
                                           output_dir,
                                           self.model_name + '_frozen.pb',
                                           as_text=False)
            logging.info('Frozen graph saved at %s', proto_path)

        if tflite:
            shape = (self.batch_size, *image_size, 3)
            input_spec = tf.TensorSpec(shape=shape,
                                       dtype=input_spec.dtype,
                                       name=input_spec.name)
            # from_saved_model supports advanced converter features like op fusing.
            converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)
            if tflite == 'FP32':
                converter.optimizations = [tf.lite.Optimize.DEFAULT]
                converter.target_spec.supported_types = [tf.float32]
            elif tflite == 'FP16':
                converter.optimizations = [tf.lite.Optimize.DEFAULT]
                converter.target_spec.supported_types = [tf.float16]
            elif tflite == 'INT8':
                # Enables MLIR-based post-training quantization.
                converter.experimental_new_quantizer = True
                if file_pattern:
                    config = hparams_config.get_efficientdet_config(
                        self.model_name)
                    config.override(self.params)
                    ds = dataloader.InputReader(
                        file_pattern,
                        is_training=False,
                        max_instances_per_image=config.max_instances_per_image)(
                            config, batch_size=self.batch_size)

                    def representative_dataset_gen():
                        for image, _ in ds.take(num_calibration_steps):
                            yield [image]
                else:  # Used for debugging; can be removed later.
                    logging.warning(
                        'No representative dataset given; calibrating with '
                        'fake data. Use a real dataset for better accuracy.')
                    num_calibration_steps = 10

                    def representative_dataset_gen():  # rewrite this for real data.
                        for _ in range(num_calibration_steps):
                            yield [tf.ones(shape, dtype=input_spec.dtype)]

                converter.representative_dataset = representative_dataset_gen
                converter.optimizations = [tf.lite.Optimize.DEFAULT]
                converter.inference_input_type = tf.uint8
                # TFLite's custom NMS op isn't supported by post-training quant,
                # so we add TFLITE_BUILTINS as well.
                supported_ops = [
                    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,
                    tf.lite.OpsSet.TFLITE_BUILTINS
                ]
                converter.target_spec.supported_ops = supported_ops

            else:
                raise ValueError(
                    f'Invalid tflite {tflite}: must be FP32, FP16, INT8.')

            tflite_path = os.path.join(output_dir, tflite.lower() + '.tflite')
            tflite_model = converter.convert()
            tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
            logging.info('TFLite is saved at %s', tflite_path)

        if tensorrt:
            trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
            conversion_params = tf.experimental.tensorrt.ConversionParams(
                max_workspace_size_bytes=(2 << 20),
                maximum_cached_engines=1,
                precision_mode=tensorrt.upper())
            converter = tf.experimental.tensorrt.Converter(
                output_dir, conversion_params=conversion_params)
            converter.convert()
            converter.save(trt_path)
            logging.info('TensorRT model is saved at %s', trt_path)
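A hedged usage sketch of export(), mirroring the driver.export(...) call in Example #20; `driver` stands for the ServingDriver-style object that owns this method, and the paths are placeholders:

# Hypothetical call site; output_dir and file_pattern are placeholders.
driver.export(
    output_dir='/tmp/efficientdet_export',
    tensorrt=None,                        # or 'FP32' / 'FP16' / 'INT8'
    tflite='INT8',                        # post-training quantization type
    file_pattern='coco/val-*.tfrecord',   # calibration data, per the docstring
    num_calibration_steps=2000)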
Example #17
def clip_boxes(boxes: T, image_size: int) -> T:
    """Clip boxes to fit the image size."""
    # parse_image_size returns (height, width); tuple * 2 yields
    # (height, width, height, width), matching [y_min, x_min, y_max, x_max].
    image_size = utils.parse_image_size(image_size) * 2
    return tf.clip_by_value(boxes, [0], image_size)
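A standalone check of the clipping behavior, assuming image_size parses to (height, width) = (512, 640), so the tuple doubling yields (512, 640, 512, 640):

import tensorflow as tf

boxes = tf.constant([[-10.0, 5.0, 700.0, 650.0]])  # [y_min, x_min, y_max, x_max]
clipped = tf.clip_by_value(boxes, [0.0], [512.0, 640.0, 512.0, 640.0])
print(clipped.numpy())  # [[  0.   5. 512. 640.]]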
Example #18
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                             label_map=label_map)

    # dataset
    batch_size = 1
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    eval_samples = FLAGS.eval_samples
    if eval_samples:
        ds = ds.take((eval_samples + batch_size - 1) // batch_size)

    # Network
    lite_runner = LiteRunner(FLAGS.tflite_path, FLAGS.only_network)
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar(
        (eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
        if not FLAGS.only_network:
            nms_boxes_bs, nms_classes_bs, nms_scores_bs, _ = lite_runner.run(
                images)
            nms_classes_bs += postprocess.CLASS_OFFSET

            height, width = utils.parse_image_size(config.image_size)
            normalize_factor = tf.constant([height, width, height, width],
                                           dtype=tf.float32)
            nms_boxes_bs *= normalize_factor
            if labels['image_scales'] is not None:
                scales = tf.expand_dims(
                    tf.expand_dims(labels['image_scales'], -1), -1)
                nms_boxes_bs = nms_boxes_bs * tf.cast(scales,
                                                      nms_boxes_bs.dtype)
            detections = postprocess.generate_detections_from_nms_output(
                nms_boxes_bs, nms_classes_bs, nms_scores_bs,
                labels['source_ids'])
        else:
            cls_outputs, box_outputs = lite_runner.run(images)
            detections = postprocess.generate_detections(
                config,
                cls_outputs,
                box_outputs,
                labels['image_scales'],
                labels['source_ids'],
                pre_class_nms=FLAGS.pre_class_nms)

        detections = postprocess.transform_detections(detections)
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())
        pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)
Example #19
    def export(self,
               output_dir: Text,
               tflite_path: Text = None,
               tensorrt: Text = None):
        """Export a saved model, frozen graph, and potential tflite/tensorrt model.

    Args:
      output_dir: the output folder for saved model.
      tflite_path: the path for saved tflite file.
      tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.
    """
        signitures = self.signitures
        signature_def_map = {
            'serving_default':
            tf.saved_model.predict_signature_def(
                {signitures['image_arrays'].name: signitures['image_arrays']},
                {signitures['prediction'].name: signitures['prediction']}),
        }
        b = tf.saved_model.Builder(output_dir)
        b.add_meta_graph_and_variables(self.sess,
                                       tags=['serve'],
                                       signature_def_map=signature_def_map,
                                       assets_collection=tf.get_collection(
                                           tf.GraphKeys.ASSET_FILEPATHS),
                                       clear_devices=True)
        b.save()
        logging.info('Model saved at %s', output_dir)

        # also save freeze pb file.
        graphdef = self.freeze()
        pb_path = os.path.join(output_dir, self.model_name + '_frozen.pb')
        tf.io.gfile.GFile(pb_path, 'wb').write(graphdef.SerializeToString())
        logging.info('Frozen graph saved at %s', pb_path)

        if tflite_path:
            height, width = utils.parse_image_size(self.params['image_size'])
            input_name = signitures['image_arrays'].op.name
            input_shapes = {input_name: [None, height, width, 3]}
            converter = tf.lite.TFLiteConverter.from_saved_model(
                output_dir,
                input_arrays=[input_name],
                input_shapes=input_shapes,
                output_arrays=[signitures['prediction'].op.name])
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS
            ]
            tflite_model = converter.convert()

            tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
            logging.info('TFLite is saved at %s', tflite_path)

        if tensorrt:
            from tensorflow.python.compiler.tensorrt import trt  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
            sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))
            trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
            trt.create_inference_graph(None,
                                       None,
                                       precision_mode=tensorrt,
                                       input_saved_model_dir=output_dir,
                                       output_saved_model_dir=trt_path,
                                       session_config=sess_config)
            logging.info('TensorRT model is saved at %s', trt_path)
Example #20
def main(_):
    tf.config.run_functions_eagerly(FLAGS.debug)
    devices = tf.config.list_physical_devices('GPU')
    for device in devices:
        tf.config.experimental.set_memory_growth(device, True)

    model_config = hparams_config.get_detection_config(FLAGS.model_name)
    model_config.override(FLAGS.hparams)  # Add custom overrides
    model_config.is_training_bn = False
    if FLAGS.image_size != -1:
        model_config.image_size = FLAGS.image_size
    model_config.image_size = utils.parse_image_size(model_config.image_size)

    model_params = model_config.as_dict()
    ckpt_path_or_file = FLAGS.model_dir
    if tf.io.gfile.isdir(ckpt_path_or_file):
        ckpt_path_or_file = tf.train.latest_checkpoint(ckpt_path_or_file)
    driver = inference.ServingDriver(FLAGS.model_name, ckpt_path_or_file,
                                     FLAGS.batch_size or None,
                                     FLAGS.only_network, model_params)
    if FLAGS.mode == 'export':
        if not FLAGS.saved_model_dir:
            raise ValueError('Please specify --saved_model_dir=')
        model_dir = FLAGS.saved_model_dir
        if tf.io.gfile.exists(model_dir):
            tf.io.gfile.rmtree(model_dir)
        driver.export(model_dir, FLAGS.tensorrt, FLAGS.tflite,
                      FLAGS.file_pattern, FLAGS.num_calibration_steps)
        print('Model exported to %s' % model_dir)
    elif FLAGS.mode == 'infer':
        image_file = tf.io.read_file(FLAGS.input_image)
        image_arrays = tf.io.decode_image(image_file)
        image_arrays.set_shape((None, None, 3))
        image_arrays = tf.expand_dims(image_arrays, axis=0)
        if FLAGS.saved_model_dir:
            driver.load(FLAGS.saved_model_dir)
            if FLAGS.saved_model_dir.endswith('.tflite'):
                image_size = utils.parse_image_size(model_config.image_size)
                image_arrays = tf.image.resize_with_pad(
                    image_arrays, *image_size)
                image_arrays = tf.cast(image_arrays, tf.uint8)
        detections_bs = driver.serve(image_arrays)
        boxes, scores, classes, _ = tf.nest.map_structure(
            np.array, detections_bs)
        raw_image = Image.fromarray(np.array(image_arrays)[0])
        img = driver.visualize(
            raw_image,
            boxes[0],
            classes[0],
            scores[0],
            min_score_thresh=model_config.nms_configs.score_thresh or 0.4,
            max_boxes_to_draw=model_config.nms_configs.max_output_size)
        output_image_path = os.path.join(FLAGS.output_image_dir, '0.jpg')
        Image.fromarray(img).save(output_image_path)
        print('writing file to %s' % output_image_path)
    elif FLAGS.mode == 'benchmark':
        if FLAGS.saved_model_dir:
            driver.load(FLAGS.saved_model_dir)

        batch_size = FLAGS.batch_size or 1
        if FLAGS.input_image:
            image_file = tf.io.read_file(FLAGS.input_image)
            image_arrays = tf.image.decode_image(image_file)
            image_arrays.set_shape((None, None, 3))
            image_arrays = tf.expand_dims(image_arrays, 0)
            if batch_size > 1:
                image_arrays = tf.tile(image_arrays, [batch_size, 1, 1, 1])
        else:
            # use synthetic data if no image is provided.
            image_arrays = tf.ones((batch_size, *model_config.image_size, 3),
                                   dtype=tf.uint8)
        driver.benchmark(image_arrays, FLAGS.bm_runs, FLAGS.trace_filename)
    elif FLAGS.mode == 'dry':
        # transfer to tf2 format ckpt
        driver.build()
        if FLAGS.export_ckpt:
            driver.model.save_weights(FLAGS.export_ckpt)
    elif FLAGS.mode == 'video':
        import cv2  # pylint: disable=g-import-not-at-top
        if FLAGS.saved_model_dir:
            driver.load(FLAGS.saved_model_dir)
        cap = cv2.VideoCapture(FLAGS.input_video)
        if not cap.isOpened():
            print('Error opening input video: {}'.format(FLAGS.input_video))

        out_ptr = None
        if FLAGS.output_video:
            frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
            out_ptr = cv2.VideoWriter(
                FLAGS.output_video, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
                25, (frame_width, frame_height))

        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            if not ret:
                break

            raw_frames = np.array([frame])
            detections_bs = driver.serve(raw_frames)
            boxes, scores, classes, _ = tf.nest.map_structure(
                np.array, detections_bs)
            new_frame = driver.visualize(
                raw_frames[0],
                boxes[0],
                classes[0],
                scores[0],
                min_score_thresh=model_config.nms_configs.score_thresh or 0.4,
                max_boxes_to_draw=model_config.nms_configs.max_output_size)

            if out_ptr:
                # write frame into output file.
                out_ptr.write(new_frame)
            else:
                # Show the frame on screen, mainly used for real-time speed tests.
                cv2.imshow('Frame', new_frame)
                # Press Q on the keyboard to exit.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
Example #21
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    if config.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif config.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((None, *config.image_size, 3))
        util_keras.restore_ckpt(model,
                                tf.train.latest_checkpoint(FLAGS.model_dir),
                                config.moving_average_decay,
                                skip_mismatch=False)

        @tf.function
        def model_fn(images, labels):
            cls_outputs, box_outputs = model(images, training=False)
            detections = postprocess.generate_detections(
                config, cls_outputs, box_outputs, labels['image_scales'],
                labels['source_ids'])
            tf.numpy_function(evaluator.update_state, [
                labels['groundtruth_data'],
                postprocess.transform_detections(detections)
            ], [])

        # Evaluator for AP calculation.
        label_map = label_util.get_label_map(config.label_map)
        evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                                 label_map=label_map)

        # dataset
        batch_size = FLAGS.batch_size  # global batch size.
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            max_instances_per_image=config.max_instances_per_image)(
                config, batch_size=batch_size)
        if FLAGS.eval_samples:
            ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
        ds = ds_strategy.experimental_distribute_dataset(ds)

        # evaluate all images.
        eval_samples = FLAGS.eval_samples or 5000
        pbar = tf.keras.utils.Progbar(
            (eval_samples + batch_size - 1) // batch_size)
        for i, (images, labels) in enumerate(ds):
            ds_strategy.run(model_fn, (images, labels))
            pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)