Example #1
def _parse_and_store_boxes(filename, dataset_directory, orig_dims):
    filename_split = tf.unstack(tf.string_split([filename], "_").values[:-1],
                                num=3)
    strip_filename = tf.string_join(filename_split, "_")
    txt_dir = tf.constant(
        os.path.join(dataset_directory, 'panoptic_txt_weights/'))
    txt_ext = tf.constant('_gtFine_instanceIds.txt')
    txt_filename = tf.string_join([txt_dir, strip_filename, txt_ext])

    la_in_txt = tf.read_file(txt_filename)
    la_in_txt = tf.string_split([la_in_txt], delimiter='\n').values
    la_in_txt = tf.string_split(la_in_txt, delimiter=' ').values
    la_in_int = tf.reshape(tf.string_to_number(la_in_txt, out_type=tf.int32),
                           [-1, 7])

    weights = la_in_int[:, 6]
    boxes_orig = la_in_int[:, 2:6]
    boxes_format = convert_input_box_format(boxes_orig)
    boxes_norm = normalize_boxes(boxes_format,
                                 orig_height=orig_dims[0],
                                 orig_width=orig_dims[1])
    classes = la_in_int[:, 1]
    instance_ids = la_in_int[:, 0]

    return boxes_norm, classes, weights, instance_ids
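
Example #1 relies on helpers that are not shown (convert_input_box_format, normalize_boxes). For orientation, here is a minimal sketch of the normalize_boxes semantics these examples assume, inferred from the call sites rather than taken from the library source:

def normalize_boxes_sketch(boxes, height, width):
    """Divides pixel [y1, x1, y2, x2] boxes by the image size (sketch)."""
    # Assumption: boxes are in pixel coordinates on an image of the given size.
    scale = tf.cast(tf.stack([height, width, height, width]), boxes.dtype)
    return boxes / scale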
Example #2
    def _serving_model_graph(features, params):
        """Build the model graph for serving."""
        images = features['images']
        _, height, width, _ = images.get_shape().as_list()

        input_anchor = anchor.Anchor(params.anchor.min_level,
                                     params.anchor.max_level,
                                     params.anchor.num_scales,
                                     params.anchor.aspect_ratios,
                                     params.anchor.anchor_size,
                                     (height, width))

        model_fn = factory.model_generator(params)
        model_outputs = model_fn.build_outputs(
            features['images'],
            labels={'anchor_boxes': input_anchor.multilevel_boxes},
            mode=mode_keys.PREDICT)

        if cast_num_detections_to_float:
            model_outputs['num_detections'] = tf.cast(
                model_outputs['num_detections'], dtype=tf.float32)

        if output_image_info:
            model_outputs.update({
                'image_info': features['image_info'],
            })

        if output_normalized_coordinates:
            model_outputs['detection_boxes'] = box_utils.normalize_boxes(
                model_outputs['detection_boxes'],
                features['image_info'][:, 1:2, :])

        return model_outputs
Example #3
  def _serving_model_graph(features, params):
    """Build the model graph for serving."""
    images = features['images']
    batch_size, height, width, _ = images.get_shape().as_list()

    input_anchor = anchor.Anchor(
        params.anchor.min_level, params.anchor.max_level,
        params.anchor.num_scales, params.anchor.aspect_ratios,
        params.anchor.anchor_size, (height, width))

    multilevel_boxes = {}
    for k, v in six.iteritems(input_anchor.multilevel_boxes):
      multilevel_boxes[k] = tf.tile(
          tf.expand_dims(v, 0), [batch_size, 1, 1])

    model_fn = factory.model_generator(params)
    model_outputs = model_fn.build_outputs(
        features['images'],
        labels={
            'anchor_boxes': multilevel_boxes,
            'image_info': features['image_info'],
        },
        mode=mode_keys.PREDICT)

    if cast_num_detections_to_float:
      model_outputs['num_detections'] = tf.cast(
          model_outputs['num_detections'], dtype=tf.float32)

    if output_image_info:
      model_outputs.update({
          'image_info': features['image_info'],
      })

    if output_normalized_coordinates:
      model_outputs['detection_boxes'] = box_utils.normalize_boxes(
          model_outputs['detection_boxes'],
          features['image_info'][:, 1:2, :])

    predictions = {
        'num_detections': tf.identity(
            model_outputs['num_detections'], 'NumDetections'),
        'detection_boxes': tf.identity(
            model_outputs['detection_boxes'], 'DetectionBoxes'),
        'detection_classes': tf.identity(
            model_outputs['detection_classes'], 'DetectionClasses'),
        'detection_scores': tf.identity(
            model_outputs['detection_scores'], 'DetectionScores'),
    }
    if 'detection_masks' in model_outputs:
      predictions.update({
          'detection_masks':
              tf.identity(model_outputs['detection_masks'], 'DetectionMasks'),
      })

    if output_image_info:
      predictions['image_info'] = tf.identity(
          model_outputs['image_info'], 'ImageInfo')

    return predictions
Example #4
    def _predict_rpn(self, features):
        with tf.variable_scope("RPN"):
            anchors = anchor_generator.generate(
                base_size=self._anchor_size,
                stride=self._anchor_stride,
                scales=self._anchor_scales,
                ratios=self._anchor_ratios,
                features_height=tf.shape(features)[1],
                features_width=tf.shape(features)[2],
                offset=self._anchor_offset)

            anchors_normalized = box_utils.normalize_boxes(
                anchors, self.params.height_input, self.params.width_input)

            rpn_sliding_window = slim.conv2d(features,
                                             512,
                                             kernel_size=[3, 3],
                                             activation_fn=tf.nn.relu,
                                             scope='sliding_window')

            rpn_objectness = slim.conv2d(rpn_sliding_window,
                                         self.num_anchors_per_location * 2,
                                         kernel_size=[1, 1],
                                         activation_fn=None,
                                         padding="VALID",
                                         scope='objectness')

            rpn_box_encoded = slim.conv2d(rpn_sliding_window,
                                          self.num_anchors_per_location * 4,
                                          kernel_size=[1, 1],
                                          activation_fn=None,
                                          padding="VALID",
                                          scope='box')

            rpn_objectness = tf.reshape(rpn_objectness,
                                        [self.params.Nb, -1, 2])
            rpn_box_encoded = tf.reshape(rpn_box_encoded,
                                         [self.params.Nb, -1, 4])

            prediction_dict = {
                'rpn_objectness': rpn_objectness,
                'rpn_box_encoded': rpn_box_encoded,
                'anchors': anchors,
                'anchors_normalized': anchors_normalized
            }

        return prediction_dict
Example #5
def visualize_images_with_bounding_boxes(images, box_outputs, step,
                                         summary_writer):
  """Records subset of evaluation images with bounding boxes."""
  if not isinstance(images, list):
    logging.warning('visualize_images_with_bounding_boxes expects list of '
                    'images but received type: %s and value: %s',
                    type(images), images)
    return

  image_shape = tf.shape(images[0])
  image_height = tf.cast(image_shape[0], tf.float32)
  image_width = tf.cast(image_shape[1], tf.float32)
  normalized_boxes = box_utils.normalize_boxes(box_outputs,
                                               [image_height, image_width])

  bounding_box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]])
  image_summary = tf.image.draw_bounding_boxes(
      tf.cast(images, tf.float32), normalized_boxes, bounding_box_color)
  with summary_writer.as_default():
    tf.summary.image('bounding_box_summary', image_summary, step=step)
    summary_writer.flush()
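
A hedged usage sketch for the helper above (TF2 eager mode; the log directory, image shapes, and box values here are hypothetical):

writer = tf.summary.create_file_writer('/tmp/eval_vis')
eval_images = [tf.zeros([480, 640, 3]) for _ in range(2)]  # list of [H, W, 3] images
box_outputs = tf.zeros([2, 5, 4])  # per-image boxes in pixel coordinates
visualize_images_with_bounding_boxes(eval_images, box_outputs,
                                     step=0, summary_writer=writer)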
Example #6
def build_predictions(features,
                      params,
                      output_image_info,
                      output_normalized_coordinates,
                      cast_num_detections_to_float,
                      cast_detection_classes_to_float=False):
    """Builds the model graph for serving.

    Args:
      features: features to be passed to the serving model graph.
      params: hyperparameters to be passed to the serving model graph.
      output_image_info: bool, whether to output the image_info node.
      output_normalized_coordinates: bool, whether box outputs are in
        normalized coordinates.
      cast_num_detections_to_float: bool, whether to cast the number of
        detections to float type.
      cast_detection_classes_to_float: bool, whether to cast the detection
        classes to float type.

    Returns:
      predictions: model outputs for serving.
      model_outputs: a dict of model output tensors.
    """
    images = features['images']
    batch_size, height, width, _ = images.get_shape().as_list()

    input_anchor = anchor.Anchor(params.architecture.min_level,
                                 params.architecture.max_level,
                                 params.anchor.num_scales,
                                 params.anchor.aspect_ratios,
                                 params.anchor.anchor_size, (height, width))

    multilevel_boxes = {}
    for k, v in six.iteritems(input_anchor.multilevel_boxes):
        multilevel_boxes[k] = tf.tile(tf.expand_dims(v, 0), [batch_size, 1, 1])

    model_fn = factory.model_generator(params)
    model_outputs = model_fn.build_outputs(features['images'],
                                           labels={
                                               'anchor_boxes':
                                               multilevel_boxes,
                                               'image_info':
                                               features['image_info'],
                                           },
                                           mode=mode_keys.PREDICT)

    # Return flattened raw outputs.
    if not params.postprocess.apply_nms:
        predictions = {
            'raw_boxes': tf.identity(model_outputs['raw_boxes'], 'RawBoxes'),
            'raw_scores': tf.identity(model_outputs['raw_scores'],
                                      'RawScores'),
        }
        return predictions, model_outputs

    if cast_num_detections_to_float:
        model_outputs['num_detections'] = tf.cast(
            model_outputs['num_detections'], dtype=tf.float32)

    if cast_detection_classes_to_float:
        model_outputs['detection_classes'] = tf.cast(
            model_outputs['detection_classes'], dtype=tf.float32)

    if output_image_info:
        model_outputs.update({
            'image_info': features['image_info'],
        })

    if output_normalized_coordinates:
        detection_boxes = (
            model_outputs['detection_boxes'] /
            tf.tile(features['image_info'][:, 2:3, :], [1, 1, 2]))
        model_outputs['detection_boxes'] = box_utils.normalize_boxes(
            detection_boxes, features['image_info'][:, 0:1, :])

    predictions = {
        'num_detections':
        tf.identity(model_outputs['num_detections'], 'NumDetections'),
        'detection_boxes':
        tf.identity(model_outputs['detection_boxes'], 'DetectionBoxes'),
        'detection_classes':
        tf.identity(model_outputs['detection_classes'], 'DetectionClasses'),
        'detection_scores':
        tf.identity(model_outputs['detection_scores'], 'DetectionScores'),
    }
    if 'detection_masks' in model_outputs:
        predictions.update({
            'detection_masks':
            tf.identity(model_outputs['detection_masks'], 'DetectionMasks'),
        })
        if 'detection_outer_boxes' in model_outputs:
            predictions.update({
                'detection_outer_boxes':
                tf.identity(model_outputs['detection_outer_boxes'],
                            'DetectionOuterBoxes'),
            })

    if output_image_info:
        predictions['image_info'] = tf.identity(model_outputs['image_info'],
                                                'ImageInfo')

    return predictions, model_outputs
Example #7
    def _parse_train_data(self, data):
        """Parses data for training.

        Args:
          data: the decoded tensor dictionary from TfExampleDecoder.

        Returns:
          image: image tensor that is preprocessed to have normalized value and
            dimension [output_size[0], output_size[1], 3]
          labels: a dictionary of tensors used for training. The following
            describes {key: value} pairs in the dictionary.
            image_info: a 2D `Tensor` that encodes the information of the image
              and the applied preprocessing. It is in the format of
              [[original_height, original_width], [scaled_height, scaled_width],
              [y_scale, x_scale], [y_offset, x_offset]].
            anchor_boxes: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, 4] representing anchor boxes at
              each level.
            rpn_score_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, anchors_per_location]. The
              height_l and width_l represent the dimension of class logits at
              l-th level.
            rpn_box_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, anchors_per_location * 4]. The
              height_l and width_l represent the dimension of bounding box
              regression output at l-th level.
            gt_boxes: Groundtruth bounding box annotations. The box is
              represented in [y1, x1, y2, x2] format. The coordinates are
              w.r.t. the scaled image that is fed to the network. The tensor is
              padded with -1 to the fixed dimension
              [self._max_num_instances, 4].
            gt_classes: Groundtruth class annotations. The tensor is padded
              with -1 to the fixed dimension [self._max_num_instances].
            gt_masks: Groundtruth masks cropped by the bounding box and
              resized to a fixed size determined by mask_crop_size.
        """
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        if self._include_mask:
            masks = data['groundtruth_instance_masks']

        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            if self._include_mask:
                masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if self._include_mask:
            masks = tf.gather(masks, indices)
            # Transfers boxes back to the original image space and normalizes them.
            cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                            [1, 2])
            cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                     [1, 2])
            cropped_boxes = box_utils.normalize_boxes(cropped_boxes,
                                                      image_shape)
            num_masks = tf.shape(masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method='bilinear')
            masks = tf.squeeze(masks, axis=-1)

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.RpnAnchorLabeler(input_anchor,
                                                 self._rpn_match_threshold,
                                                 self._rpn_unmatched_threshold,
                                                 self._rpn_batch_size_per_im,
                                                 self._rpn_fg_fraction)
        rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
            boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
            'rpn_score_targets': rpn_score_targets,
            'rpn_box_targets': rpn_box_targets,
        }
        labels['gt_boxes'] = input_utils.clip_or_pad_to_fixed_size(
            boxes, self._max_num_instances, -1)
        labels['gt_classes'] = input_utils.clip_or_pad_to_fixed_size(
            classes, self._max_num_instances, -1)
        if self._include_mask:
            labels['gt_masks'] = input_utils.clip_or_pad_to_fixed_size(
                masks, self._max_num_instances, -1)

        return image, labels
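
As a sanity check on the cropped_boxes arithmetic in Example #7, a small worked example with hypothetical values (a uniform scale of 2.0 and a 100-pixel offset on each axis):

boxes = tf.constant([[300., 300., 500., 500.]])  # box on the scaled image
offset = tf.constant([100., 100.])               # hypothetical crop offset
image_scale = tf.constant([2., 2.])              # hypothetical scale factor
cropped = ((boxes + tf.tile(tf.expand_dims(offset, 0), [1, 2]))
           / tf.tile(tf.expand_dims(image_scale, 0), [1, 2]))
# cropped == [[200., 200., 300., 300.]] in original-image pixels, which
# normalize_boxes then divides by the original height and width.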
Example #8
    def parse_train_data(self, data):
        """Parse data for ShapeMask training."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        masks = data['groundtruth_instance_masks']
        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            masks = tf.gather(masks, indices)

        # If not using categories, collapses all category ids to a single class.
        if not self._use_category:
            classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

        image = self.get_normalized_image(data)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            image, boxes, masks = input_utils.random_horizontal_flip(
                image, boxes, masks)

        # Converts boxes from normalized coordinates to pixel coordinates.
        image_shape = tf.shape(image)[0:2]
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            self._output_size,
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        self._train_image_scale = image_info[2, :]
        self._train_offset = image_info[3, :]

        # Resizes and crops boxes and masks.
        boxes = input_utils.resize_and_crop_boxes(boxes,
                                                  self._train_image_scale,
                                                  image_info[1, :],
                                                  self._train_offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        masks = tf.gather(masks, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size, self._output_size)
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # Sample groundtruth masks/boxes/classes for mask branch.
        num_masks = tf.shape(masks)[0]
        mask_shape = tf.shape(masks)[1:3]

        # Pad sampled boxes/masks/classes to a constant batch size.
        padded_boxes = input_utils.pad_to_fixed_size(boxes,
                                                     self._num_sampled_masks)
        padded_classes = input_utils.pad_to_fixed_size(classes,
                                                       self._num_sampled_masks)
        padded_masks = input_utils.pad_to_fixed_size(masks,
                                                     self._num_sampled_masks)

        # Randomly sample groundtruth masks for mask branch training. For
        # images without groundtruth masks, dummy padded tensors are sampled.
        rand_indices = tf.random.shuffle(
            tf.range(tf.maximum(num_masks, self._num_sampled_masks)))
        rand_indices = tf.mod(rand_indices, tf.maximum(num_masks, 1))
        rand_indices = rand_indices[0:self._num_sampled_masks]
        rand_indices = tf.reshape(rand_indices, [self._num_sampled_masks])

        sampled_boxes = tf.gather(padded_boxes, rand_indices)
        sampled_classes = tf.gather(padded_classes, rand_indices)
        sampled_masks = tf.gather(padded_masks, rand_indices)
        # Jitter the sampled boxes to mimic noisy detections.
        sampled_boxes = box_utils.jitter_boxes(
            sampled_boxes, noise_scale=self._box_jitter_scale)
        sampled_boxes = box_utils.clip_boxes(sampled_boxes, self._output_size)
        # Compute mask targets in feature crop. A feature crop fully contains a
        # sampled box.
        mask_outer_boxes = box_utils.compute_outer_boxes(
            sampled_boxes, tf.shape(image)[0:2], scale=self._outer_box_scale)
        mask_outer_boxes = box_utils.clip_boxes(mask_outer_boxes,
                                                self._output_size)
        # Compensate for the offset of mask_outer_boxes to map them back to the
        # original image scale.
        mask_outer_boxes_ori = mask_outer_boxes
        mask_outer_boxes_ori += tf.tile(
            tf.expand_dims(self._train_offset, axis=0), [1, 2])
        mask_outer_boxes_ori /= tf.tile(
            tf.expand_dims(self._train_image_scale, axis=0), [1, 2])
        norm_mask_outer_boxes_ori = box_utils.normalize_boxes(
            mask_outer_boxes_ori, mask_shape)

        # Set sampled_masks shape to [batch_size, height, width, 1].
        sampled_masks = tf.cast(tf.expand_dims(sampled_masks, axis=-1),
                                tf.float32)
        mask_targets = tf.image.crop_and_resize(
            sampled_masks,
            norm_mask_outer_boxes_ori,
            box_ind=tf.range(self._num_sampled_masks),
            crop_size=[self._mask_crop_size, self._mask_crop_size],
            method='bilinear',
            extrapolation_value=0,
            name='train_mask_targets')
        mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5),
                                tf.ones_like(mask_targets),
                                tf.zeros_like(mask_targets))
        mask_targets = tf.squeeze(mask_targets, axis=-1)
        if self._up_sample_factor > 1:
            fine_mask_targets = tf.image.crop_and_resize(
                sampled_masks,
                norm_mask_outer_boxes_ori,
                box_ind=tf.range(self._num_sampled_masks),
                crop_size=[
                    self._mask_crop_size * self._up_sample_factor,
                    self._mask_crop_size * self._up_sample_factor
                ],
                method='bilinear',
                extrapolation_value=0,
                name='train_mask_targets')
            fine_mask_targets = tf.where(
                tf.greater_equal(fine_mask_targets, 0.5),
                tf.ones_like(fine_mask_targets),
                tf.zeros_like(fine_mask_targets))
            fine_mask_targets = tf.squeeze(fine_mask_targets, axis=-1)
        else:
            fine_mask_targets = mask_targets

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        valid_image = tf.cast(tf.not_equal(num_masks, 0), tf.int32)
        if self._mask_train_class == 'all':
            mask_is_valid = valid_image * tf.ones_like(sampled_classes,
                                                       tf.int32)
        else:
            # Get the intersection of sampled classes with training splits.
            mask_valid_classes = tf.cast(
                tf.expand_dims(
                    class_utils.coco_split_class_ids(self._mask_train_class),
                    1), sampled_classes.dtype)
            match = tf.reduce_any(
                tf.equal(tf.expand_dims(sampled_classes, 0),
                         mask_valid_classes), 0)
            mask_is_valid = valid_image * tf.cast(match, tf.int32)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
            # For ShapeMask.
            'mask_boxes': sampled_boxes,
            'mask_outer_boxes': mask_outer_boxes,
            'mask_targets': mask_targets,
            'fine_mask_targets': fine_mask_targets,
            'mask_classes': sampled_classes,
            'mask_is_valid': mask_is_valid,
        }
        return image, labels
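
Example #8 depends on box_utils.compute_outer_boxes, which is not shown. A hedged sketch of the behavior implied by its use (grow each box about its center by `scale` so the feature crop fully contains the sampled box); the exact library signature may differ:

def compute_outer_boxes_sketch(boxes, image_shape, scale=1.25):
    """Expands [y1, x1, y2, x2] boxes about their centers by `scale` (sketch)."""
    del image_shape  # Accepted for call-site parity; clipping is done separately above.
    y1, x1, y2, x2 = tf.unstack(boxes, axis=-1)
    cy, cx = (y1 + y2) / 2.0, (x1 + x2) / 2.0
    h, w = (y2 - y1) * scale, (x2 - x1) * scale
    return tf.stack(
        [cy - h / 2.0, cx - w / 2.0, cy + h / 2.0, cx + w / 2.0], axis=-1)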
Example #9
    def _parse_train_data(self, data):
        """Parses data for training.

        Args:
          data: the decoded tensor dictionary from TfExampleDecoder.

        Returns:
          image: image tensor that is preprocessed to have normalized value and
            dimension [output_size[0], output_size[1], 3]
          labels: a dictionary of tensors used for training. The following
            describes {key: value} pairs in the dictionary.
            image: image tensor that is preprocessed to have normalized value
              and dimension [output_size[0], output_size[1], 3]
            image_info: a 2D `Tensor` that encodes the information of the image
              and the applied preprocessing. It is in the format of
              [[original_height, original_width], [scaled_height, scaled_width],
              [y_scale, x_scale], [y_offset, x_offset]].
            num_groundtruths: number of objects.
            boxes: Groundtruth bounding box annotations. The box is represented
              in [y1, x1, y2, x2] format. The coordinates are w.r.t. the scaled
              image that is fed to the network. The tensor is padded with -1 to
              the fixed dimension [self._max_num_instances, 4].
            classes: Groundtruth class annotations. The tensor is padded
              with -1 to the fixed dimension [self._max_num_instances].
            masks: Groundtruth masks cropped by the bounding box and
              resized to a fixed size determined by mask_crop_size.
            pasted_objects_mask: a binary tensor with the same size as image,
              computed as the union of all the object masks.
        """
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        if self._include_mask:
            masks = data['groundtruth_instance_masks']

        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            if self._include_mask:
                masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if self._include_mask:
            masks = tf.gather(masks, indices)
            uncropped_masks = tf.cast(masks, tf.int8)
            uncropped_masks = tf.expand_dims(uncropped_masks, axis=3)
            uncropped_masks = input_utils.resize_and_crop_masks(
                uncropped_masks, image_scale, self._output_size, offset)
            # Transfers boxes back to the original image space and normalizes them.
            cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                            [1, 2])
            cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                     [1, 2])
            cropped_boxes = box_utils.normalize_boxes(cropped_boxes,
                                                      image_shape)
            num_masks = tf.shape(masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method='bilinear')
            masks = tf.squeeze(masks, axis=-1)
        indices = tf.range(start=0, limit=tf.shape(classes)[0], dtype=tf.int32)

        # Samples the number of objects to paste.
        m = tf.random.uniform(shape=[],
                              maxval=tf.shape(classes)[0] + 1,
                              dtype=tf.int32)
        m = tf.math.minimum(m, tf.shape(classes)[0])

        # Shuffles the indices of objects and keeps the first m for pasting.
        shuffled_indices = tf.random.shuffle(indices)
        shuffled_indices = tf.slice(shuffled_indices, [0], [m])

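        # NOTE: this path assumes self._include_mask is True; `masks` and
        # `uncropped_masks` above are only defined inside that branch.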
        boxes = tf.gather(boxes, shuffled_indices)
        masks = tf.gather(masks, shuffled_indices)
        classes = tf.gather(classes, shuffled_indices)
        uncropped_masks = tf.gather(uncropped_masks, shuffled_indices)
        pasted_objects_mask = tf.reduce_max(uncropped_masks, 0)
        pasted_objects_mask = tf.cast(pasted_objects_mask, tf.bool)

        labels = {
            'image': image,
            'image_info': image_info,
            'num_groundtruths': tf.shape(classes)[0],
            'boxes': boxes,
            'masks': masks,
            'classes': classes,
            'pasted_objects_mask': pasted_objects_mask,
        }
        return labels
Example #10
def _normalize_box_coordinates(boxes, image_info):
  boxes = boxes / tf.tile(image_info[:, 2:3, :], [1, 1, 2])
  return box_utils.normalize_boxes(boxes, image_info[:, 0:1, :])
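
A hedged illustration of the helper above, assuming the image_info layout used throughout these examples (row 0 = original size, row 1 = scaled size, row 2 = scale, row 3 = offset; the values are hypothetical):

image_info = tf.constant([[[800., 1200.],   # original height, width
                           [640., 960.],    # scaled height, width
                           [0.8, 0.8],      # y_scale, x_scale
                           [0., 0.]]])      # y_offset, x_offset
boxes = tf.constant([[[160., 240., 320., 480.]]])  # pixels on the scaled image
norm = _normalize_box_coordinates(boxes, image_info)
# boxes / scale -> [[200., 300., 400., 600.]], then divided by the original
# (height, width) -> [[0.25, 0.25, 0.5, 0.5]].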
Example #11
    def _parse_train_data(self, data):
        """Parse data for ShapeMask training."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        masks = data['groundtruth_instance_masks']
        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # If not using categories, collapses all category ids to a single class.
        if not self._use_category:
            classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            image, boxes, masks = input_utils.random_horizontal_flip(
                image, boxes, masks)

        # Converts boxes from normalized coordinates to pixel coordinates.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            self._output_size,
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_scale = image_info[2, :]
        offset = image_info[3, :]

        # Resizes and crops boxes and masks.
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  self._output_size, offset)
        masks = input_utils.resize_and_crop_masks(
            tf.expand_dims(masks, axis=-1), image_scale, self._output_size,
            offset)
        masks = tf.squeeze(masks, axis=-1)

        # Filters out ground truth boxes that are all zeros.
        indices = input_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        masks = tf.gather(masks, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size, self._output_size)
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # Sample groundtruth masks/boxes/classes for mask branch.
        num_masks = tf.shape(masks)[0]
        mask_shape = tf.shape(masks)[1:3]

        # Pad sampled boxes/masks/classes to a constant batch size.
        padded_boxes = input_utils.pad_to_fixed_size(boxes,
                                                     self._num_sampled_masks)
        padded_classes = input_utils.pad_to_fixed_size(classes,
                                                       self._num_sampled_masks)
        padded_masks = input_utils.pad_to_fixed_size(masks,
                                                     self._num_sampled_masks)

        # Randomly sample groundtruth masks for mask branch training. For
        # images without groundtruth masks, dummy padded tensors are sampled.
        rand_indices = tf.random.uniform([self._num_sampled_masks],
                                         minval=0,
                                         maxval=tf.maximum(num_masks, 1),
                                         dtype=tf.dtypes.int32)
        sampled_boxes = tf.gather(padded_boxes, rand_indices)
        sampled_classes = tf.gather(padded_classes, rand_indices)
        sampled_masks = tf.gather(padded_masks, rand_indices)

        # Compute mask targets in feature crop. A feature crop fully contains a
        # sampled box.
        mask_outer_boxes = box_utils.compute_outer_boxes(
            sampled_boxes, mask_shape, scale=self._outer_box_scale)
        norm_mask_outer_boxes = box_utils.normalize_boxes(
            mask_outer_boxes, mask_shape)

        # Set sampled_masks shape to [batch_size, height, width, 1].
        sampled_masks = tf.expand_dims(sampled_masks, axis=-1)
        mask_targets = tf.image.crop_and_resize(
            sampled_masks,
            norm_mask_outer_boxes,
            box_ind=tf.range(self._num_sampled_masks),
            crop_size=[self._mask_crop_size, self._mask_crop_size],
            method='bilinear',
            extrapolation_value=0,
            name='train_mask_targets')
        mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5),
                                tf.ones_like(mask_targets),
                                tf.zeros_like(mask_targets))
        mask_targets = tf.squeeze(mask_targets, axis=-1)

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
            # For ShapeMask.
            'mask_boxes': sampled_boxes,
            'mask_outer_boxes': mask_outer_boxes,
            'mask_targets': mask_targets,
            'mask_classes': sampled_classes,
            'mask_is_valid': tf.cast(tf.not_equal(num_masks, 0), tf.int32)
        }
        return image, labels
Example #12
    def _parse_train_data_v2(self, data):
        """Parses data for training.

        Args:
          data: the decoded tensor dictionary from TfExampleDecoder.

        Returns:
          image: image tensor that is preprocessed to have normalized value and
            dimension [output_size[0], output_size[1], 3]
          labels: a dictionary of tensors used for training. The following
            describes {key: value} pairs in the dictionary.
            image_info: a 2D `Tensor` that encodes the information of the image
              and the applied preprocessing. It is in the format of
              [[original_height, original_width], [scaled_height, scaled_width],
              [y_scale, x_scale], [y_offset, x_offset]].
            anchor_boxes: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, 4] representing anchor boxes at
              each level.
            rpn_score_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, anchors_per_location]. The
              height_l and width_l represent the dimension of class logits at
              l-th level.
            rpn_box_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensors
              with shape [height_l, width_l, anchors_per_location * 4]. The
              height_l and width_l represent the dimension of bounding box
              regression output at l-th level.
            gt_boxes: Groundtruth bounding box annotations. The box is
              represented in [y1, x1, y2, x2] format. The coordinates are
              w.r.t. the scaled image that is fed to the network. The tensor is
              padded with -1 to the fixed dimension
              [self._max_num_instances, 4].
            gt_classes: Groundtruth class annotations. The tensor is padded
              with -1 to the fixed dimension [self._max_num_instances].
            gt_masks: Groundtruth masks cropped by the bounding box and
              resized to a fixed size determined by mask_crop_size.
        """
        if self._use_autoaugment:
            try:
                from utils import autoaugment_utils  # pylint: disable=g-import-not-at-top
            except ImportError as e:
                logging.exception("Autoaugment is not supported in TF 2.x.")
                raise e

        classes = data["groundtruth_classes"]
        boxes = data["groundtruth_boxes"]
        masks = None
        attributes = None

        if self._include_mask:
            masks = data["groundtruth_instance_masks"]

        if self._num_attributes:
            attributes = data["groundtruth_attributes"]

        is_crowds = data["groundtruth_is_crowd"]
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(input=classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    pred=tf.greater(tf.size(input=is_crowds), 0),
                    true_fn=lambda: tf.compat.v1.where(
                        tf.logical_not(is_crowds))[:, 0],
                    false_fn=lambda: tf.cast(
                        tf.range(num_groundtruths), tf.int64),
                )
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)

            if self._include_mask:
                masks = tf.gather(masks, indices)

            if self._num_attributes:
                attributes = tf.gather(attributes, indices)

        # Gets original image and its size.
        image = data["image"]

        # NOTE: The autoaugment method works best when used alongside the standard
        # horizontal flipping of images along with size jittering and normalization.
        if self._use_autoaugment and not self._apply_autoaugment_after_resizing:
            (
                image,
                boxes,
                masks,
            ) = autoaugment_utils.distort_image_and_masks_with_autoaugment(
                image, boxes, masks, self._autoaugment_policy_name)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Resizes and crops image.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max,
        )

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        orig_image_shape = image_info[0]
        boxes = box_utils.denormalize_boxes(boxes, orig_image_shape)

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        rescaled_image_shape = tf.shape(input=image)[:2]
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  rescaled_image_shape, offset)

        # Filters out ground truth boxes that are all zeros.
        boxes, classes, masks, attributes = self._remove_empty_boxes(
            boxes, classes, masks, attributes)

        # Applies autoaugment after resizing.
        if self._use_autoaugment and self._apply_autoaugment_after_resizing:
            # Prepares image and boxes for autoaugment.
            image = tf.image.convert_image_dtype(image, dtype=tf.uint8)
            boxes = box_utils.normalize_boxes(boxes, rescaled_image_shape)

            # Prepares masks for autoaugment.
            masks = tf.expand_dims(masks, axis=-1)
            scaled_mask_size = tf.cast(
                tf.round(orig_image_shape * image_scale), tf.int32)
            scaled_masks = tf.image.resize(
                masks, scaled_mask_size, method=tf.image.ResizeMethod.BILINEAR)
            offset_int = tf.cast(offset, tf.int32)
            masks = scaled_masks[
                :,
                offset_int[0]:offset_int[0] + rescaled_image_shape[0],
                offset_int[1]:offset_int[1] + rescaled_image_shape[1],
            ]
            masks = tf.squeeze(masks, axis=-1)
            masks = tf.cast(tf.greater(masks, 0.5), tf.float32)

            # Applies autoaugment.
            (
                image,
                boxes,
                masks,
            ) = autoaugment_utils.distort_image_and_masks_with_autoaugment(
                image, boxes, masks, self._autoaugment_policy_name)

            # Converts the image back to float32 and denormalizes the boxes.
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            boxes = box_utils.denormalize_boxes(boxes, rescaled_image_shape)

            # Filters out empty boxes.
            boxes, classes, masks, attributes = self._remove_empty_boxes(
                boxes, classes, masks, attributes)

        if self._include_mask:
            if self._use_autoaugment and self._apply_autoaugment_after_resizing:
                # Don't rescale boxes, as the masks were already resized.
                cropped_boxes = box_utils.normalize_boxes(
                    boxes, rescaled_image_shape)
            else:
                # Transfers boxes to the original image space.
                cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                                [1, 2])
                cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                         [1, 2])
                cropped_boxes = box_utils.normalize_boxes(
                    cropped_boxes, orig_image_shape)

            num_masks = tf.shape(input=masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method="bilinear",
            )
            masks = tf.squeeze(masks, axis=-1)

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Pads the image.
        padded_size = input_utils.compute_padded_size(self._output_size,
                                                      2**self._max_level)
        image = tf.image.pad_to_bounding_box(image, 0, 0, padded_size[0],
                                             padded_size[1])

        image_height, image_width, _ = image.get_shape().as_list()

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(
            self._min_level,
            self._max_level,
            self._num_scales,
            self._aspect_ratios,
            self._anchor_size,
            (image_height, image_width),
        )
        anchor_labeler = anchor.RpnAnchorLabeler(
            input_anchor,
            self._rpn_match_threshold,
            self._rpn_unmatched_threshold,
            self._rpn_batch_size_per_im,
            self._rpn_fg_fraction,
        )
        rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
            boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            "anchor_boxes": input_anchor.multilevel_boxes,
            "image_info": image_info,
            "rpn_score_targets": rpn_score_targets,
            "rpn_box_targets": rpn_box_targets,
        }
        labels["gt_boxes"] = input_utils.pad_to_fixed_size(
            boxes, self._max_num_instances, -1)
        labels["gt_classes"] = input_utils.pad_to_fixed_size(
            classes, self._max_num_instances, -1)

        if self._include_mask:
            labels["gt_masks"] = input_utils.pad_to_fixed_size(
                masks, self._max_num_instances, -1)

        if self._num_attributes:
            labels["gt_attributes"] = input_utils.pad_to_fixed_size(
                attributes, self._max_num_instances, -1)

        return image, labels
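
The _remove_empty_boxes helper called in Example #12 is not shown; a minimal sketch of the behavior implied by the call sites (the name, signature, and use of box_utils.get_non_empty_box_indices are assumptions):

    def _remove_empty_boxes(self, boxes, classes, masks=None, attributes=None):
        """Keeps only annotations whose boxes are non-empty (sketch)."""
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if masks is not None:
            masks = tf.gather(masks, indices)
        if attributes is not None:
            attributes = tf.gather(attributes, indices)
        return boxes, classes, masks, attributes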