Code example #1
    def _parse_train_data(self, data):
        """Parses data for training and evaluation."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)

        # Gets original image and its size.
        image = data['image']

        if self._aug_policy:
            if self._aug_policy in AUTOAUG_POLICIES:
                if autoaug_imported:
                    image, boxes = autoaugment_utils.distort_image_with_autoaugment(
                        image, boxes, self._aug_policy)
                else:
                    raise ImportError(
                        'Unable to import autoaugment_utils, likely due '
                        'to incompatibility with TF 2.x.')

        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
        }
        return image, labels
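The two trailing rows of image_info are what make this preprocessing reversible: the indexing above reads row 2 as the scale and row 3 as the offset. A minimal sketch of mapping boxes from the scaled image back to original-image coordinates, assuming the [[original_height, original_width], [scaled_height, scaled_width], [y_scale, x_scale], [y_offset, x_offset]] layout implied by that indexing (boxes_to_original_space is an illustrative helper, not part of the source):

import tensorflow as tf

def boxes_to_original_space(boxes, image_info):
    # Inverts the scale/offset applied by resize_and_crop_boxes.
    image_scale = image_info[2, :]  # [y_scale, x_scale]
    offset = image_info[3, :]       # [y_offset, x_offset]
    boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
    return boxes / tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])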
Code example #2
    def _parse_predict_data(self, data):
        """Parses data for prediction."""
        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image_height, image_width, _ = image.get_shape().as_list()

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Computes anchor boxes.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))

        labels = {
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
        }
        # If mode is PREDICT_WITH_GT, returns groundtruths and training targets
        # in labels.
        if self._mode == ModeKeys.PREDICT_WITH_GT:
            # Converts boxes from normalized coordinates to pixel coordinates.
            boxes = box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                                image_shape)
            groundtruths = {
                'source_id': data['source_id'],
                'height': data['height'],
                'width': data['width'],
                'num_detections': tf.shape(data['groundtruth_classes']),
                'boxes': boxes,
                'classes': data['groundtruth_classes'],
                'areas': data['groundtruth_area'],
                'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
            }
            groundtruths['source_id'] = dataloader_utils.process_source_id(
                groundtruths['source_id'])
            groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
                groundtruths, self._max_num_instances)
            labels['groundtruths'] = groundtruths

            # Computes training objective for evaluation loss.
            classes = data['groundtruth_classes']

            image_scale = image_info[2, :]
            offset = image_info[3, :]
            boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                      image_info[1, :], offset)
            # Filters out ground truth boxes that are all zeros.
            indices = box_utils.get_non_empty_box_indices(boxes)
            boxes = tf.gather(boxes, indices)
            classes = tf.gather(classes, indices)

            # Assigns anchors.
            anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                                  self._match_threshold,
                                                  self._unmatched_threshold)
            (cls_targets, box_targets,
             num_positives) = anchor_labeler.label_anchors(
                 boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
            labels['cls_targets'] = cls_targets
            labels['box_targets'] = box_targets
            labels['num_positives'] = num_positives
        return {
            'images': image,
            'labels': labels,
        }
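For context, parsers like this are typically mapped over a tf.data pipeline after decoding. The wiring below is a hypothetical sketch; decoder, parser, and the file pattern are placeholders rather than names taken from this source:

import tensorflow as tf

def build_dataset(file_pattern, decoder, parser, batch_size):
    dataset = tf.data.Dataset.list_files(file_pattern)
    dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=8)
    dataset = dataset.map(decoder.decode,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.map(parser._parse_predict_data,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.batch(batch_size, drop_remainder=True)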
Code example #3
    def _parse_eval_data(self, data):
        """Parses data for evaluation.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      image: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
      labels: a dictionary of tensors used for evaluation. The following
        describes {key: value} pairs in the dictionary.
        image_info: a 2D `Tensor` that encodes the information of the image and
          the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
        anchor_boxes: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, 4] representing anchor boxes at each level.
        groundtruths:
          source_id: Groundtruth source id.
          height: Original image height.
          width: Original image width.
          boxes: Groundtruth bounding box annotations. The box is represented
             in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
             image that is fed to the network. The tensor is padded with -1 to
             the fixed dimension [self._max_num_instances, 4].
          classes: Groundtruth classes annotations. The tensor is padded
            with -1 to the fixed dimension [self._max_num_instances].
          areas: Box area or mask area, depending on whether masks are present.
          is_crowds: Whether the ground truth label is a crowd label.
          num_groundtruths: Number of ground truths in the image.
    """
        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image_height, image_width, _ = image.get_shape().as_list()

        # Assigns anchor targets.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Sets up groundtruth data for evaluation.
        groundtruths = {
            'source_id': data['source_id'],
            'height': data['height'],
            'width': data['width'],
            'num_groundtruths': tf.shape(data['groundtruth_classes']),
            'boxes': box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                                 image_shape),
            'classes': data['groundtruth_classes'],
            'areas': data['groundtruth_area'],
            'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
        }
        # TODO(b/143766089): Add ground truth masks for segmentation metrics.
        groundtruths['source_id'] = dataloader_utils.process_source_id(
            groundtruths['source_id'])
        groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
            groundtruths, self._max_num_instances)

        # Packs labels for model_fn outputs.
        labels = {
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
            'groundtruths': groundtruths,
        }

        return image, labels
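dataloader_utils.pad_groundtruths_to_fixed_size is not included in these excerpts. Based on the docstring above (per-instance tensors padded with -1 up to self._max_num_instances), a rough stand-in could look like the following; the project's real helper may differ in detail:

import tensorflow as tf

def pad_instances(values, size, pad_value=-1):
    # Clips or pads the leading (instance) dimension to a fixed size.
    values = values[:size]
    num_padding = tf.maximum(size - tf.shape(values)[0], 0)
    paddings = [[0, num_padding]] + [[0, 0]] * (values.shape.rank - 1)
    return tf.pad(values, paddings, constant_values=pad_value)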
Code example #4
    def _parse_predict_data(self, data):
        """Parses data for prediction.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      A dictionary of {'images': image, 'labels': labels} where
        images: image tensor that is preprocessed to have normalized value and
          dimension [output_size[0], output_size[1], 3]
        labels: a dictionary of tensors used for prediction. The following
          describes {key: value} pairs in the dictionary.
          source_ids: Source image id. Default value -1 if the source id is
            empty in the groundtruth annotation.
          image_info: a 2D `Tensor` that encodes the information of the image
            and the applied preprocessing. It is in the format of
            [[original_height, original_width], [scaled_height, scaled_width],
            [y_scale, x_scale], [y_offset, x_offset]].
          anchor_boxes: ordered dictionary with keys
            [min_level, min_level+1, ..., max_level]. The values are tensor with
            shape [height_l, width_l, 4] representing anchor boxes at each
            level.
    """
        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image_height, image_width, _ = image.get_shape().as_list()

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Computes anchor boxes.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))

        labels = {
            'source_id': dataloader_utils.process_source_id(data['source_id']),
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
        }

        if self._mode == ModeKeys.PREDICT_WITH_GT:
            # Converts boxes from normalized coordinates to pixel coordinates.
            boxes = box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                                image_shape)
            groundtruths = {
                'source_id': data['source_id'],
                'height': data['height'],
                'width': data['width'],
                'num_detections': tf.shape(data['groundtruth_classes']),
                'boxes': boxes,
                'classes': data['groundtruth_classes'],
                'areas': data['groundtruth_area'],
                'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
            }
            groundtruths['source_id'] = dataloader_utils.process_source_id(
                groundtruths['source_id'])
            groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
                groundtruths, self._max_num_instances)
            labels['groundtruths'] = groundtruths

        return {
            'images': image,
            'labels': labels,
        }
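dataloader_utils.process_source_id is also external to these excerpts. Given the docstring's note that an empty source id defaults to -1, an assumed implementation might be:

import tensorflow as tf

def process_source_id_sketch(source_id):
    # Replaces an empty string id with '-1', then parses it as a number.
    source_id = tf.where(tf.equal(tf.strings.length(source_id), 0),
                         tf.constant('-1'), source_id)
    return tf.strings.to_number(source_id, out_type=tf.int64)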
Code example #5
    def get_normalized_image(self, decoded_data):
        return input_utils.normalize_image(decoded_data['image'])
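input_utils.normalize_image itself is not shown in these excerpts. A common implementation, assumed here with ImageNet channel statistics as placeholder constants, converts the image to float32 in [0, 1] and standardizes each channel:

import tensorflow as tf

def normalize_image_sketch(image,
                           offset=(0.485, 0.456, 0.406),
                           scale=(0.229, 0.224, 0.225)):
    # Scales pixels to [0, 1], then standardizes per channel.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image -= tf.constant(offset, dtype=tf.float32)
    image /= tf.constant(scale, dtype=tf.float32)
    return image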
Code example #6
    def _parse_train_data(self, data):
        """Parses data for training.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      image: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
      labels: a dictionary of tensors used for training. The following describes
        {key: value} pairs in the dictionary.
        image_info: a 2D `Tensor` that encodes the information of the image and
          the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
        anchor_boxes: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, 4] representing anchor boxes at each level.
        rpn_score_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location]. The height_l and
          width_l represent the dimension of class logits at l-th level.
        rpn_box_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location * 4]. The height_l and
          width_l represent the dimension of bounding box regression output at
          l-th level.
        gt_boxes: Groundtruth bounding box annotations. The box is represented
           in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
           image that is fed to the network. The tensor is padded with -1 to
           the fixed dimension [self._max_num_instances, 4].
        gt_classes: Groundtruth classes annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        gt_masks: groundtruth masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size.
    """
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        if self._include_mask:
            masks = data['groundtruth_instance_masks']

        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            if self._include_mask:
                masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if self._include_mask:
            masks = tf.gather(masks, indices)
            # Transfers boxes back to the original image space and normalizes them.
            cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                            [1, 2])
            cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                     [1, 2])
            cropped_boxes = box_utils.normalize_boxes(cropped_boxes,
                                                      image_shape)
            num_masks = tf.shape(masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method='bilinear')
            masks = tf.squeeze(masks, axis=-1)

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.RpnAnchorLabeler(input_anchor,
                                                 self._rpn_match_threshold,
                                                 self._rpn_unmatched_threshold,
                                                 self._rpn_batch_size_per_im,
                                                 self._rpn_fg_fraction)
        rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
            boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
            'rpn_score_targets': rpn_score_targets,
            'rpn_box_targets': rpn_box_targets,
        }
        labels['gt_boxes'] = input_utils.clip_or_pad_to_fixed_size(
            boxes, self._max_num_instances, -1)
        labels['gt_classes'] = input_utils.clip_or_pad_to_fixed_size(
            classes, self._max_num_instances, -1)
        if self._include_mask:
            labels['gt_masks'] = input_utils.clip_or_pad_to_fixed_size(
                masks, self._max_num_instances, -1)

        return image, labels
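The mask-cropping step above is easier to see in isolation: each instance mask is cropped to its (normalized) box and resampled to a fixed mask_crop_size. A toy run with illustrative shapes:

import tensorflow as tf

masks = tf.ones([3, 64, 64])                      # three instance masks
cropped_boxes = tf.constant([[0.00, 0.00, 0.50, 0.50],
                             [0.25, 0.25, 0.75, 0.75],
                             [0.50, 0.50, 1.00, 1.00]])  # normalized y1x1y2x2
mask_crops = tf.image.crop_and_resize(
    tf.expand_dims(masks, axis=-1),               # [N, H, W, 1]
    cropped_boxes,
    box_indices=tf.range(3, dtype=tf.int32),      # mask i crops from box i
    crop_size=[28, 28],
    method='bilinear')
mask_crops = tf.squeeze(mask_crops, axis=-1)      # [3, 28, 28]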
Code example #7
File: shapemask_parser.py Project: yc2013501516/tpu
    def _parse_train_data(self, data):
        """Parse data for ShapeMask training."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        masks = data['groundtruth_instance_masks']
        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # If categories are not used, collapses all positive class ids into a
        # single foreground class (1).
        if not self._use_category:
            classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            image, boxes, masks = input_utils.random_horizontal_flip(
                image, boxes, masks)

        # Converts boxes from normalized coordinates to pixel coordinates.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            self._output_size,
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_scale = image_info[2, :]
        offset = image_info[3, :]

        # Resizes and crops boxes and masks.
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  self._output_size, offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        masks = tf.gather(masks, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size, self._output_size)
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # Sample groundtruth masks/boxes/classes for mask branch.
        num_masks = tf.shape(masks)[0]
        mask_shape = tf.shape(masks)[1:3]

        # Pad sampled boxes/masks/classes to a constant batch size.
        padded_boxes = input_utils.pad_to_fixed_size(boxes,
                                                     self._num_sampled_masks)
        padded_classes = input_utils.pad_to_fixed_size(classes,
                                                       self._num_sampled_masks)
        padded_masks = input_utils.pad_to_fixed_size(masks,
                                                     self._num_sampled_masks)

        # Randomly sample groundtruth masks for mask branch training. For the image
        # without groundtruth masks, it will sample the dummy padded tensors.
        rand_indices = tf.random.shuffle(
            tf.range(tf.maximum(num_masks, self._num_sampled_masks)))
        rand_indices = tf.mod(rand_indices, tf.maximum(num_masks, 1))
        rand_indices = rand_indices[0:self._num_sampled_masks]
        rand_indices = tf.reshape(rand_indices, [self._num_sampled_masks])

        sampled_boxes = tf.gather(padded_boxes, rand_indices)
        sampled_classes = tf.gather(padded_classes, rand_indices)
        sampled_masks = tf.gather(padded_masks, rand_indices)
        # Jitter the sampled boxes to mimic the noisy detections.
        sampled_boxes = box_utils.jitter_boxes(
            sampled_boxes, noise_scale=self._box_jitter_scale)
        sampled_boxes = box_utils.clip_boxes(sampled_boxes, self._output_size)
        # Compute mask targets in feature crop. A feature crop fully contains a
        # sampled box.
        mask_outer_boxes = box_utils.compute_outer_boxes(
            sampled_boxes, tf.shape(image)[0:2], scale=self._outer_box_scale)
        mask_outer_boxes = box_utils.clip_boxes(mask_outer_boxes,
                                                self._output_size)
        # Compensate the offset of mask_outer_boxes to map it back to original image
        # scale.
        mask_outer_boxes_ori = mask_outer_boxes
        mask_outer_boxes_ori += tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
        mask_outer_boxes_ori /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                        [1, 2])
        norm_mask_outer_boxes_ori = box_utils.normalize_boxes(
            mask_outer_boxes_ori, mask_shape)

        # Set sampled_masks shape to [batch_size, height, width, 1].
        sampled_masks = tf.cast(tf.expand_dims(sampled_masks, axis=-1),
                                tf.float32)
        mask_targets = tf.image.crop_and_resize(
            sampled_masks,
            norm_mask_outer_boxes_ori,
            box_ind=tf.range(self._num_sampled_masks),
            crop_size=[self._mask_crop_size, self._mask_crop_size],
            method='bilinear',
            extrapolation_value=0,
            name='train_mask_targets')
        mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5),
                                tf.ones_like(mask_targets),
                                tf.zeros_like(mask_targets))
        mask_targets = tf.squeeze(mask_targets, axis=-1)
        if self._up_sample_factor > 1:
            fine_mask_targets = tf.image.crop_and_resize(
                sampled_masks,
                norm_mask_outer_boxes_ori,
                box_ind=tf.range(self._num_sampled_masks),
                crop_size=[
                    self._mask_crop_size * self._up_sample_factor,
                    self._mask_crop_size * self._up_sample_factor
                ],
                method='bilinear',
                extrapolation_value=0,
                name='train_mask_targets')
            fine_mask_targets = tf.where(
                tf.greater_equal(fine_mask_targets, 0.5),
                tf.ones_like(fine_mask_targets),
                tf.zeros_like(fine_mask_targets))
            fine_mask_targets = tf.squeeze(fine_mask_targets, axis=-1)
        else:
            fine_mask_targets = mask_targets

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        valid_image = tf.cast(tf.not_equal(num_masks, 0), tf.int32)
        if self._mask_train_class == 'all':
            mask_is_valid = valid_image * tf.ones_like(sampled_classes,
                                                       tf.int32)
        else:
            # Get the intersection of sampled classes with training splits.
            mask_valid_classes = tf.cast(
                tf.expand_dims(
                    class_utils.coco_split_class_ids(self._mask_train_class),
                    1), sampled_classes.dtype)
            match = tf.reduce_any(
                tf.equal(tf.expand_dims(sampled_classes, 0),
                         mask_valid_classes), 0)
            mask_is_valid = valid_image * tf.cast(match, tf.int32)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
            # For ShapeMask.
            'mask_boxes': sampled_boxes,
            'mask_outer_boxes': mask_outer_boxes,
            'mask_targets': mask_targets,
            'fine_mask_targets': fine_mask_targets,
            'mask_classes': sampled_classes,
            'mask_is_valid': mask_is_valid,
        }
        return image, labels
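The shuffle-then-mod pattern used for rand_indices above samples exactly num_sampled_masks indices even when an image has fewer (or zero) masks, repeating valid indices as needed. Isolated as a standalone sketch (tf.math.mod is the current spelling of the tf.mod used above):

import tensorflow as tf

def sample_mask_indices(num_masks, num_sampled):
    # Shuffles a range at least num_sampled long, then wraps the values
    # into [0, num_masks); index 0 is reused when there are no masks.
    indices = tf.random.shuffle(tf.range(tf.maximum(num_masks, num_sampled)))
    indices = tf.math.mod(indices, tf.maximum(num_masks, 1))
    return indices[:num_sampled]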
Code example #8
File: shapemask_parser.py Project: yc2013501516/tpu
    def _parse_predict_data(self, data):
        """Parse data for ShapeMask training."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        masks = data['groundtruth_instance_masks']

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # If categories are not used, collapses all positive class ids into a
        # single foreground class (1).
        if not self._use_category:
            classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Converts boxes from normalized coordinates to pixel coordinates.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            self._output_size,
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image_scale = image_info[2, :]
        offset = image_info[3, :]

        # Resizes and crops boxes and masks.
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  self._output_size, offset)
        masks = input_utils.resize_and_crop_masks(
            tf.expand_dims(masks, axis=-1), image_scale, self._output_size,
            offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size, self._output_size)
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        labels = {
            'anchor_boxes': input_anchor.multilevel_boxes,
            'image_info': image_info,
        }
        if self._mode == ModeKeys.PREDICT_WITH_GT:
            # Converts boxes from normalized coordinates to pixel coordinates.
            groundtruths = {
                'source_id': data['source_id'],
                'height': data['height'],
                'width': data['width'],
                'num_detections': tf.shape(data['groundtruth_classes']),
                'boxes': box_utils.denormalize_boxes(
                    data['groundtruth_boxes'], image_shape),
                'classes': data['groundtruth_classes'],
                # 'masks': tf.squeeze(masks, axis=-1),
                'areas': data['groundtruth_area'],
                'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
            }
            groundtruths['source_id'] = dataloader_utils.process_source_id(
                groundtruths['source_id'])
            groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
                groundtruths, self._max_num_instances)
            # Computes training labels.
            (cls_targets, box_targets,
             num_positives) = anchor_labeler.label_anchors(
                 boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
            # Packs labels for model_fn outputs.
            labels.update({
                'cls_targets': cls_targets,
                'box_targets': box_targets,
                'num_positives': num_positives,
                'groundtruths': groundtruths,
            })
        return {
            'images': image,
            'labels': labels,
        }
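box_utils.denormalize_boxes appears in every parser here. A minimal version, an assumption rather than the project's exact helper, simply rescales normalized [y1, x1, y2, x2] boxes by the image height and width:

import tensorflow as tf

def denormalize_boxes_sketch(boxes, image_shape):
    # image_shape is [height, width]; boxes are normalized to [0, 1].
    height = tf.cast(image_shape[0], boxes.dtype)
    width = tf.cast(image_shape[1], boxes.dtype)
    return boxes * tf.stack([height, width, height, width])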
Code example #9
    def _parse_eval_data(self, data):
        """Parses data for training and evaluation."""
        groundtruths = {}
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(input=image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Converts boxes from normalized coordinates to pixel coordinates.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)
        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Sets up groundtruth data for evaluation.
        groundtruths = {
            'source_id': data['source_id'],
            'num_groundtruths': tf.shape(data['groundtruth_classes']),
            'image_info': image_info,
            'boxes': box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                                 image_shape),
            'classes': data['groundtruth_classes'],
            'areas': data['groundtruth_area'],
            'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
        }
        groundtruths['source_id'] = process_source_id(
            groundtruths['source_id'])
        groundtruths = pad_groundtruths_to_fixed_size(groundtruths,
                                                      self._max_num_instances)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
            'groundtruths': groundtruths,
        }
        return image, labels
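AnchorLabeler is configured with a match threshold and an unmatched threshold. Its implementation is external to these excerpts; the assumed semantics (anchors with IoU >= match_threshold are positive, IoU < unmatched_threshold are negative, anything in between is ignored) can be sketched as:

import tensorflow as tf

def match_anchors(iou, match_threshold, unmatched_threshold):
    # iou: [num_anchors, num_gt] pairwise IoU matrix.
    max_iou = tf.reduce_max(iou, axis=1)
    matched_gt = tf.argmax(iou, axis=1)
    positive = max_iou >= match_threshold
    negative = max_iou < unmatched_threshold
    ignored = tf.logical_not(tf.logical_or(positive, negative))
    return matched_gt, positive, negative, ignored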
Code example #10
    def _parse_train_data(self, data):
        """Parses data for training.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      image: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
      labels: a dictionary of tensors used for training. The following describes
        {key: value} pairs in the dictionary.
        image: image tensor that is preprocessed to have normalized value and
          dimension [output_size[0], output_size[1], 3]
        image_info: a 2D `Tensor` that encodes the information of the image and
          the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
        num_groundtruths: number of objects.
        boxes: Groundtruth bounding box annotations. The box is represented
           in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
           image that is fed to the network. The tensor is padded with -1 to
           the fixed dimension [self._max_num_instances, 4].
        classes: Groundtruth classes annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        masks: groundtruth masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size.
        pasted_objects_mask: a binary tensor with the same size as image which
          is computed as the union of all the objects masks.
    """
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        if self._include_mask:
            masks = data['groundtruth_instance_masks']

        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training:
            num_groundtruths = tf.shape(classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    tf.greater(tf.size(is_crowds), 0),
                    lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)
            if self._include_mask:
                masks = tf.gather(masks, indices)

        # Gets original image and its size.
        image = data['image']
        image_shape = tf.shape(image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)

        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)
        if self._include_mask:
            masks = tf.gather(masks, indices)
            uncropped_masks = tf.cast(masks, tf.int8)
            uncropped_masks = tf.expand_dims(uncropped_masks, axis=3)
            uncropped_masks = input_utils.resize_and_crop_masks(
                uncropped_masks, image_scale, self._output_size, offset)
            # Transfers boxes back to the original image space and normalizes them.
            cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                            [1, 2])
            cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                     [1, 2])
            cropped_boxes = box_utils.normalize_boxes(cropped_boxes,
                                                      image_shape)
            num_masks = tf.shape(masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method='bilinear')
            masks = tf.squeeze(masks, axis=-1)
        # NOTE: the sampling below assumes `self._include_mask` is True, since
        # it uses `masks` and `uncropped_masks` computed in the branch above.
        indices = tf.range(start=0, limit=tf.shape(classes)[0], dtype=tf.int32)

        # Samples the number of masks to paste.
        m = tf.random.uniform(shape=[],
                              maxval=tf.shape(classes)[0] + 1,
                              dtype=tf.int32)
        m = tf.math.minimum(m, tf.shape(classes)[0])

        # Shuffles the object indices and keeps the first m objects for pasting.
        shuffled_indices = tf.random.shuffle(indices)
        shuffled_indices = tf.slice(shuffled_indices, [0], [m])

        boxes = tf.gather(boxes, shuffled_indices)
        masks = tf.gather(masks, shuffled_indices)
        classes = tf.gather(classes, shuffled_indices)
        uncropped_masks = tf.gather(uncropped_masks, shuffled_indices)
        pasted_objects_mask = tf.reduce_max(uncropped_masks, 0)
        pasted_objects_mask = tf.cast(pasted_objects_mask, tf.bool)

        labels = {
            'image': image,
            'image_info': image_info,
            'num_groundtruths': tf.shape(classes)[0],
            'boxes': boxes,
            'masks': masks,
            'classes': classes,
            'pasted_objects_mask': pasted_objects_mask,
        }
        return labels
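The pasted_objects_mask above is just the union of the per-instance masks, taken with reduce_max over the instance axis. On a toy input:

import tensorflow as tf

instance_masks = tf.constant([[[1, 0],
                               [0, 0]],
                              [[0, 0],
                               [0, 1]]], dtype=tf.int8)  # [2 instances, 2, 2]
pasted_objects_mask = tf.cast(tf.reduce_max(instance_masks, axis=0), tf.bool)
# -> [[True, False], [False, True]]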
Code example #11
    def _parse_train_data(self, data):
        """Parses data for training and evaluation."""
        classes = data['groundtruth_classes']
        boxes = data['groundtruth_boxes']
        is_crowds = data['groundtruth_is_crowd']
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(input=classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    pred=tf.greater(tf.size(input=is_crowds), 0),
                    true_fn=lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
                    false_fn=lambda: tf.cast(tf.range(num_groundtruths),
                                             tf.int64))
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)

        # Gets original image and its size.
        image = data['image']

        # NOTE: The autoaugment method works best when used alongside the standard
        # horizontal flipping of images along with size jittering and normalization.
        if self._use_autoaugment:
            image, boxes = autoaugment_utils.distort_image_with_autoaugment(
                image, boxes, self._autoaugment_policy_name)

        image_shape = tf.shape(input=image)[0:2]

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Converts boxes from normalized coordinates to pixel coordinates.
        boxes = box_utils.denormalize_boxes(boxes, image_shape)

        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            padded_size=input_utils.compute_padded_size(
                self._output_size, 2**self._max_level),
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max)
        image_height, image_width, _ = image.get_shape().as_list()

        # Resizes and crops boxes.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  image_info[1, :], offset)
        # Filters out ground truth boxes that are all zeros.
        indices = box_utils.get_non_empty_box_indices(boxes)
        boxes = tf.gather(boxes, indices)
        classes = tf.gather(classes, indices)

        # Assigns anchors.
        input_anchor = anchor.Anchor(self._min_level, self._max_level,
                                     self._num_scales, self._aspect_ratios,
                                     self._anchor_size,
                                     (image_height, image_width))
        anchor_labeler = anchor.AnchorLabeler(input_anchor,
                                              self._match_threshold,
                                              self._unmatched_threshold)
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(
             boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            'cls_targets': cls_targets,
            'box_targets': box_targets,
            'anchor_boxes': input_anchor.multilevel_boxes,
            'num_positives': num_positives,
            'image_info': image_info,
        }
        return image, labels
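input_utils.random_horizontal_flip has to flip the boxes together with the image. A minimal sketch of the assumed behavior, for normalized [y1, x1, y2, x2] boxes:

import tensorflow as tf

def random_horizontal_flip_sketch(image, boxes):
    do_flip = tf.random.uniform([]) > 0.5
    image = tf.cond(do_flip,
                    lambda: tf.image.flip_left_right(image),
                    lambda: image)
    # A horizontal flip maps x to 1 - x, which also swaps x1 and x2.
    boxes = tf.cond(
        do_flip,
        lambda: tf.stack([boxes[:, 0], 1.0 - boxes[:, 3],
                          boxes[:, 2], 1.0 - boxes[:, 1]], axis=-1),
        lambda: boxes)
    return image, boxes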
Code example #12
def main(unused_argv):
    del unused_argv
    # Load the label map.
    print(' - Loading the label map...')
    label_map_dict = {}
    if FLAGS.label_map_format == 'csv':
        with tf.gfile.Open(FLAGS.label_map_file, 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=':')
            for row in reader:
                if len(row) != 2:
                    raise ValueError(
                        'Each row of the csv label map file must be in '
                        '`id:name` format.')
                id_index = int(row[0])
                name = row[1]
                label_map_dict[id_index] = {
                    'id': id_index,
                    'name': name,
                }
    else:
        raise ValueError('Unsupported label map format: {}.'.format(
            FLAGS.label_map_format))

    params = config_factory.config_generator(FLAGS.model)
    if FLAGS.config_file:
        params = params_dict.override_params_dict(params,
                                                  FLAGS.config_file,
                                                  is_strict=True)
    params = params_dict.override_params_dict(params,
                                              FLAGS.params_override,
                                              is_strict=True)
    params.override(
        {
            'architecture': {
                'use_bfloat16': False,  # The inference runs on CPU/GPU.
            },
        },
        is_strict=True)
    params.validate()
    params.lock()

    model = model_factory.model_generator(params)

    with tf.Graph().as_default():
        image_input = tf.placeholder(shape=(), dtype=tf.string)
        image = tf.io.decode_image(image_input, channels=3)
        image.set_shape([None, None, 3])

        image = input_utils.normalize_image(image)
        image_size = [FLAGS.image_size, FLAGS.image_size]
        image, image_info = input_utils.resize_and_crop_image(
            image,
            image_size,
            image_size,
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image.set_shape([image_size[0], image_size[1], 3])

        # batching.
        images = tf.reshape(image, [1, image_size[0], image_size[1], 3])
        images_info = tf.expand_dims(image_info, axis=0)

        # model inference
        outputs = model.build_outputs(images, {'image_info': images_info},
                                      mode=mode_keys.PREDICT)

        # outputs['detection_boxes'] = (
        #     outputs['detection_boxes'] / tf.tile(images_info[:, 2:3, :], [1, 1, 2]))

        predictions = outputs

        # Create a saver in order to load the pre-trained checkpoint.
        saver = tf.train.Saver()

        image_with_detections_list = []
        with tf.Session() as sess:
            print(' - Loading the checkpoint...')
            saver.restore(sess, FLAGS.checkpoint_path)

            image_files = tf.gfile.Glob(FLAGS.image_file_pattern)
            for i, image_file in enumerate(image_files):
                print(' - Processing image %d...' % i)

                with tf.gfile.GFile(image_file, 'rb') as f:
                    image_bytes = f.read()

                image = Image.open(image_file)
                image = image.convert(
                    'RGB')  # needed for images with 4 channels.
                width, height = image.size
                np_image = (np.array(image.getdata()).reshape(
                    height, width, 3).astype(np.uint8))
                print(np_image.shape)

                predictions_np = sess.run(predictions,
                                          feed_dict={image_input: image_bytes})

                logits = predictions_np['logits'][0]
                print(logits.shape)

                labels = np.argmax(logits.squeeze(), -1)
                print(labels.shape)
                print(labels)
                labels = np.array(Image.fromarray(labels.astype('uint8')))
                print(labels.shape)

                plt.imshow(labels)
                plt.savefig(f"temp-{i}.png")
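This script, like the variant that follows, is written against the TF1 graph-and-session API (tf.placeholder, tf.Session, tf.train.Saver). Running it under a TF2 installation would require the v1 compatibility shims:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()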
Code example #13
def main(unused_argv):
    del unused_argv
    # Load the label map.
    print(' - Loading the label map...')
    label_map_dict = {}
    if FLAGS.label_map_format == 'csv':
        with tf.gfile.Open(FLAGS.label_map_file, 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=':')
            for row in reader:
                if len(row) != 2:
                    raise ValueError(
                        'Each row of the csv label map file must be in '
                        '`id:name` format.')
                id_index = int(row[0])
                name = row[1]
                label_map_dict[id_index] = {
                    'id': id_index,
                    'name': name,
                }
    else:
        raise ValueError('Unsupported label map format: {}.'.format(
            FLAGS.label_map_format))

    params = config_factory.config_generator(FLAGS.model)
    if FLAGS.config_file:
        params = params_dict.override_params_dict(params,
                                                  FLAGS.config_file,
                                                  is_strict=True)
    params = params_dict.override_params_dict(params,
                                              FLAGS.params_override,
                                              is_strict=True)
    params.override(
        {
            'architecture': {
                'use_bfloat16': False,  # The inference runs on CPU/GPU.
            },
        },
        is_strict=True)
    params.validate()
    params.lock()

    model = model_factory.model_generator(params)

    with tf.Graph().as_default():
        image_input = tf.placeholder(shape=(), dtype=tf.string)
        image = tf.io.decode_image(image_input, channels=3)
        image.set_shape([None, None, 3])

        image = input_utils.normalize_image(image)
        image_size = [FLAGS.image_size, FLAGS.image_size]
        image, image_info = input_utils.resize_and_crop_image(
            image,
            image_size,
            image_size,
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image.set_shape([image_size[0], image_size[1], 3])

        # batching.
        images = tf.reshape(image, [1, image_size[0], image_size[1], 3])
        images_info = tf.expand_dims(image_info, axis=0)

        # model inference
        outputs = model.build_outputs(images, {'image_info': images_info},
                                      mode=mode_keys.PREDICT)

        outputs['detection_boxes'] = (
            outputs['detection_boxes'] /
            tf.tile(images_info[:, 2:3, :], [1, 1, 2]))

        predictions = outputs

        # Create a saver in order to load the pre-trained checkpoint.
        saver = tf.train.Saver()

        image_with_detections_list = []
        with tf.Session() as sess:
            print(' - Loading the checkpoint...')
            saver.restore(sess, FLAGS.checkpoint_path)

            res = []
            image_files = tf.gfile.Glob(FLAGS.image_file_pattern)
            for i, image_file in enumerate(image_files):
                print(' - Processing image %d...' % i)

                with tf.gfile.GFile(image_file, 'rb') as f:
                    image_bytes = f.read()

                image = Image.open(image_file)
                image = image.convert(
                    'RGB')  # needed for images with 4 channels.
                width, height = image.size
                np_image = (np.array(image.getdata()).reshape(
                    height, width, 3).astype(np.uint8))

                predictions_np = sess.run(predictions,
                                          feed_dict={image_input: image_bytes})

                num_detections = int(predictions_np['num_detections'][0])
                np_boxes = predictions_np['detection_boxes'][
                    0, :num_detections]
                np_scores = predictions_np['detection_scores'][
                    0, :num_detections]
                np_classes = predictions_np['detection_classes'][
                    0, :num_detections]
                np_classes = np_classes.astype(np.int32)
                np_attributes = predictions_np['detection_attributes'][
                    0, :num_detections, :]
                np_masks = None
                encoded_masks = None
                if 'detection_masks' in predictions_np:
                    instance_masks = predictions_np['detection_masks'][
                        0, :num_detections]
                    np_masks = mask_utils.paste_instance_masks(
                        instance_masks, box_utils.yxyx_to_xywh(np_boxes),
                        height, width)
                    encoded_masks = [
                        mask_api.encode(np.asfortranarray(np_mask))
                        for np_mask in list(np_masks)
                    ]

                res.append({
                    'image_file': image_file,
                    'boxes': np_boxes,
                    'classes': np_classes,
                    'scores': np_scores,
                    'attributes': np_attributes,
                    'masks': encoded_masks,
                })

                image_with_detections = (
                    visualization_utils.
                    visualize_boxes_and_labels_on_image_array(
                        np_image,
                        np_boxes,
                        np_classes,
                        np_scores,
                        label_map_dict,
                        instance_masks=np_masks,
                        use_normalized_coordinates=False,
                        max_boxes_to_draw=FLAGS.max_boxes_to_draw,
                        min_score_thresh=FLAGS.min_score_threshold))
                image_with_detections_list.append(image_with_detections)

    print(' - Saving the outputs...')
    formatted_image_with_detections_list = [
        Image.fromarray(image.astype(np.uint8))
        for image in image_with_detections_list
    ]
    html_str = '<html>'
    image_strs = []
    for formatted_image in formatted_image_with_detections_list:
        with io.BytesIO() as stream:
            formatted_image.save(stream, format='JPEG')
            data_uri = base64.b64encode(stream.getvalue()).decode('utf-8')
        image_strs.append(
            '<img src="data:image/jpeg;base64,{}" height=800>'.format(
                data_uri))
    images_str = ' '.join(image_strs)
    html_str += images_str
    html_str += '</html>'
    with tf.gfile.GFile(FLAGS.output_html, 'w') as f:
        f.write(html_str)
    np.save(FLAGS.output_file, res)
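
The saved .npy file holds a list of per-image dicts, so it has to be loaded with allow_pickle enabled. A minimal read-back sketch, assuming the output path was 'predictions.npy' (hypothetical) and that mask_api above is pycocotools.mask:

import numpy as np
from pycocotools import mask as mask_api

res = np.load('predictions.npy', allow_pickle=True)
first = res[0]
print(first['image_file'], first['boxes'].shape)
if first['masks'] is not None:
    # Each entry is an RLE dict as produced by mask_api.encode above.
    binary_mask = mask_api.decode(first['masks'][0])  # uint8, (height, width)
    print('Mask area in pixels:', int(binary_mask.sum()))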
Code example #14
File: inference.py  Project: Team-Bit-by-bit/lavanyam
def initiate():
    # Load the label map.
    print(' - Loading the label map...')
    label_map_dict = {}
    # The label map format was inlined as 'csv' in this project, so the
    # original format check reduces to reading the CSV file directly.
    with tf.gfile.Open('dataset/fashionpedia_label_map.csv', 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=':')
        for row in reader:
            if len(row) != 2:
                raise ValueError(
                    'Each row of the csv label map file must be in '
                    '`id:name` format.')
            id_index = int(row[0])
            name = row[1]
            label_map_dict[id_index] = {
                'id': id_index,
                'name': name,
            }

    params = config_factory.config_generator('attribute_mask_rcnn')
    # The config file path was inlined here, so the original guard is always
    # true and the empty params_override is a no-op; both reduce to a single
    # override from the YAML config.
    params = params_dict.override_params_dict(
        params, 'configs/yaml/spinenet49_amrcnn.yaml', is_strict=True)
    params.override(
        {
            'architecture': {
                'use_bfloat16': False,  # The inference runs on CPU/GPU.
            },
        },
        is_strict=True)
    params.validate()
    params.lock()

    model = model_factory.model_generator(params)

    with tf.Graph().as_default():
        image_input = tf.placeholder(shape=(), dtype=tf.string)
        image = tf.io.decode_image(image_input, channels=3)
        image.set_shape([None, None, 3])

        image = input_utils.normalize_image(image)
        image_size = [640, 640]
        image, image_info = input_utils.resize_and_crop_image(
            image,
            image_size,
            image_size,
            aug_scale_min=1.0,
            aug_scale_max=1.0)
        image.set_shape([image_size[0], image_size[1], 3])

        # Batching.
        images = tf.reshape(image, [1, image_size[0], image_size[1], 3])
        images_info = tf.expand_dims(image_info, axis=0)

        # Model inference.
        outputs = model.build_outputs(images, {'image_info': images_info},
                                      mode=mode_keys.PREDICT)

        outputs['detection_boxes'] = (
            outputs['detection_boxes'] /
            tf.tile(images_info[:, 2:3, :], [1, 1, 2]))

        predictions = outputs

        # Create a saver in order to load the pre-trained checkpoint.
        saver = tf.train.Saver()
        sess = tf.Session()
        print(' - Loading the checkpoint...')
        saver.restore(sess, 'fashionpedia-spinenet-49/model.ckpt')
        print(' - Checkpoint Loaded...')
        return sess, predictions, image_input
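
Unlike the previous example, initiate() deliberately leaves the session open and returns the handles needed for repeated inference. A minimal usage sketch, assuming a local image file 'sample.jpg' (hypothetical path):

sess, predictions, image_input = initiate()

with tf.gfile.GFile('sample.jpg', 'rb') as f:
    image_bytes = f.read()

predictions_np = sess.run(predictions, feed_dict={image_input: image_bytes})
print('Detections:', int(predictions_np['num_detections'][0]))

sess.close()  # Release the session once done.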
Code example #15
    def _parse_train_data_v2(self, data):
        """Parses data for training.

        Args:
          data: the decoded tensor dictionary from TfExampleDecoder.

        Returns:
          image: image tensor that is preprocessed to have normalized values and
            dimension [output_size[0], output_size[1], 3]
          labels: a dictionary of tensors used for training. The following describes
            {key: value} pairs in the dictionary.
            image_info: a 2D `Tensor` that encodes the information of the image and
              the applied preprocessing. It is in the format of
              [[original_height, original_width], [scaled_height, scaled_width],
              [y_scale, x_scale], [y_offset, x_offset]].
            anchor_boxes: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensor with
              shape [height_l, width_l, 4] representing anchor boxes at each level.
            rpn_score_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensor with
              shape [height_l, width_l, anchors_per_location]. The height_l and
              width_l represent the dimension of class logits at l-th level.
            rpn_box_targets: ordered dictionary with keys
              [min_level, min_level+1, ..., max_level]. The values are tensor with
              shape [height_l, width_l, anchors_per_location * 4]. The height_l and
              width_l represent the dimension of bounding box regression output at
              l-th level.
            gt_boxes: Groundtruth bounding box annotations. The box is represented
              in [y1, x1, y2, x2] format. The coordinates are w.r.t. the scaled
              image that is fed to the network. The tensor is padded with -1 to
              the fixed dimension [self._max_num_instances, 4].
            gt_classes: Groundtruth classes annotations. The tensor is padded
              with -1 to the fixed dimension [self._max_num_instances].
            gt_masks: Groundtruth masks cropped by the bounding box and
              resized to a fixed size determined by mask_crop_size.
        """
        if self._use_autoaugment:
            try:
                # pylint: disable=g-import-not-at-top
                from utils import autoaugment_utils
            except ImportError as e:
                logging.exception("Autoaugment is not supported in TF 2.x.")
                raise e

        classes = data["groundtruth_classes"]
        boxes = data["groundtruth_boxes"]
        masks = None
        attributes = None

        if self._include_mask:
            masks = data["groundtruth_instance_masks"]

        if self._num_attributes:
            attributes = data["groundtruth_attributes"]

        is_crowds = data["groundtruth_is_crowd"]
        # Skips annotations with `is_crowd` = True.
        if self._skip_crowd_during_training and self._is_training:
            num_groundtruths = tf.shape(input=classes)[0]
            with tf.control_dependencies([num_groundtruths, is_crowds]):
                indices = tf.cond(
                    pred=tf.greater(tf.size(input=is_crowds), 0),
                    true_fn=lambda: tf.compat.v1.where(
                        tf.logical_not(is_crowds))[:, 0],
                    false_fn=lambda: tf.cast(
                        tf.range(num_groundtruths), tf.int64),
                )
            classes = tf.gather(classes, indices)
            boxes = tf.gather(boxes, indices)

            if self._include_mask:
                masks = tf.gather(masks, indices)

            if self._num_attributes:
                attributes = tf.gather(attributes, indices)

        # Gets original image and its size.
        image = data["image"]

        # NOTE: The autoaugment method works best when used alongside the standard
        # horizontal flipping of images along with size jittering and normalization.
        if self._use_autoaugment and not self._apply_autoaugment_after_resizing:
            (
                image,
                boxes,
                masks,
            ) = autoaugment_utils.distort_image_and_masks_with_autoaugment(
                image, boxes, masks, self._autoaugment_policy_name)

        # Flips image randomly during training.
        if self._aug_rand_hflip:
            if self._include_mask:
                image, boxes, masks = input_utils.random_horizontal_flip(
                    image, boxes, masks)
            else:
                image, boxes = input_utils.random_horizontal_flip(image, boxes)

        # Resizes and crops image.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image, image_info = input_utils.resize_and_crop_image(
            image,
            self._output_size,
            aug_scale_min=self._aug_scale_min,
            aug_scale_max=self._aug_scale_max,
        )

        # Converts boxes from normalized coordinates to pixel coordinates.
        # Now the coordinates of boxes are w.r.t. the original image.
        orig_image_shape = image_info[0]
        boxes = box_utils.denormalize_boxes(boxes, orig_image_shape)

        # Resizes and crops boxes.
        # Now the coordinates of boxes are w.r.t the scaled image.
        rescaled_image_shape = tf.shape(input=image)[:2]
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        boxes = input_utils.resize_and_crop_boxes(boxes, image_scale,
                                                  rescaled_image_shape, offset)

        # Filters out ground truth boxes that are all zeros.
        boxes, classes, masks, attributes = self._remove_empty_boxes(
            boxes, classes, masks, attributes)

        # Applies the autoaugment after resizing.
        if self._use_autoaugment and self._apply_autoaugment_after_resizing:
            # Prepares image and boxes for the autoaugment.
            image = tf.image.convert_image_dtype(image, dtype=tf.uint8)
            boxes = box_utils.normalize_boxes(boxes, rescaled_image_shape)

            # Prepares masks for the autoaugment.
            masks = tf.expand_dims(masks, axis=-1)
            scaled_mask_size = tf.cast(
                tf.round(orig_image_shape * image_scale), tf.int32)
            scaled_masks = tf.image.resize(
                masks, scaled_mask_size, method=tf.image.ResizeMethod.BILINEAR)
            offset_int = tf.cast(offset, tf.int32)
            masks = scaled_masks[
                :,
                offset_int[0]:offset_int[0] + rescaled_image_shape[0],
                offset_int[1]:offset_int[1] + rescaled_image_shape[1],
            ]
            masks = tf.squeeze(masks, axis=-1)
            masks = tf.cast(tf.greater(masks, 0.5), tf.float32)

            # Applies the autoaugment.
            (
                image,
                boxes,
                masks,
            ) = autoaugment_utils.distort_image_and_masks_with_autoaugment(
                image, boxes, masks, self._autoaugment_policy_name)

            # Converts the image back to float32 and denormalizes bboxes.
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            boxes = box_utils.denormalize_boxes(boxes, rescaled_image_shape)

            # Filters out empty bboxes.
            boxes, classes, masks, attributes = self._remove_empty_boxes(
                boxes, classes, masks, attributes)

        if self._include_mask:
            if self._use_autoaugment and self._apply_autoaugment_after_resizing:
                # Don't rescale boxes as masks were already resized.
                cropped_boxes = box_utils.normalize_boxes(
                    boxes, rescaled_image_shape)
            else:
                # Transfers boxes to the original image space.
                cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0),
                                                [1, 2])
                cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0),
                                         [1, 2])
                cropped_boxes = box_utils.normalize_boxes(
                    cropped_boxes, orig_image_shape)

            num_masks = tf.shape(input=masks)[0]
            masks = tf.image.crop_and_resize(
                tf.expand_dims(masks, axis=-1),
                cropped_boxes,
                box_indices=tf.range(num_masks, dtype=tf.int32),
                crop_size=[self._mask_crop_size, self._mask_crop_size],
                method="bilinear",
            )
            masks = tf.squeeze(masks, axis=-1)

        # Normalizes image with mean and std pixel values.
        image = input_utils.normalize_image(image)

        # Pads the image.
        padded_size = input_utils.compute_padded_size(self._output_size,
                                                      2**self._max_level)
        image = tf.image.pad_to_bounding_box(image, 0, 0, padded_size[0],
                                             padded_size[1])

        image_height, image_width, _ = image.get_shape().as_list()

        # Assigns anchor targets.
        # Note that after the target assignment, box targets are absolute pixel
        # offsets w.r.t. the scaled image.
        input_anchor = anchor.Anchor(
            self._min_level,
            self._max_level,
            self._num_scales,
            self._aspect_ratios,
            self._anchor_size,
            (image_height, image_width),
        )
        anchor_labeler = anchor.RpnAnchorLabeler(
            input_anchor,
            self._rpn_match_threshold,
            self._rpn_unmatched_threshold,
            self._rpn_batch_size_per_im,
            self._rpn_fg_fraction,
        )
        rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
            boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))

        # If bfloat16 is used, casts input image to tf.bfloat16.
        if self._use_bfloat16:
            image = tf.cast(image, dtype=tf.bfloat16)

        # Packs labels for model_fn outputs.
        labels = {
            "anchor_boxes": input_anchor.multilevel_boxes,
            "image_info": image_info,
            "rpn_score_targets": rpn_score_targets,
            "rpn_box_targets": rpn_box_targets,
        }
        labels["gt_boxes"] = input_utils.pad_to_fixed_size(
            boxes, self._max_num_instances, -1)
        labels["gt_classes"] = input_utils.pad_to_fixed_size(
            classes, self._max_num_instances, -1)

        if self._include_mask:
            labels["gt_masks"] = input_utils.pad_to_fixed_size(
                masks, self._max_num_instances, -1)

        if self._num_attributes:
            labels["gt_attributes"] = input_utils.pad_to_fixed_size(
                attributes, self._max_num_instances, -1)

        return image, labels
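
This parser is designed to be mapped over decoded TF examples inside a tf.data pipeline. A minimal wiring sketch, not from the original codebase; `parser` and `decoder` stand for hypothetical instances of this parser class and the TfExampleDecoder mentioned in the docstring:

def build_train_dataset(file_pattern, parser, decoder, batch_size):
    # Shuffled TFRecord shards, interleaved for read parallelism.
    dataset = tf.data.Dataset.list_files(file_pattern, shuffle=True)
    dataset = dataset.interleave(
        tf.data.TFRecordDataset,
        cycle_length=16,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Decodes each serialized example, then parses it into (image, labels).
    dataset = dataset.map(
        lambda serialized: parser._parse_train_data_v2(
            decoder.decode(serialized)),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)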