Example #1
0
 def _jitter_scale(self, image, shape, letter_box, jitter, random_pad,
                   aug_scale_min, aug_scale_max, translate, angle,
                   perspective):
     """Apply jitter-resize followed by a random affine warp to one image.

     Returns the transformed image, the list of resize/crop info objects
     accumulated along the way, and the affine transform from the warp.
     """
     # When scale augmentation is active the resize is done in crop-only
     # mode; jitter then contributes one info entry and the crop another,
     # so a pad-info placeholder is prepended to keep the list consistent.
     scaling_active = aug_scale_min != 1.0 or aug_scale_max != 1.0
     infos = []
     if scaling_active:
         infos.append(self._pad_infos_object(image))
     image, crop_info, _ = preprocessing_ops.resize_and_jitter_image(
         image,
         shape,
         letter_box=letter_box,
         jitter=jitter,
         crop_only=scaling_active,
         random_pad=random_pad,
         seed=self._seed,
     )
     infos.extend(crop_info)
     # Random rotation / translation / scale / perspective warp.
     image, _, affine = preprocessing_ops.affine_warp_image(
         image,
         shape,
         scale_min=aug_scale_min,
         scale_max=aug_scale_max,
         translate=translate,
         degrees=angle,
         perspective=perspective,
         random_pad=random_pad,
         seed=self._seed,
     )
     return image, infos, affine
Example #2
0
    def _mosaic_crop_image(self, image, boxes, classes, is_crowd, area):
        """Process a patched image in preparation for final output."""
        if self._mosaic_crop_mode != 'crop':
            image_shape = tf.cast(
                preprocessing_ops.get_image_shape(image), tf.float32)
            center = image_shape * self._mosaic_center

            # Randomly shift the mosaic center by translating the whole
            # stitched image.
            offset_y = tf.math.round(
                preprocessing_ops.random_uniform_strong(
                    -center[0], center[0], seed=self._seed))
            offset_x = tf.math.round(
                preprocessing_ops.random_uniform_strong(
                    -center[1], center[1], seed=self._seed))
            image = tfa.image.translate(
                image, [offset_x, offset_y], fill_value=self._pad_value)

            # Move the boxes by the same offset, clip them to the image, and
            # drop the ones that end up empty.
            boxes = box_ops.denormalize_boxes(boxes, image_shape[:2])
            boxes = boxes + tf.cast(
                [offset_y, offset_x, offset_y, offset_x], boxes.dtype)
            boxes = box_ops.clip_boxes(boxes, image_shape[:2])
            keep = box_ops.get_non_empty_box_indices(boxes)

            boxes = box_ops.normalize_boxes(boxes, image_shape[:2])
            boxes, classes, is_crowd, area = self._select_ind(
                keep,
                boxes,
                classes,  # pylint:disable=unbalanced-tuple-unpacking
                is_crowd,
                area)

        # Warp and scale the fully stitched sample.
        height, width = self._output_size[0], self._output_size[1]
        image, _, affine = preprocessing_ops.affine_warp_image(
            image, [height, width],
            scale_min=self._aug_scale_min,
            scale_max=self._aug_scale_max,
            translate=self._aug_rand_translate,
            degrees=self._aug_rand_angle,
            perspective=self._aug_rand_perspective,
            random_pad=self._random_pad,
            seed=self._seed)
        image = tf.image.resize(image, (height, width))

        # Apply the same affine transform to the boxes and discard those that
        # fall below the area threshold.
        boxes, keep = preprocessing_ops.transform_and_clip_boxes(
            boxes,
            None,
            affine=affine,
            area_thresh=self._area_thresh,
            seed=self._seed)
        classes, is_crowd, area = self._select_ind(
            keep, classes, is_crowd, area)  # pylint:disable=unbalanced-tuple-unpacking
        # NOTE(review): `area` is deliberately kept duplicated in the return
        # tuple here to preserve the original arity -- confirm against the
        # caller whether the final slot was meant to be something else.
        return image, boxes, classes, is_crowd, area, area
Example #3
0
 def testAffineWarpImage(self,
                         image_height,
                         image_width,
                         desired_size,
                         degrees=7.0,
                         scale_min=0.1,
                         scale_max=1.9):
   """Checks that affine_warp_image produces an image of the desired size."""
   random_pixels = np.random.rand(image_height, image_width, 3)
   warped, _, _ = preprocessing_ops.affine_warp_image(
       tf.convert_to_tensor(random_pixels),
       desired_size,
       degrees=degrees,
       scale_min=scale_min,
       scale_max=scale_max)
   expected_shape = [desired_size[0], desired_size[1], 3]
   self.assertAllEqual(expected_shape, tf.shape(warped).numpy())