Example No. 1
 def test_augment_keypoints__kernel_size_is_two__no_keep_size(self):
     from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
     kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
     kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
     expected = KeypointsOnImage.from_xy_array(np.float32(
         [[1.5 / 2, 5.5 / 2], [5.5 / 2, 1.5 / 2]]),
                                               shape=(3, 3, 3))
     self._test_augment_cbaoi__kernel_size_is_two__no_keep_size(
         kpsoi, expected, "augment_keypoints")
Example No. 2
    def _test_augment_keypoints__kernel_size_is_noop(self, kernel_size):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
        aug = self.augmenter(kernel_size)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        assert_cbaois_equal(kpsoi_aug, kpsoi)
Example No. 3
    def test_keypoint_alignment(self):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=10, y=10), Keypoint(x=30, y=30)]
        kpsoi = KeypointsOnImage(kps, shape=(40, 40, 1))
        kpsoi_empty = KeypointsOnImage([], shape=(40, 40, 1))

        self._test_cbaoi_alignment(kpsoi, kpsoi_empty, [[(5, 5)], [(15, 15)]],
                                   [[(10, 10)], [(30, 30)]],
                                   "augment_keypoints")
Example No. 4
def show_pts_on_img(image, pts,
                    img_pth='/home/workspace2/dataset/3dhand/dataset/pts_on_img.png'):

    if pts.shape[1] == 3:
        kps = [Keypoint(x, y) for x, y, _ in pts]
    else:
        kps = [Keypoint(x, y) for x, y in pts]

    kpsoi = KeypointsOnImage(kps, shape=image.shape)
    misc.imsave('/home/workspace2/dataset/3dhand/dataset/img.png', image)
    misc.imsave(img_pth, kpsoi.draw_on_image(image, size=7))
Example No. 5
    def _test_augment_keypoints__kernel_size_is_noop(self, kernel_size):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
        aug = self.augmenter(kernel_size)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        assert kpsoi_aug.shape == (6, 6, 3)
        assert np.allclose(kpsoi_aug.to_xy_array(),
                           [[1.5, 5.5],
                            [5.5, 1.5]])
Example No. 6
    def test_augment_keypoints__kernel_size_is_two__no_keep_size(self):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
        aug = self.augmenter(2, keep_size=False)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        expected = KeypointsOnImage.from_xy_array(np.float32(
            [[1.5 / 2, 5.5 / 2], [5.5 / 2, 1.5 / 2]]),
                                                  shape=(3, 3, 3))
        assert_cbaois_equal(kpsoi_aug, expected)
Example No. 7
    def test_augment_keypoints__kernel_size_is_two__no_keep_size(self):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
        aug = self.augmenter(2, keep_size=False)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        assert kpsoi_aug.shape == (3, 3, 3)
        assert np.allclose(kpsoi_aug.to_xy_array(),
                           [[1.5/2, 5.5/2],
                            [5.5/2, 1.5/2]])
Example No. 8
    def img2xy(self, img_path, gal, gpr, augment=0):
        '''
        Single img file to xys for all blocks and channels
        returns:
            xs:
                cropped window, grayscale[0~255]=>[0~1]
                shape ( #blocks, 1, win_shape[0], win_shape[1] )
            ys:
                gaussian heatmaps of same w, h as xs, one channel for one kpt
            idx:
                (img_path, block) for one x sample
        '''
        idxs, imgs, df = super().img2x(img_path, gal,
                                       gpr)  # imgs: (N, 1, h, w)
        h, w = imgs.shape[2], imgs.shape[3]
        gpr = Gpr(gpr) if type(gpr) == str else gpr
        xs, ys, coords = [], [], []
        for (img_path, b), img, block_df in zip(idxs, imgs, df):
            nrows = gal.header[f'Block{b}'][Gal.N_ROWS]
            ncols = gal.header[f'Block{b}'][Gal.N_COLS]
            kpts = KeypointsOnImage([
                Keypoint(x=block_df.loc[1, 1]['X'], y=block_df.loc[1, 1]['Y']),
                Keypoint(x=block_df.loc[1, ncols]['X'],
                         y=block_df.loc[1, ncols]['Y']),
                Keypoint(x=block_df.loc[nrows, ncols]['X'],
                         y=block_df.loc[nrows, ncols]['Y']),
                Keypoint(x=block_df.loc[nrows, 1]['X'],
                         y=block_df.loc[nrows, 1]['Y'])
            ],
                                    shape=(h, w))

            if augment <= 0:
                xs.append(img)
                coord = self.to_Lcoord(kpts.to_xy_array())
                ys.append(
                    self.coord2heatmap(coord, (img.shape[-2], img.shape[-1])))
                coords.append(coord.flatten())
            else:
                for i in range(augment):
                    img_aug, kpts_aug = self.aug_seq(
                        image=(img[0] * 255).astype('uint8'),
                        keypoints=kpts)  # img: (1, w, h) -> (w, h)
                    coord = self.to_Lcoord(kpts_aug.to_xy_array())  # (3, 2)
                    if self.check_out_of_bounds(img_aug.shape, coord):
                        continue  # skip if coord out of bounds
                    xs.append(np.array([img_aug / 255]))
                    ys.append(
                        self.coord2heatmap(
                            coord, (img_aug.shape[-2], img_aug.shape[-1])))
                    coords.append(coord.flatten())

        return np.stack(xs), np.stack(ys), np.stack(coords)
Example No. 9
    def test_augment_keypoints__kernel_size_differs__requires_padding(self):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=(5, 6, 3))
        aug = self.augmenter(
            (iap.Deterministic(3), iap.Deterministic(2)),
            keep_size=False)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        assert kpsoi_aug.shape == (2, 3, 3)
        assert np.allclose(kpsoi_aug.to_xy_array(),
                           [[(1.5/6)*3, (5.5/5)*2],
                            [(5.5/6)*3, (1.5/5)*2]])
Example No. 10
    def _test_augment_keypoints__kernel_size_differs(self, shape, shape_exp):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
        kpsoi = KeypointsOnImage(kps, shape=shape)
        aug = self.augmenter((iap.Deterministic(3), iap.Deterministic(2)),
                             keep_size=False)

        kpsoi_aug = aug.augment_keypoints(kpsoi)

        expected = KeypointsOnImage.from_xy_array(np.float32([[
            (1.5 / shape[1]) * shape_exp[1], (5.5 / shape[0]) * shape_exp[0]
        ], [(5.5 / shape[1]) * shape_exp[1],
            (1.5 / shape[0]) * shape_exp[0]]]),
                                                  shape=shape_exp)
        assert_cbaois_equal(kpsoi_aug, expected)
Example No. 11
def pts_2_imgaug_keypts(points, img):
    kps = []
    for point in points:
        keypoint = Keypoint(x=point[0], y=point[1])
        kps.append(keypoint)
    kpsoi = KeypointsOnImage(kps, shape=img.shape)
    return kpsoi
Example No. 12
    def _augment_batch(self, images, corners):
        aug = iaa.SomeOf(
            (0, None),
            [
                iaa.KeepSizeByResize(
                    iaa.Affine(
                        translate_px={"x": (10, 30)},
                        rotate=(-5, 5),
                        mode="edge",
                        fit_output=True,
                    )),
                iaa.KeepSizeByResize(
                    iaa.Affine(shear=(-10, 10), mode="edge", fit_output=True)),
                iaa.AddToHueAndSaturation((-50, 50)),
                iaa.AverageBlur(k=(2, 5)),
            ],
            random_order=True,
        )

        # Convert array of corners to list of KeypointsOnImage instances for use with the augmenter.
        keypoints_from_corners = [
            KeypointsOnImage(
                [Keypoint(x=point[0], y=point[1]) for point in img_corners],
                shape=self.dim,
            ) for img_corners in corners
        ]

        images_augm, keypoints_augm = aug.augment(
            images=images, keypoints=keypoints_from_corners)

        # Convert augmented keypoints back to array of size (batch_size, 4, 2).
        corners_augm = np.array(
            [keypoints.to_xy_array() for keypoints in keypoints_augm])

        return images_augm, corners_augm
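
A minimal standalone sketch of the same corners-to-keypoints round trip (array sizes and values below are illustrative, not taken from the original code):

import numpy as np
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

# hypothetical batch: 2 images of size 64x64, 4 corner points each
corners = np.float32([[[5, 5], [60, 5], [60, 60], [5, 60]],
                      [[10, 10], [50, 10], [50, 50], [10, 50]]])
kpsois = [KeypointsOnImage([Keypoint(x=p[0], y=p[1]) for p in img_corners],
                           shape=(64, 64, 3))
          for img_corners in corners]
corners_back = np.array([kpsoi.to_xy_array() for kpsoi in kpsois])
assert corners_back.shape == (2, 4, 2)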
Example No. 13
    def code_anno(self, anno):
        coded_anno = np.zeros((6, 6, 5), dtype='float32')
        origin_w = int(anno['size']['width'])
        origin_h = int(anno['size']['height'])

        xmin = int(anno['object'][0]['bndbox']['xmin']) - 1
        ymin = int(anno['object'][0]['bndbox']['ymin']) - 1
        xmax = int(anno['object'][0]['bndbox']['xmax']) - 1
        ymax = int(anno['object'][0]['bndbox']['ymax']) - 1

        c_x = ((xmax + xmin) / 2) / origin_w
        c_y = ((ymax + ymin) / 2) / origin_h
        w = (xmax - xmin) / origin_w
        h = (ymax - ymin) / origin_h

        if self.aug:
            d = KeypointsOnImage([Keypoint(c_x * 400, c_y * 400)], (400, 400))
            kpsoi_aug = self.seq.augment_keypoints([d])[0]
            c_x = kpsoi_aug.keypoints[0].x / 400
            c_y = kpsoi_aug.keypoints[0].y / 400

        grid_x = int(np.floor(c_x * 6))
        grid_y = int(np.floor(c_y * 6))
        relative_x = c_x * 6 - grid_x
        relative_y = c_y * 6 - grid_y

        coded_anno[grid_y,
                   grid_x, :] = np.array([1, relative_x, relative_y, w, h])
        return coded_anno
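
A quick worked example of the 6x6 grid encoding above, with illustrative centre coordinates (assumes numpy imported as np):

c_x, c_y = 0.7, 0.25
grid_x = int(np.floor(c_x * 6))  # 0.7 * 6 = 4.2 -> grid cell 4
grid_y = int(np.floor(c_y * 6))  # 0.25 * 6 = 1.5 -> grid cell 1
relative_x = c_x * 6 - grid_x    # ~0.2, offset of the centre within its cell
relative_y = c_y * 6 - grid_y    # 0.5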
Example No. 14
    def convert_coco_kp_to_imgaug_kp(self, label):
        kps = []
        valid = np.ones(NUM_COCO_KEYPOINTS)
        invalid_xy = -1
        for i in range(NUM_COCO_KEYPOINTS):
            label_idx = i * NUM_COCO_KP_ATTRBS  # index for label
            # generate empty heatmap for unlabelled kp
            if label[label_idx + (NUM_COCO_KP_ATTRBS - 1)] == 0:
                # invalid keypoint
                valid[i] = 0
                kps.append(Keypoint(x=invalid_xy, y=invalid_xy))
                continue
            kpx = int(label[label_idx])
            kpy = int(label[label_idx + 1])
            kps.append(Keypoint(x=kpx, y=kpy))

        return kps, valid
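
For reference, the flat label layout this method expects can be sketched as below; the constants are assumptions here (the usual COCO values are 17 keypoints with 3 attributes each: x, y, visibility):

NUM_COCO_KEYPOINTS = 17  # assumed
NUM_COCO_KP_ATTRBS = 3   # (x, y, visibility)

label = [0] * (NUM_COCO_KEYPOINTS * NUM_COCO_KP_ATTRBS)
label[0:3] = [120, 85, 2]  # keypoint 0 labelled at (120, 85), visibility 2
# every keypoint i with label[i * 3 + 2] == 0 is replaced by Keypoint(-1, -1)
# and flagged with valid[i] = 0 by the method above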
Example No. 15
    def to_keypoints(self):
        """Convert the BB's corners to keypoints (clockwise, from top left).

        Returns
        -------
        list of imgaug.augmentables.kps.Keypoint
            Corners of the bounding box as keypoints.

        """
        # TODO get rid of this deferred import
        from imgaug.augmentables.kps import Keypoint

        return [
            Keypoint(x=self.x1, y=self.y1),
            Keypoint(x=self.x2, y=self.y1),
            Keypoint(x=self.x2, y=self.y2),
            Keypoint(x=self.x1, y=self.y2)
        ]
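
A short usage sketch of this method (coordinates chosen for illustration):

from imgaug.augmentables.bbs import BoundingBox

bb = BoundingBox(x1=10, y1=20, x2=30, y2=40)
corners = bb.to_keypoints()
# clockwise from top left: (10, 20), (30, 20), (30, 40), (10, 40)
print([(kp.x, kp.y) for kp in corners])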
Example No. 16
def aug_data(seq, img, pts=None):
    if pts is None:
        return seq(image=img)
    pts_shape = pts.shape
    kps = [Keypoint(x=x[0], y=x[1]) for x in pts.reshape(-1, 2)]
    kpsoi = KeypointsOnImage(kps, shape=img.shape)
    img_auged, kpsoi_auged = seq(image=img, keypoints=kpsoi)
    res_pts = kpsoi_auged.to_xy_array().reshape(pts_shape)
    return img_auged, res_pts
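
A minimal usage sketch for aug_data, assuming a simple augmenter; the point array is returned in whatever shape it was passed in:

import numpy as np
import imgaug.augmenters as iaa

seq = iaa.Fliplr(1.0)  # always mirror horizontally
img = np.zeros((8, 8, 3), dtype=np.uint8)
pts = np.float32([[[1.0, 2.0], [3.0, 4.0]]])  # shape (1, 2, 2)

img_aug, pts_aug = aug_data(seq, img, pts)
assert pts_aug.shape == pts.shape  # reshaped back to the original layout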
Example No. 17
 def np_to_keypoints(np_kpoints, image_size):
     kps = [
         Keypoint(x=np_kpoints[0], y=image_size[0] - np_kpoints[1]),
         Keypoint(x=np_kpoints[2], y=image_size[0] - np_kpoints[3]),
         Keypoint(x=np_kpoints[4], y=image_size[0] - np_kpoints[5]),
         Keypoint(x=np_kpoints[6], y=image_size[0] - np_kpoints[7]),
         Keypoint(x=np_kpoints[8], y=image_size[0] - np_kpoints[9]),
         Keypoint(x=np_kpoints[10], y=image_size[0] - np_kpoints[11]),
         Keypoint(x=np_kpoints[12], y=image_size[0] - np_kpoints[13]),
         Keypoint(x=np_kpoints[14], y=image_size[0] - np_kpoints[15]),
     ]
     return kps
Example No. 18
 def augment(self, img, corners, row_inc=0, col_inc=0):
     '''
     args:
         corners: [top left, bottom left, bottom right, top right] (4, 2)
         row_inc (int): changes n_row
     returns:
         aug_img
         coords: corner coords (4, 2)
     '''
     img_aug, corners_aug, row_cut_coords = self.augment_row(
         img, corners, row_inc)
     img_aug, corners_aug, col_cut_coords = self.augment_col(
         img_aug, corners_aug, col_inc)
     kpts = KeypointsOnImage([
         Keypoint(x=corners_aug[0, 0], y=corners_aug[0, 1]),
         Keypoint(x=corners_aug[1, 0], y=corners_aug[1, 1]),
         Keypoint(x=corners_aug[2, 0], y=corners_aug[2, 1]),
         Keypoint(x=corners_aug[3, 0], y=corners_aug[3, 1]),
     ],
                             shape=img_aug.shape)
     return img_aug, kpts, (row_cut_coords, col_cut_coords)
Example No. 19
    def test_keypoint_alignment(self):
        from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
        aug = self.augmenter((1, 2), keep_size=False)
        image = np.zeros((40, 40, 1), dtype=np.uint8)

        kps = [Keypoint(x=10, y=10), Keypoint(x=30, y=30)]
        kpsoi = KeypointsOnImage(kps, shape=image.shape)
        kpsoi_empty = KeypointsOnImage([], shape=image.shape)

        images_batch = [image, image, image, image]
        kpsoi_batch = [kpsoi, kpsoi, kpsoi_empty, kpsoi]

        nb_iterations = 10
        for _ in sm.xrange(nb_iterations):
            images_aug, kpsois_aug = aug(images=images_batch,
                                         keypoints=kpsoi_batch)

            for index in [0, 1, 3]:
                image_aug = images_aug[index]
                kpsoi_aug = kpsois_aug[index]

                assert image_aug.shape == kpsoi_aug.shape

                if image_aug.shape == (20, 20, 1):
                    assert np.allclose(
                        kpsoi_aug.to_xy_array(),
                        [[5, 5], [15, 15]]
                    )
                else:
                    assert np.allclose(
                        kpsoi_aug.to_xy_array(),
                        [[10, 10], [30, 30]]
                    )

            for index in [2]:
                image_aug = images_aug[index]
                kpsoi_aug = kpsois_aug[index]

                assert kpsoi_aug.shape == image_aug.shape
                assert len(kpsoi_aug.keypoints) == 0
Example No. 20
 def transformKeypoints(self, keypoints, augmentation, imageshape):
     imgaug_keypoints = []
     for i in range(len(keypoints)):
         imgaug_keypoints.append(
             Keypoint(x=keypoints[i, 0], y=keypoints[i, 1]))
     kpsoi = KeypointsOnImage(imgaug_keypoints, shape=imageshape)
     keypoints_aug = augmentation(keypoints=kpsoi)
     if isinstance(keypoints, np.ndarray):
         keypoints[:, :2] = keypoints_aug.to_xy_array()
     else:
         keypoints[:, :2] = torch.from_numpy(
             keypoints_aug.to_xy_array())
     return keypoints
Example No. 21
def generate_augment_dataset(dataset_dir, name_list, num_aug=500):

    for name in name_list:

        image_name = os.path.join(dataset_dir, name + '.jpg')
        json_name = os.path.join(dataset_dir, name + '.json')

        image = Image.open(image_name).convert('RGB')
        image = np.asarray(image)
        points, labels = parse_annotations(json_name)

        kps = [
            Keypoint(x=int(points[i][0]), y=int(points[i][1]))
            for i in range(len(points))
        ]
        kpsoi = KeypointsOnImage(kps, shape=image.shape)

        for j in range(num_aug):
            image_aug, kpsoi_aug = seq(image=image, keypoints=kpsoi)
            points_aug = kpsoi_aug.keypoints
            height, width, _ = image_aug.shape

            image_aug_name = os.path.join(dataset_dir,
                                          name + '_' + str(j) + '.jpg')
            json_aug_name = os.path.join(dataset_dir,
                                         name + '_' + str(j) + '.json')
            data = {}
            data['version'] = '4.2.10'
            data['flags'] = {}
            data['shapes'] = []
            for k in range(len(points_aug)):
                point_name = labels[k]
                p_x = int(points_aug[k].x)
                p_y = int(points_aug[k].y)
                data['shapes'].append({
                    'label': point_name,
                    'points': [[p_x, p_y]],
                    'group_id': None,
                    'shape_type': 'point',
                    'flags': {}
                })
            data['imagePath'] = name + '_' + str(j) + '.jpg'
            data['imageData'] = image_to_base64(image_aug)
            data['imageHeight'] = height
            data['imageWidth'] = width
            with open(json_aug_name, 'w') as outfile:
                json.dump(data, outfile, indent=4)
            image_aug = Image.fromarray(image_aug)
            image_aug.save(image_aug_name)
            print('success')
Example No. 22
def quokka_keypoints(size=None, extract=None):
    """Return example keypoints on the standard example quokke image.

    The keypoints cover the eyes, ears, nose and paws.

    Added in 0.5.0. (Moved from ``imgaug.imgaug``.)

    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If
        ``None``, then the keypoints are not projected to any new size
        (positions on the original image are used). ``float`` s lead to
        relative size changes, ``int`` s to absolute sizes in pixels.

    extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.

    Returns
    -------
    imgaug.augmentables.kps.KeypointsOnImage
        Example keypoints on the quokka image.

    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(_QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    keypoints = []
    for kp_dict in json_dict["keypoints"]:
        keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        kpsoi = kpsoi.on(shape_resized)
    return kpsoi
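
A hedged usage sketch (in released imgaug versions this helper is reachable from the top-level namespace as ia.quokka_keypoints):

import imgaug as ia

kpsoi_half = ia.quokka_keypoints(size=0.5)            # projected onto a half-size image
kpsoi_square = ia.quokka_keypoints(extract="square")  # relative to the square crop
print(kpsoi_half.shape, len(kpsoi_half.keypoints))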
Example No. 23
            def getimage_FAN(self,
                             imagefile,
                             augmentations=None,
                             keypoints=None,
                             is_it_test_sample=False):

                image = cv2.cvtColor(
                    cv2.imread(
                        self.GetFullImagePath(self, imagefile,
                                              is_it_test_sample)),
                    cv2.COLOR_BGR2RGB)

                if (augmentations is not None):
                    keypoints_originalres = self.keypointsToOriginalResolution(
                        self, imagefile, keypoints)
                    imgaug_keypoints = []
                    for i in range(len(keypoints)):
                        imgaug_keypoints.append(
                            Keypoint(x=keypoints_originalres[i, 0],
                                     y=keypoints_originalres[i, 1]))
                    kpsoi = KeypointsOnImage(imgaug_keypoints,
                                             shape=image.shape)
                    image, keypoitns_aug = self.augmentations(image=image,
                                                              keypoints=kpsoi)

                    keypoints_originalres = np.column_stack(
                        (keypoitns_aug.to_xy_array(),
                         keypoints_originalres[:, 2:]))

                minx, miny, maxx, maxy = self.getFANBox(
                    self, imagefile, image.shape[1], image.shape[0],
                    is_it_test_sample)

                image = image[miny:maxy, minx:maxx, :]
                image = cv2.resize(image, dsize=(256, 256))

                if (keypoints is not None):
                    augmentedkeypoints = self.keypointsToFANResolution(
                        self, imagefile, keypoints_originalres, self.W, self.H)

                    return image, augmentedkeypoints

                return image
Example No. 24
    def __data_generation(self, image_keys_temp):
        batch_images = np.empty((self.batch_size, IMG_SIZE, IMG_SIZE, 3),
                                dtype="int")
        batch_keypoints = np.empty((self.batch_size, 1, 1, NUM_KEYPOINTS),
                                   dtype="float32")

        for i, key in enumerate(image_keys_temp):
            data = get_dog(key)
            current_keypoint = np.array(data["joints"])[:, :2]
            kps = []

            # To apply our data augmentation pipeline, we first need to
            # form Keypoint objects with the original coordinates.
            for j in range(0, len(current_keypoint)):
                kps.append(
                    Keypoint(x=current_keypoint[j][0],
                             y=current_keypoint[j][1]))

            # We then project the original image and its keypoint coordinates.
            current_image = data["img_data"]
            kps_obj = KeypointsOnImage(kps, shape=current_image.shape)

            # Apply the augmentation pipeline.
            (new_image, new_kps_obj) = self.aug(image=current_image,
                                                keypoints=kps_obj)
            batch_images[i, ] = new_image

            # Parse the coordinates from the new keypoint object.
            kp_temp = []
            for keypoint in new_kps_obj:
                kp_temp.append(np.nan_to_num(keypoint.x))
                kp_temp.append(np.nan_to_num(keypoint.y))

            # More on why this reshaping later.
            batch_keypoints[i, ] = np.array(kp_temp).reshape(1, 1, 24 * 2)

        # Scale the coordinates to [0, 1] range.
        batch_keypoints = batch_keypoints / IMG_SIZE

        return (batch_images, batch_keypoints)
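
Because the targets are scaled to [0, 1] and flattened to shape (1, 1, 48), recovering pixel coordinates later is just the inverse; a sketch, assuming the same IMG_SIZE as above:

target = batch_keypoints[0]                # shape (1, 1, 48), values in [0, 1]
coords = target.reshape(24, 2) * IMG_SIZE  # back to 24 (x, y) pixel pairs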
Example No. 25
            def getimage_FAN(self,
                             imagefile,
                             augmentations=None,
                             keypoints=None,
                             is_it_test_sample=False):

                image = cv2.cvtColor(
                    cv2.imread(
                        self.GetFullImagePath(self, imagefile,
                                              is_it_test_sample)),
                    cv2.COLOR_BGR2RGB)

                if (augmentations is not None):
                    keypoints_originalres = self.keypointsToOriginalResolution(
                        self, imagefile, keypoints)
                    imgaug_keypoints = []
                    for i in range(len(keypoints)):
                        imgaug_keypoints.append(
                            Keypoint(x=keypoints_originalres[i, 0],
                                     y=keypoints_originalres[i, 1]))
                    kpsoi = KeypointsOnImage(imgaug_keypoints,
                                             shape=image.shape)
                    image, keypoitns_aug = self.augmentations(image=image,
                                                              keypoints=kpsoi)

                    keypoints_originalres = np.column_stack(
                        (keypoitns_aug.to_xy_array(),
                         keypoints_originalres[:, 2:]))

                scaledImage = self.scaleToFANRes(image=image)

                if (keypoints is not None):
                    augmentedkeypoints = self.keypointsToFANResolution(
                        self, imagefile, keypoints_originalres)

                    return scaledImage, augmentedkeypoints
                return scaledImage
Example No. 26
    def augment_image(self, image, landmarks):
        """
           Generates a batch of augmented images with landmarks from image and its landmarks
           :param image: image which will be augmented
           :param landmarks: numpy coordinates ([x], [x, y] or [x, y, z]) of the landmark point of the image.
           :return: numpy array of the landmark image.
       """
        keypoints = []
        for k in list(range(0, 2 * self.landmarks_num - 1, 2)):
            keypoint = Keypoint(x=(float(landmarks[k])),
                                y=float(landmarks[k + 1]))
            keypoints.append(keypoint)

        keypoints_on_image = KeypointsOnImage(keypoints, shape=image.shape)

        images = [image for _ in range(self.batch_size)]
        keypoints_on_images = [
            keypoints_on_image for _ in range(self.batch_size)
        ]

        images_aug, kpsois_aug = self.seq(images=images,
                                          keypoints=keypoints_on_images)

        return images_aug, kpsois_aug
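
The same batching idea as a standalone sketch outside the class (augmenter, image and batch size are arbitrary placeholders):

import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

seq = iaa.Affine(rotate=(-10, 10))
image = np.zeros((32, 32, 3), dtype=np.uint8)
kpsoi = KeypointsOnImage([Keypoint(x=8, y=8), Keypoint(x=24, y=24)], shape=image.shape)

batch_size = 4
images_aug, kpsois_aug = seq(images=[image] * batch_size,
                             keypoints=[kpsoi] * batch_size)
# each entry of kpsois_aug is aligned with the corresponding augmented image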
Example No. 27
def _invert_normalize_polygons_and_line_strings(inputs, inputs_old,
                                                estimate_ntype_func,
                                                cls_single_name, cls_oi_name,
                                                get_entities_func,
                                                get_points_func):
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint

    ntype = estimate_ntype_func(inputs_old)
    if ntype == "None":
        assert inputs is None
        return inputs
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        input_dtype = inputs_old.dtype
        return restore_dtype_and_merge(
            [[get_points_func(entity) for entity in get_entities_func(oi)]
             for oi in inputs], input_dtype)
    elif ntype == cls_single_name:
        assert len(inputs) == 1
        assert len(get_entities_func(inputs[0])) == 1
        return get_entities_func(inputs[0])[0]
    elif ntype == cls_oi_name:
        assert len(inputs) == 1
        return inputs[0]
    elif ntype == "iterable[empty]":
        assert inputs is None
        return []
    elif ntype in [
            "iterable-array[float]", "iterable-array[int]",
            "iterable-array[uint]"
    ]:
        nonempty, _, _ = find_first_nonempty(inputs_old)
        input_dtype = nonempty.dtype
        return [
            restore_dtype_and_merge(
                [get_points_func(entity)
                 for entity in get_entities_func(oi)], input_dtype)
            for oi in inputs
        ]
    elif ntype == "iterable-tuple[number,size=2]":
        assert len(inputs) == 1
        assert len(get_entities_func(inputs[0])) == 1
        return [(point[0], point[1])
                for point in get_points_func(get_entities_func(inputs[0])[0])]
    elif ntype == "iterable-Keypoint":
        assert len(inputs) == 1
        assert len(get_entities_func(inputs[0])) == 1
        return [
            Keypoint(x=point[0], y=point[1])
            for point in get_points_func(get_entities_func(inputs[0])[0])
        ]
    elif ntype == ("iterable-%s" % (cls_single_name, )):
        assert len(inputs) == 1
        assert len(get_entities_func(inputs[0])) == len(inputs_old)
        return get_entities_func(inputs[0])
    elif ntype == ("iterable-%s" % (cls_oi_name, )):
        return inputs
    elif ntype == "iterable-iterable[empty]":
        assert inputs is None
        return inputs_old[:]
    elif ntype in [
            "iterable-iterable-array[float]", "iterable-iterable-array[int]",
            "iterable-iterable-array[uint]"
    ]:
        nonempty, _, _ = find_first_nonempty(inputs_old)
        input_dtype = nonempty.dtype
        return [[
            restore_dtype_and_merge(get_points_func(entity), input_dtype)
            for entity in get_entities_func(oi)
        ] for oi in inputs]
    elif ntype == "iterable-iterable-tuple[number,size=2]":
        assert len(inputs) == 1
        return [[(point[0], point[1]) for point in get_points_func(entity)]
                for entity in get_entities_func(inputs[0])]
    elif ntype == "iterable-iterable-Keypoint":
        assert len(inputs) == 1
        return [[
            Keypoint(x=point[0], y=point[1])
            for point in get_points_func(entity)
        ] for entity in get_entities_func(inputs[0])]
    elif ntype == ("iterable-iterable-%s" % (cls_single_name, )):
        return [get_entities_func(oi) for oi in inputs]
    elif ntype == "iterable-iterable-iterable[empty]":
        return inputs_old[:]
    elif ntype == "iterable-iterable-iterable-tuple[number,size=2]":
        return [[[(point[0], point[1]) for point in get_points_func(entity)]
                 for entity in get_entities_func(oi)] for oi in inputs]
    else:
        assert ntype == "iterable-iterable-iterable-Keypoint", (
            "Got unknown normalization type '%s'." % (ntype, ))
        return [[[
            Keypoint(x=point[0], y=point[1])
            for point in get_points_func(entity)
        ] for entity in get_entities_func(oi)] for oi in inputs]
Example No. 28
def normalize_keypoints(inputs, shapes=None):
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

    shapes = _preprocess_shapes(shapes)
    ntype = estimate_keypoints_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype,
        to_ntype="List[KeypointsOnImage]",
        shapes=shapes)

    if ntype == "None":
        return inputs
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        _assert_single_array_ndim(inputs, 3, "(N,K,2)", "KeypointsOnImage")
        _assert_single_array_last_dim_exactly(inputs, 2, "KeypointsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(attr_i, shape=shape)
            for attr_i, shape in zip(inputs, shapes)
        ]
    elif ntype == "tuple[number,size=2]":
        _assert_exactly_n_shapes_partial(n=1)
        return [
            KeypointsOnImage([Keypoint(x=inputs[0], y=inputs[1])],
                             shape=shapes[0])
        ]
    elif ntype == "Keypoint":
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage([inputs], shape=shapes[0])]
    elif ntype == "KeypointsOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype in [
            "iterable-array[float]", "iterable-array[int]",
            "iterable-array[uint]"
    ]:
        _assert_many_arrays_ndim(inputs, 2, "(K,2)", "KeypointsOnImage")
        _assert_many_arrays_last_dim_exactly(inputs, 2, "KeypointsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(attr_i, shape=shape)
            for attr_i, shape in zip(inputs, shapes)
        ]
    elif ntype == "iterable-tuple[number,size=2]":
        _assert_exactly_n_shapes_partial(n=1)
        return [
            KeypointsOnImage([Keypoint(x=x, y=y) for x, y in inputs],
                             shape=shapes[0])
        ]
    elif ntype == "iterable-Keypoint":
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage(inputs, shape=shapes[0])]
    elif ntype == "iterable-KeypointsOnImage":
        return inputs
    elif ntype == "iterable-iterable[empty]":
        return None
    elif ntype == "iterable-iterable-tuple[number,size=2]":
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(np.array(attr_i, dtype=np.float32),
                                           shape=shape)
            for attr_i, shape in zip(inputs, shapes)
        ]
    else:
        assert ntype == "iterable-iterable-Keypoint", (
            "Got unknown normalization type '%s'." % (ntype, ))
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage(attr_i, shape=shape)
            for attr_i, shape in zip(inputs, shapes)
        ]
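
A short sketch of two of the accepted input forms (assuming normalize_keypoints is imported from imgaug.augmentables.normalization):

import numpy as np

# an (N=1, K=2, 2) array plus one image shape per array entry
arr = np.float32([[[1.0, 2.0], [3.0, 4.0]]])
kpsois = normalize_keypoints(arr, shapes=[(10, 10, 3)])
# -> [KeypointsOnImage with 2 keypoints on a (10, 10, 3) image]

# a single (x, y) tuple also requires exactly one shape
kpsois = normalize_keypoints((5, 5), shapes=[(10, 10, 3)])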
Example No. 29
 def _test_augment_keypoints__kernel_size_is_noop(self, kernel_size):
     from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
     kps = [Keypoint(x=1.5, y=5.5), Keypoint(x=5.5, y=1.5)]
     kpsoi = KeypointsOnImage(kps, shape=(6, 6, 3))
     self._test_augment_cbaoi__kernel_size_is_noop(kernel_size, kpsoi,
                                                   "augment_keypoints")
Example No. 30
        (pair[1], pair[0]), zip(flipped_kps[1::2], flipped_kps[2::2]))
    # Leave nose unchanged, flip every pair of eye, ear, etc...
    return flipped_img, flipped_fixed_kps


# %% Load sample image
if __name__ == '__main__':
    image = imageio.imread('./data/Macropus_rufogriseus_rufogriseus_Bruny.jpg')
    image = ia.imresize_single_image(image, (389, 259))

    ymax = 389
    xmax = 259
    offset = 4

    kps = [
        Keypoint(x=99, y=81),  # left eye (from camera perspective)
        Keypoint(x=125, y=80),  # right eye
        Keypoint(x=112, y=102),  # nose
        Keypoint(x=102, y=210),  # left paw
        Keypoint(x=127, y=207),  # right paw
        # ***** Extrema to test out of bounds keypoints *****
        Keypoint(x=offset, y=offset),
        Keypoint(x=xmax - offset, y=offset),
        Keypoint(x=offset, y=ymax - offset),
        Keypoint(x=xmax - offset, y=ymax - offset),
        Keypoint(x=xmax * 0.5, y=ymax - offset),
        Keypoint(x=xmax * 0.9, y=ymax - offset),
        Keypoint(x=-1, y=-1),
    ]
    kpsoi = KeypointsOnImage(kps, shape=image.shape)
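
    # A simple follow-up check (illustrative, not from the original script):
    # count how many of the sample points above actually lie inside the image;
    # negative coordinates or coordinates at/above the image size are out of bounds.
    h, w = image.shape[0], image.shape[1]
    xy = kpsoi.to_xy_array()
    inside = (xy[:, 0] >= 0) & (xy[:, 0] < w) & (xy[:, 1] >= 0) & (xy[:, 1] < h)
    print("keypoints inside the image:", int(inside.sum()), "of", len(kps))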