def __create_img_seg(self, img_obj):
        """
        Create a key-point segmentation
        for images without this info in the json file.
        Parameters:
            img_obj is json object with the following format
            {
                "id": 0,
                "width": 1001,
                "height": 709,
                "file_name": "B0049_0007.png",
                "license": 1,
                "date_captured": ""
            }
        Return:
            Keypoint list covering the whole image, for use as a background mask.
        """
        height = img_obj['height']
        width = img_obj['width']
        points = [
            Keypoint(x=0, y=0),
            Keypoint(x=width - 1, y=0),
            Keypoint(x=width - 1, y=height - 1),
            Keypoint(x=0, y=height - 1)
        ]

        return points
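
For reference, a minimal standalone sketch (names here are illustrative) of wrapping the returned corner points into a KeypointsOnImage, which is how imgaug carries keypoints through an augmentation pipeline:

from imgaug.augmentables import Keypoint, KeypointsOnImage

img_obj = {"id": 0, "width": 1001, "height": 709, "file_name": "B0049_0007.png"}
# the four image corners, in the same clockwise order as above
points = [
    Keypoint(x=0, y=0),
    Keypoint(x=img_obj["width"] - 1, y=0),
    Keypoint(x=img_obj["width"] - 1, y=img_obj["height"] - 1),
    Keypoint(x=0, y=img_obj["height"] - 1),
]
kps = KeypointsOnImage(points, shape=(img_obj["height"], img_obj["width"]))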
Example #2
def get_kps_bbs(part, box, image, index):
    # Key points
    kps = KeypointsOnImage([
        Keypoint(x=int(part[0].attributes['x'].value),
                 y=int(part[0].attributes['y'].value)),
        Keypoint(x=int(part[1].attributes['x'].value),
                 y=int(part[1].attributes['y'].value)),
        Keypoint(x=int(part[2].attributes['x'].value),
                 y=int(part[2].attributes['y'].value)),
        Keypoint(x=int(part[3].attributes['x'].value),
                 y=int(part[3].attributes['y'].value)),
    ],
                           shape=image.shape)

    # Bounding box
    top = int(box[index].attributes['top'].value)
    left = int(box[index].attributes['left'].value)
    width = int(box[index].attributes['width'].value)
    height = int(box[index].attributes['height'].value)

    bbs = BoundingBoxesOnImage(
        [BoundingBox(x1=left, y1=top, x2=width + left, y2=height + top)],
        shape=image.shape)

    return kps, bbs
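
A hedged usage sketch (synthetic data, not from the original source) showing how the returned kps and bbs can go through a single augmenter call so that keypoints and boxes stay aligned with the image:

import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables import (Keypoint, KeypointsOnImage,
                                 BoundingBox, BoundingBoxesOnImage)

image = np.zeros((240, 320, 3), dtype=np.uint8)  # placeholder image
kps = KeypointsOnImage([Keypoint(x=50, y=60)], shape=image.shape)
bbs = BoundingBoxesOnImage([BoundingBox(x1=40, y1=50, x2=120, y2=140)],
                           shape=image.shape)

seq = iaa.Sequential([iaa.Fliplr(1.0)])  # always flip, for a visible effect
image_aug, kps_aug, bbs_aug = seq(image=image, keypoints=kps,
                                  bounding_boxes=bbs)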
Example #3
def get_image_and_keypoints(image_dict):
    (x1,y1), (x2,y2) = image_dict["A"], image_dict["B"]
    image = image_dict["image"]
    shape = image.shape[:2]
    kps = KeypointsOnImage([Keypoint(x=x1,y=y1), Keypoint(x=x2,y=y2)], shape=shape)
    heatmaps = None
    if "masks" in image_dict:  # Optionally return heatmaps
        heatmaps = HeatmapsOnImage(image_dict["masks"], shape)
    return image, kps, heatmaps
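
When heatmaps are present they can be augmented in the same call. A small sketch with assumed shapes (HeatmapsOnImage expects a float32 array with values in [0.0, 1.0]):

import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
from imgaug.augmentables.heatmaps import HeatmapsOnImage

image = np.zeros((128, 128, 3), dtype=np.uint8)
kps = KeypointsOnImage([Keypoint(x=32, y=32), Keypoint(x=96, y=96)],
                       shape=image.shape[:2])
masks = np.zeros((128, 128, 1), dtype=np.float32)  # float32 in [0.0, 1.0]
heatmaps = HeatmapsOnImage(masks, shape=image.shape[:2])

seq = iaa.Affine(rotate=10)
image_aug, kps_aug, heatmaps_aug = seq(image=image, keypoints=kps,
                                       heatmaps=heatmaps)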
def augmentation(images, annotations, distributed=False):
    height, width, _ = images[0].shape
    keypoints = [KeypointsOnImage(
        [
            Keypoint(x=0, y=annotation[0]*height),
            Keypoint(x=annotation[1]*width, y=annotation[2]*height),
            Keypoint(x=width, y=annotation[3]*height)
        ], shape=(height, width)) for annotation in annotations]

    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),
            iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.125))),
            iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01*255), per_channel=0.5),
            iaa.Sometimes(0.5, drop_light_shadow_generator),
            iaa.SomeOf((0, 3), [
                iaa.Multiply((0.75, 1.5), per_channel=False),
                iaa.BlendAlphaFrequencyNoise(
                    exponent=(-1, 1),
                    foreground=iaa.Multiply((0.7, 1.2)),
                    background=iaa.LinearContrast((0.75, 1.5))
                ),
                iaa.MotionBlur(k=[3, 9]),
                iaa.Add((-20, 20), per_channel=0.5),
                iaa.LinearContrast((0.75, 1.5), per_channel=0.5)
            ], random_order=True)
        ], random_order=False).to_deterministic()

    if distributed:
        data = np.asarray(Parallel(n_jobs=multiprocessing.cpu_count())(delayed(seq)(image=img, keypoints=kps) for img, kps in zip(images, keypoints)), dtype=object)
        augmented_images, augmented_keypoints = data[:, 0], data[:, 1]
    else:
        augmented_images, augmented_keypoints = seq(images=images, keypoints=keypoints)

    augmented_annotations = []
    for i, k in enumerate(augmented_keypoints):
        if k[0].x > k[2].x:
            k = k[::-1]

        peak = (-1, -1)
        if annotations[i][1] == -1 and annotations[i][2] == -1:
            x, y = [k[0].x, k[2].x], [k[0].y, k[2].y]
        elif k[1].x < 0 or (k[0].y < 0 and k[1].y < 0) or (k[0].y > height and k[1].y > height):
            x, y = [k[1].x, k[2].x], [k[1].y, k[2].y]
        elif k[1].x > width or (k[1].y < 0 and k[2].y < 0) or (k[1].y > height and k[2].y > height):
            x, y = [k[0].x, k[1].x], [k[0].y, k[1].y]
        else:
            x, y = [k[0].x, k[1].x, k[2].x], [k[0].y, k[1].y, k[2].y]
            peak = (x[1]/width, np.interp(x[1], x, y)/height)
        augmented_annotation = [np.interp(0, x, y)/height, peak[0], peak[1], np.interp(width, x, y)/height]

        if augmented_annotation[0] < 0 and augmented_annotation[3] < 0:
            augmented_annotation = [0, -1, -1, 0]
        elif augmented_annotation[0] > 1 and augmented_annotation[2] > 1 and augmented_annotation[3] > 1:
            augmented_annotation = [1, -1, -1, 1]

        augmented_annotations.append(augmented_annotation)
    return augmented_images, np.asarray(augmented_annotations)
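
A hedged call sketch for augmentation() above, assuming drop_light_shadow_generator is defined elsewhere in the module. Each annotation holds normalized [y_at_x0, peak_x, peak_y, y_at_xmax] values, with -1 marking a missing peak:

import numpy as np

images = [np.zeros((100, 200, 3), dtype=np.uint8) for _ in range(4)]
annotations = np.array([[0.5, 0.5, 0.4, 0.6]] * 4)
aug_imgs, aug_anns = augmentation(images, annotations)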
    def add_level(self, crate_images, height):
        n = 0

        num_crates_on_level = np.random.randint(2, 6)
        for i in range(num_crates_on_level):

            crate = np.copy(crate_images[n])

            if n == len(crate_images) - 1:
                n = 0
            else:
                n = n + 1

            crate_height, crate_width = crate.shape[:2]

            self.startY = self.startY - height

            if self.startY < 40:
                break

            kps = KeypointsOnImage([
                Keypoint(x=self.startX, y=self.startY),
                Keypoint(x=self.startX + crate_width, y=self.startY),
                Keypoint(x=self.startX + crate_width,
                         y=self.startY + crate_height),
                Keypoint(x=self.startX, y=self.startY + crate_height)
            ],
                                   shape=(imgH, imgW, 3))

            self.list_kps.append(kps)

            brightness_delta = np.random.randint(0, 100)
            contrast_delta = np.random.randint(-30, 30)
            crate[:, :, :3] = apply_brightness_contrast(
                crate[:, :, :3], 255 - brightness_delta, 127 - contrast_delta)

            self.img = np.zeros((imgH, imgW, 4), dtype=np.uint8)
            self.img[self.startY:self.startY + crate_height,
                     self.startX:self.startX + crate_width, :] = crate

            # self.class1=class1
            # self.class2=class2
            # self.class3=class3

            # Construct final image of the scene by superimposing: bg, img1, img2 and img3

            mask = self.img[:, :, 3]
            self.mask = np.stack([mask] * 3, -1)
            self.final = np.where(self.mask, self.img[:, :, 0:3], self.final)
Example #6
    def augment_3D_train_data(self):
        sometimes = lambda aug: iaa.Sometimes(0.5, aug)
        for idx in range (len(self.train_imgs)):
            
        
            X = self.train_imgs[idx]
            labels = self.train_labels[idx]
            Y = KeypointsOnImage(
                [Keypoint(x=labels[2 * i] * 512, y=labels[2 * i + 1] * 512)
                 for i in range(8)],
                shape=X.shape)

            seq2D = iaa.Sequential([
                # Small gaussian blur with random sigma between 0 and 0.5.
                # But we only blur about 50% of all images.
                sometimes(iaa.GaussianBlur(sigma=(0, 0.5))),
                # Strengthen or weaken the contrast in each image.
                # (iaa.ContrastNormalization is a deprecated alias of iaa.LinearContrast.)
                iaa.LinearContrast((0.75, 1.5)),
                # Add gaussian noise.
                # For 50% of all images, we sample the noise once per pixel.
                # For the other 50% of all images, we sample the noise per pixel AND
                # channel. This can change the color (not only brightness) of the
                # pixels.
                iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
                # Make some images brighter and some darker.
                # In 20% of all cases, we sample the multiplier once per channel,
                # which can end up changing the color of the images.
                iaa.Multiply((0.8, 1.2), per_channel=0.2),
                # Apply affine transformations to each image.
                # Scale/zoom them, translate/move them, rotate them and shear them.
                iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                )
            ], random_order=True) # apply augmenters in random order

            seq2D = seq2D.to_deterministic()  # the supported way to freeze the random state

            aug_img, aug_label = seq2D(image=X,  keypoints=Y)    

            aug_label = [coord / 512
                         for kp in aug_label.keypoints
                         for coord in (kp.x, kp.y)]
     
            self.aug_train_imgs.append(aug_img)
            self.aug_train_labels.append(aug_label)
Example #7
        def _read_labelme(_path_list, _pos):
            for _path_list_i, name in enumerate(_path_list):
                pos = (_pos + _path_list_i)

                with open(os.path.join(self.label_path,
                                       name[:name.rfind(".")] + ".json"),
                          encoding=self.encoding) as f:
                    jdata = f.read()
                    data = json.loads(jdata)

                if self.img_path is None:
                    img64 = data['imageData']
                    img = Image.open(BytesIO(base64.b64decode(img64)))
                else:
                    img = Image.open(os.path.join(self.img_path, name))

                zoom_r = (np.array(img.size) / np.array(
                    (self.label_size[1], self.label_size[0])))
                img = _process_img(img, self.img_size)

                kps = []
                indexes = []
                for data_i in range(len(data['shapes'])):
                    label = data["shapes"][data_i]["label"]
                    if label in self.class_names:
                        index = self.class_names.index(label)
                        indexes.append(index)

                        point = np.array(data['shapes'][data_i]['points'])
                        point = point.squeeze() / zoom_r
                        kps.append(Keypoint(x=point[0], y=point[1]))
                kps = KeypointsOnImage(kps, shape=self.label_size)
                _imgaug_to_array(img, kps, pos, indexes)
Example #8
    def __call__(self, image, keypoints):
        """
        transform the image and label together
        :param image: grayscale image as a numpy array
        :param keypoints: 21 landmarks
        :return: augmented image, 21 landmarks as a float32 array
        """

        kps = KeypointsOnImage([Keypoint(*kp) for kp in keypoints],
                               shape=image.shape)

        _max = keypoints.max(0).astype(int)
        _min = keypoints.min(0).astype(int)
        max_x = np.min([_max[0] + self.margin, image.shape[1]])
        max_y = np.min([_max[1] + self.margin, image.shape[0]])
        min_x = np.max([_min[0] - self.margin, 0])
        min_y = np.max([_min[1] - self.margin, 0])

        seq = iaa.Sequential([
            iaa.Crop(px=(min_y, image.shape[1] - max_x, image.shape[0] - max_y,
                         min_x))
        ])

        # Augment BBs and images.
        image_aug, kps_aug = seq(image=image, keypoints=kps)
        kps_fixed = np.asarray([[kp.x, kp.y] for kp in kps_aug.keypoints],
                               dtype=np.float32)
        return image_aug, kps_fixed
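
Worth noting: the four-entry px tuple of iaa.Crop is ordered (top, right, bottom, left), which is exactly what the call above relies on. A tiny sketch to confirm the semantics:

import numpy as np
import imgaug.augmenters as iaa

image = np.zeros((100, 100, 3), dtype=np.uint8)
# px=(top, right, bottom, left); keep_size=False keeps the cropped size
seq = iaa.Sequential([iaa.Crop(px=(10, 5, 10, 5), keep_size=False)])
cropped = seq(image=image)
assert cropped.shape == (80, 90, 3)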
Example #9
    def __call__(self, sample):
        #image, keypoints = sample['image'], sample['points']
        image, keypoints, valids = sample
        im = np.array(image)
        kpt = KeypointsOnImage([Keypoint(x, y) for x, y in keypoints],
                               shape=im.shape)

        seq = iaa.Sequential([
            iaa.Multiply((1.2, 1.5)),
            iaa.Affine(scale=(0.8, 1.2),
                       translate_px={
                           "x": (-20, 20),
                           "y": (-20, 20)
                       },
                       rotate=(5, 10),
                       shear=(1, 3)),
            iaa.LinearContrast((0.8, 1.2))
        ])

        img_aug, kpt_aug = seq(image=im, keypoints=kpt)
        keypoints_aug = np.array([[point.x, point.y] for point in kpt_aug])
        img_aug = Image.fromarray(img_aug)
        return img_aug, keypoints_aug, valids
Example #10
 def __call__(self, image, keypoints):
     seq = iaa.Sequential([iaa.Fliplr(0.5)])
     kps = KeypointsOnImage([Keypoint(*kp) for kp in keypoints],
                            shape=image.shape)
     image_aug, kps_aug = seq(image=image, keypoints=kps)
     kps_fixed = np.asarray([[kp.x, kp.y] for kp in kps_aug.keypoints],
                            dtype=np.float32)
     return image_aug, kps_fixed
Example #11
def chapter_examples_keypoints_simple():
    import imgaug as ia
    import imgaug.augmenters as iaa
    from imgaug.augmentables import Keypoint, KeypointsOnImage

    ia.seed(1)

    image = ia.quokka(size=(256, 256))
    kps = KeypointsOnImage([
        Keypoint(x=65, y=100),
        Keypoint(x=75, y=200),
        Keypoint(x=100, y=100),
        Keypoint(x=200, y=80)
    ],
                           shape=image.shape)

    seq = iaa.Sequential([
        iaa.Multiply(
            (1.2, 1.5)),  # change brightness, doesn't affect keypoints
        iaa.Affine(rotate=10, scale=(
            0.5, 0.7
        ))  # rotate by exactly 10deg and scale to 50-70%, affects keypoints
    ])

    # Augment keypoints and images.
    image_aug, kps_aug = seq(image=image, keypoints=kps)

    # print coordinates before/after augmentation (see below)
    # use after.x_int and after.y_int to get rounded integer coordinates
    for i in range(len(kps.keypoints)):
        before = kps.keypoints[i]
        after = kps_aug.keypoints[i]
        print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" %
              (i, before.x, before.y, after.x, after.y))

    # image with keypoints before/after augmentation (shown below)
    image_before = kps.draw_on_image(image, size=7)
    image_after = kps_aug.draw_on_image(image_aug, size=7)

    # ------------

    save("examples_keypoints",
         "simple.jpg",
         grid([image_before, image_after], cols=2, rows=1),
         quality=90)
Example #12
def augment2(x, y, x_new, y_new, deg):
    seq = iaa.Sequential([
        iaa.Affine(rotate=deg)
    ])
    for dpt_x, dpt_y, dpt_x_new, dpt_y_new in zip(x, y, x_new, y_new):
        keypts = KeypointsOnImage(
            [Keypoint(x=dpt_y[i] * img_size_cropped,
                      y=dpt_y[i + 1] * img_size_cropped)
             for i in range(0, 50, 2)],
            shape=dpt_x.shape)  # shape is a required argument of KeypointsOnImage
        img_aug, keypts_aug = seq(image=dpt_x, keypoints=keypts)
        dpt_x_new[:,:,:] = img_aug
        dpt_y_new[:] = [t / img_size_cropped for x in [(keypt.x, keypt.y) for keypt in keypts_aug.keypoints] for t in x]
Example #13
def generate_keypoints(points, img_shape):
    """
    From point stored in [[x coordinate],[y coordinate]] (e.g [[20],[70]]) create 
    Keypoint objects. Keypoint object is needed for imgaug library. 
    """
    key_points = []
    for point in points:
        key_points.append(Keypoint(x=point[0], y=point[1]))
    return KeypointsOnImage(key_points, img_shape)
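
A short usage sketch with made-up coordinates, following the [[x], [y]] per-point layout described in the docstring:

import numpy as np

points = np.array([[[20], [70]], [[35], [40]]])  # two points, each [[x], [y]]
kps = generate_keypoints(points, img_shape=(100, 100, 3))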
Example #14
    def __call__(self, image, keypoints):
        if not isinstance(image, np.ndarray):
            image = np.array(image)

        seq = iaa.Sequential([iaa.Resize(self.size)])
        kps = KeypointsOnImage([Keypoint(*kp) for kp in keypoints],
                               shape=image.shape)
        image_aug, kps_aug = seq(image=image, keypoints=kps)
        kps_fixed = np.asarray([[kp.x, kp.y] for kp in kps_aug.keypoints],
                               dtype=np.float32)
        return image_aug, kps_fixed
Example #15
    def __getitem__(self, index):

        img = io.imread(self.img_paths[index])
        mask = io.imread(self.mask_paths[index])
        mask = mask / mask.max()
        mask = mask.astype(np.uint8)

        corners = self.csv.iloc[index]
        poly = np.zeros([5, 2])
        nodes = np.zeros([self.L, 2])
        for c in range(4):
            poly[c, 0] = float(corners[1 + 2 * c])  # np.float was removed in NumPy 1.24
            poly[c, 1] = float(corners[2 + 2 * c])
        poly[4, :] = poly[0, :]
        [tck, u] = interpolate.splprep([poly[:, 0], poly[:, 1]],
                                       s=2,
                                       k=1,
                                       per=1)
        [nodes[:, 0],
         nodes[:, 1]] = interpolate.splev(np.linspace(0, 1, self.L), tck)

        sample = {
            'image': img,
            'label/segmentation': mask,
            'label/nodes': nodes
        }

        # do image augmentations
        if (self.augmentations is not None):
            orig_shape = sample['image'].shape
            aug_det = self.augmentations.to_deterministic()
            sample['image'] = aug_det.augment_image(sample['image'])

            truth = sample['label/segmentation']
            # SegmentationMapOnImage / get_arr_int() are pre-0.4.0 imgaug names;
            # newer releases use SegmentationMapsOnImage / get_arr().
            truth = ia.SegmentationMapOnImage(truth,
                                              shape=truth.shape,
                                              nb_classes=2)
            truth = aug_det.augment_segmentation_maps(
                [truth])[0].get_arr_int()[..., np.newaxis]
            sample['label/segmentation'] = truth

            if ('label/nodes' in sample.keys()):
                kp = sample['label/nodes']
                kp = KeypointsOnImage(
                    [Keypoint(x=r[1], y=r[0] - orig_shape[0]) for r in kp],
                    shape=orig_shape)
                sample['label/nodes'] = aug_det.augment_keypoints(
                    kp).to_xy_array()

        # do image normalization
        sample['image_unnormalized'] = sample['image']
        sample['image'] = self.normalization.augment_image(sample['image'])

        return sample
    def get_template_info(self, image, label, template_type=None):
        # get fore ground points from label
        points, label = self.get_points(label)
        points = [Keypoint(x=point[0], y=point[1]) for point in points]
        points = KeypointsOnImage(keypoints=points, shape=image.shape)

        # get fore ground mask from getting region from label
        mask = self.get_template_mask(
            label, template_type) if template_type else np.ones_like(image)
        mask = SegmentationMapsOnImage(arr=mask, shape=image.shape)

        return image, label, mask, points
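
A hedged sketch (synthetic arrays) of how the mask and points returned above can be pushed through a single augmenter call together with the image:

import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage

image = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.ones((64, 64, 1), dtype=np.int32)  # integer class ids per pixel
segmap = SegmentationMapsOnImage(mask, shape=image.shape)
points = KeypointsOnImage([Keypoint(x=10, y=12)], shape=image.shape)

seq = iaa.Affine(translate_px={"x": 5})
image_aug, segmap_aug, points_aug = seq(image=image,
                                        segmentation_maps=segmap,
                                        keypoints=points)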
    def next_batch(self, plotting=False):
        while True:
            (
                batch_images,
                joint_ids,
                batch_joints,
                data_items,
                sm_size,
                target_size,
            ) = self.get_batch()

            pipeline = self.build_augmentation_pipeline(height=target_size[0],
                                                        width=target_size[1],
                                                        apply_prob=0.5)

            batch_images, batch_joints = pipeline(images=batch_images,
                                                  keypoints=batch_joints)

            # To inspect the augmented images, the following saves them with
            # the joints drawn on:
            if plotting:
                for i in range(self.batch_size):
                    joints = batch_joints[i]
                    kps = KeypointsOnImage(
                        [Keypoint(x=joint[0], y=joint[1]) for joint in joints],
                        shape=batch_images[i].shape,
                    )
                    im = kps.draw_on_image(batch_images[i])
                    # imageio.imwrite(data_items[i].im_path.split('/')[-1],im)
                    imageio.imwrite(
                        os.path.join(self.cfg["project_path"],
                                     str(i) + ".png"), im)

            image_shape = arr(batch_images).shape[1:3]
            batch = {Batch.inputs: arr(batch_images).astype(np.float64)}
            if self.has_gt:
                targetmaps = self.get_targetmaps_update(
                    joint_ids, batch_joints, data_items, sm_size, image_shape)
                batch.update(targetmaps)

            # if returndata:
            #        return batch_images,batch_joints,targetmaps

            batch = {
                key: data_to_input_batch(data)
                for (key, data) in batch.items()
            }
            batch[Batch.data_item] = data_items
            return batch
    def __get_img_seg_kps(self, img_seg):
        """
        Iterate in steps of two because the json array stores the
        segmentation points flat: [x1, y1, x2, y2, ..., xn, yn]
        Parameters:
            img_seg: an array with segmentation points
        Return:
            A list of Keypoint objects, [Keypoint(x1, y1), ..., Keypoint(xn, yn)]
        """
        points = list()
        for i in range(0, len(img_seg), 2):  # iterate every two steps
            chunk = img_seg[i:i + 2]
            points.append(Keypoint(x=chunk[0], y=chunk[1]))

        return points
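
For instance, with a COCO-style flat segmentation array, the two-step iteration above pairs the coordinates like this:

from imgaug.augmentables import Keypoint

img_seg = [10, 20, 30, 40, 50, 60]  # flat [x1, y1, x2, y2, x3, y3]
points = [Keypoint(x=img_seg[i], y=img_seg[i + 1])
          for i in range(0, len(img_seg), 2)]
# -> [Keypoint(10, 20), Keypoint(30, 40), Keypoint(50, 60)]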
Example #19
    def __getitem__(self, idx):
        try:

            image = cv2.imread(self.filename_list[idx])

            if self.mode != 'test':
                img_fname = self.filename_list[idx]
                json_fname = '.'.join(img_fname.split('.')[:-1]) + '.json'
                j = json.load(open(json_fname))
                pts = j['shapes'][0]['points']

            if self.mode == 'train':
                seq = self.train_seq
            else:
                seq = self.val_seq

            if self.mode != 'test':
                kps = KeypointsOnImage(
                    [Keypoint(x=pt[0], y=pt[1]) for pt in pts],
                    shape=image.shape)
            else:
                kps = []
            image_aug, kps_aug = seq(image=image, keypoints=kps)

            if self.mode != 'test':
                minx = min([pt.x for pt in kps_aug])
                maxx = max([pt.x for pt in kps_aug])
                target = [minx / image_aug.shape[1], maxx / image_aug.shape[1]]

                sample = {
                    'image':
                    torch.tensor(image_aug).permute(2, 0,
                                                    1).type(torch.FloatTensor),
                    'bbox':
                    torch.tensor(target).type(torch.FloatTensor)
                }
            else:
                sample = {
                    'image':
                    torch.tensor(image_aug).permute(2, 0,
                                                    1).type(torch.FloatTensor)
                }
            return sample
        except Exception as e:
            print(e)
            raise
Example #20
    def __draw_keypoints(self, gt_img, gt_kps, pred_img, pred_kps,
                         pred_scores):
        for i in range(gt_kps.shape[0]):
            for p in gt_kps[i]:
                kp = Keypoint(*p)
                gt_img = kp.draw_on_image(gt_img, (0, 255, 255), self.alpha, 3)

        for i in range(pred_kps.shape[0]):
            if pred_scores[i] < self.score_threshold:
                continue

            for p in pred_kps[i]:
                kp = Keypoint(*p)
                pred_img = kp.draw_on_image(pred_img, (0, 255, 255),
                                            self.alpha, 3)
        return pred_img, gt_img
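
Note the positional arguments in the draw_on_image calls above: after the image they are color, alpha and size. A minimal standalone sketch:

import numpy as np
from imgaug.augmentables import Keypoint

img = np.zeros((50, 50, 3), dtype=np.uint8)
kp = Keypoint(x=25, y=25)
# Keypoint.draw_on_image(image, color, alpha, size, ...)
img = kp.draw_on_image(img, (0, 255, 255), 0.75, 3)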
Example #21
    def __call__(self, image, keypoints):
        """
        transform the image and label together
        :param image: grayscale image as a numpy array
        :param keypoints: 21 landmarks
        :return: augmented image, 21 landmarks as a float32 array
        """

        kps = KeypointsOnImage([Keypoint(*kp) for kp in keypoints],
                               shape=image.shape)

        cval = int(np.random.uniform(20, 230))

        seq = iaa.Sequential([
            iaa.Affine(rotate=self.min_max_angle, cval=cval),
        ])

        # Augment BBs and images.
        image_aug, kps_aug = seq(image=image, keypoints=kps)
        kps_fixed = np.asarray([[kp.x, kp.y] for kp in kps_aug.keypoints],
                               dtype=np.float32)
        return image_aug, kps_fixed
def augment(img,
            keypoints,
            output_dir_img,
            output_dir_kpt,
            new_idx,
            show=False,
            mode='kpt',
            depth_img=None,
            depth_output_dir_img=None):
    seq = seq_kpts if mode == 'kpt' else seq_bbox
    kps = [Keypoint(x, y) for x, y in keypoints]
    kps = KeypointsOnImage(kps, shape=img.shape)
    seq_det = seq.to_deterministic()  # one frozen transform for RGB and depth
    img_aug, kps_aug = seq_det(image=img, keypoints=kps)
    depth_img_aug = seq_det(image=depth_img) if depth_img is not None else None
    vis_img_aug = img_aug.copy()
    kps_aug = kps_aug.to_xy_array().astype(int)

    for i, (u, v) in enumerate(kps_aug):
        (r, g,
         b) = colorsys.hsv_to_rgb(float(i) / keypoints.shape[0], 1.0, 1.0)
        R, G, B = int(255 * r), int(255 * g), int(255 * b)
    #     cv2.circle(vis_img_aug,(u,v),4,(R,G,B), -1)
    # if show:
    #     cv2.imshow("img", img_aug)
    #     cv2.waitKey(0)

    cv2.imwrite(os.path.join(output_dir_img, "%05d.png" % new_idx), img_aug)
    if depth_img is not None:
        cv2.imwrite(os.path.join(depth_output_dir_img, "%05d.png" % new_idx),
                    depth_img_aug)
    if mode == 'kpt':
        kps = np.array([kps_aug[0], keypoints[1]])
        # print(kps)
        np.save(os.path.join(output_dir_kpt, "%05d.npy" % new_idx), kps)
    else:  # BBOX
        process_bbox_annots(os.path.join(output_dir_kpt, "%05d.xml" % new_idx),
                            kps_aug)
Example #23
    def __getitem__(self, index):
        img_filename = self.df.iloc[index][0]
        assert os.path.exists(img_filename), 'image file does not exists'
        image = cv2.imread(img_filename)

        center_x = self.df.iloc[index][1]
        center_y = self.df.iloc[index][2]

        #shape: height, width,  resize: width, height
        # if (self.image_shape is not None) and (image.shape[:2] != self.image_shape[:2]):
        #     center_x /= (image.shape[1] / self.image_shape[1])
        #     center_y /= (image.shape[0] / self.image_shape[0])
        #     image = cv2.resize(image, (self.image_shape[1], self.image_shape[0]))

        kps = KeypointsOnImage([
            Keypoint(x=center_x, y=center_y),
        ],
                               shape=image.shape)

        image, kps_aug = self.imgaug_iaa(image=image, keypoints=kps)
        # before = kps.keypoints[0]
        after = kps_aug.keypoints[0]
        center_x, center_y = after.x, after.y

        # print(img_filename)
        # print(center_x, center_y)

        # (H,W,C)->(C,H,W) , normalization /255, -> Pytorch Tensor
        x = transforms.ToTensor()(image)

        if not self.test_mode:
            y = np.array((center_x, center_y), dtype=np.float32)
            y = torch.from_numpy(y)
            return x, y
        else:
            return x
import numpy as np
import imageio
import cv2

import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage

ia.seed(1)

image = ia.quokka(size=(256, 256))
kps = KeypointsOnImage([
    Keypoint(x=65, y=100),
    Keypoint(x=75, y=200),
    Keypoint(x=100, y=100),
    Keypoint(x=200, y=80)
],
                       shape=image.shape)

seq = iaa.Sequential([
    iaa.Multiply((1.2, 1.5)),  # change brightness, doesn't affect keypoints
    iaa.Affine(rotate=10, scale=(
        0.5,
        0.7))  # rotate by exactly 10deg and scale to 50-70%, affects keypoints
])

# Augment keypoints and images.
image_aug, kps_aug = seq(image=image, keypoints=kps)
import imgaug as ia
import imageio
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage

ia.seed(1)

image = imageio.imread("input.jpg")

x1 = 125.5
y1 = 485.5
x2 = 400.5
y2 = 680.5

kps = KeypointsOnImage([
    Keypoint(x=x1, y=y1),
    Keypoint(x=x2, y=y1),
    Keypoint(x=x2, y=y2),
    Keypoint(x=x1, y=y2)
],
                       shape=image.shape)

seq = iaa.Sequential([
    iaa.Multiply((1.2, 1.5)),  # change brightness, doesn't affect keypoints
    iaa.Affine(rotate=45)
])

# Augment keypoints and images.
image_aug, kps_aug = seq(image=image, keypoints=kps)

# print coordinates before/after augmentation
for i in range(len(kps.keypoints)):
    before = kps.keypoints[i]
    after = kps_aug.keypoints[i]
    print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (
        i, before.x, before.y, after.x, after.y))
    def next_batch(self, plotting=False):
        while True:
            (
                batch_images,
                joint_ids,
                batch_joints,
                data_items,
            ) = self.get_batch()

            # Scale is sampled only once per batch so that all images are transformed to the same size.
            target_size, sm_size = self.calc_target_and_scoremap_sizes()
            scale = np.mean(target_size / self.default_size)
            augmentation.update_crop_size(self.pipeline, *target_size)
            batch_images, batch_joints = self.pipeline(images=batch_images,
                                                       keypoints=batch_joints)
            batch_images = np.asarray(batch_images)
            image_shape = batch_images.shape[1:3]
            # Discard keypoints whose coordinates lie outside the cropped image
            batch_joints_valid = []
            joint_ids_valid = []
            for joints, ids in zip(batch_joints, joint_ids):
                inside = np.logical_and.reduce((
                    joints[:, 0] < image_shape[1],
                    joints[:, 0] > 0,
                    joints[:, 1] < image_shape[0],
                    joints[:, 1] > 0,
                ))
                batch_joints_valid.append(joints[inside])
                temp = []
                start = 0
                for array in ids:
                    end = start + array.size
                    temp.append(array[inside[start:end]])
                    start = end
                joint_ids_valid.append(temp)

            # To inspect the augmented images, the following saves them with
            # the joints drawn on:
            if plotting:
                for i in range(self.batch_size):
                    joints = batch_joints_valid[i]
                    kps = KeypointsOnImage(
                        [Keypoint(x=joint[0], y=joint[1]) for joint in joints],
                        shape=batch_images[i].shape,
                    )
                    im = kps.draw_on_image(batch_images[i])
                    # imageio.imwrite(data_items[i].im_path.split('/')[-1],im)
                    imageio.imwrite(
                        os.path.join(self.cfg["project_path"],
                                     str(i) + ".png"), im)

            batch = {Batch.inputs: batch_images.astype(np.float64)}
            if self.has_gt:
                targetmaps = self.get_targetmaps_update(
                    joint_ids_valid,
                    batch_joints_valid,
                    data_items,
                    (sm_size[1], sm_size[0]),
                    scale,
                )
                batch.update(targetmaps)

            batch = {key: np.asarray(data) for (key, data) in batch.items()}
            batch[Batch.data_item] = data_items
            return batch
Example #27
 def landmarks_to_kps(self, image, landmarks):
     kp_list = []
     for i in range(landmarks.shape[0]):
         kp_list.append(Keypoint(x=landmarks[i][0], y=landmarks[i][1]))
     kps = KeypointsOnImage(kp_list, shape=image.shape)
     return kps
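
The explicit loop above can also be written with KeypointsOnImage.from_xy_array, which accepts an (N, 2) array of x/y pairs; to_xy_array recovers the array after augmentation:

import numpy as np
from imgaug.augmentables import KeypointsOnImage

landmarks = np.array([[10.0, 20.0], [30.0, 40.0]], dtype=np.float32)
kps = KeypointsOnImage.from_xy_array(landmarks, shape=(64, 64, 3))
recovered = kps.to_xy_array()  # back to the (N, 2) array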
# assumption: the augmenter definition was truncated here; iaa.Snowflakes is a
# guess based on its surviving speed=(0.01, 0.05) argument
augtype = "snowflakes"
seq = iaa.Sequential([
    iaa.Snowflakes(flake_size=(0.1, 0.4),
                   speed=(0.01, 0.05))
])
Augmentations.append([augtype, seq])

for ind, imname in enumerate(Dataframe.index):
    image = imresize(imread(os.path.join('montblanc_images', imname)),
                     size=scale)
    ny, nx, nc = np.shape(image)

    kpts = []
    for i in individuals:
        for b in bodyparts:
            x, y = (Dataframe.iloc[ind][scorer][i][b]['x'],
                    Dataframe.iloc[ind][scorer][i][b]['y'])
            if np.isfinite(x) and np.isfinite(y):
                kpts.append(Keypoint(x=x * scale, y=y * scale))

    kps = KeypointsOnImage(kpts, shape=image.shape)

    cells = []

    # image with keypoints before augmentation
    image_before = kps.draw_on_image(image,
                                     color=color,
                                     size=size,
                                     alpha=alpha)
    cells.append(image_before)

    for name, seq in Augmentations:
        image_aug, kps_aug = seq(image=image, keypoints=kps)
        image_after = kps_aug.draw_on_image(image_aug,
                                            color=color,
                                            size=size,
                                            alpha=alpha)
        cells.append(image_after)
def main(argv=None):
    with open(FLAGS.processed_data_dir, 'r') as f:
        data_dict = json.load(f)
    Path(FLAGS.save_to_path).mkdir(parents=True, exist_ok=True)

    brightness_range = tuple([float(n) for n in FLAGS.brightness_range])
    rotation_range = tuple([float(n) for n in FLAGS.rotation_range])
    scale_range = tuple([float(n) for n in FLAGS.scale_range])
    seq = iaa.Sequential([
        iaa.Multiply(brightness_range),
        iaa.Affine(
            rotate=rotation_range,
            scale=scale_range)])

    for n in tqdm(range(FLAGS.num_augmentations+1), desc='Epochs', total=FLAGS.num_augmentations+1):
        fnm = os.path.join(FLAGS.save_to_path, 'augmentation_{}.tfrecord'.format(n))
        with tf.io.TFRecordWriter(fnm) as writer:
            for info in tqdm(data_dict.values(), desc='Pics', total=len(data_dict), leave=False):
                image = np.array(info['image']).astype(np.float32)
                key_pts = KeypointsOnImage([
                    Keypoint(info['left_eye']['center'][0], info['left_eye']['center'][1]),
                    Keypoint(info['left_eye']['inner_corner'][0], info['left_eye']['inner_corner'][1]),
                    Keypoint(info['left_eye']['outer_corner'][0], info['left_eye']['outer_corner'][1]),
                    Keypoint(info['right_eye']['center'][0], info['right_eye']['center'][1]),
                    Keypoint(info['right_eye']['inner_corner'][0], info['right_eye']['inner_corner'][1]),
                    Keypoint(info['right_eye']['outer_corner'][0], info['right_eye']['outer_corner'][1]),
                    Keypoint(info['left_eyebrow']['inner_end'][0], info['left_eyebrow']['inner_end'][1]),
                    Keypoint(info['left_eyebrow']['outer_end'][0], info['left_eyebrow']['outer_end'][1]),
                    Keypoint(info['right_eyebrow']['inner_end'][0], info['right_eyebrow']['inner_end'][1]),
                    Keypoint(info['right_eyebrow']['outer_end'][0], info['right_eyebrow']['outer_end'][1]),
                    Keypoint(info['mouth']['left_corner'][0], info['mouth']['left_corner'][1]),
                    Keypoint(info['mouth']['right_corner'][0], info['mouth']['right_corner'][1]),
                    Keypoint(info['mouth']['center_top_lip'][0], info['mouth']['center_top_lip'][1]),
                    Keypoint(info['mouth']['center_bottom_lip'][0], info['mouth']['center_bottom_lip'][1]),
                    Keypoint(info['nose']['tip'][0], info['nose']['tip'][1])
                ], shape=image.shape)

                if n == 0:
                    image_aug = image
                    key_pts_aug_arr = key_pts.to_xy_array()
                else:
                    image_aug, key_pts_aug = seq(image=image, keypoints=key_pts)
                    key_pts_aug_arr = key_pts_aug.to_xy_array()

                image_aug /= 255
                key_pts_aug_arr[:, 0] /= image_aug.shape[1]
                key_pts_aug_arr[:, 1] /= image_aug.shape[0]
                example = serialize_example(image_aug.tobytes(), key_pts_aug_arr.tobytes())  # tostring() is removed in NumPy 2.0
                writer.write(example)

    return
    def __getitem__(self, idx):
        file_name = self.files[idx]
        image = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
        # Resize
        w, h = image.shape[1], image.shape[0]
        w_new, h_new = process_resize(w, h, self.resize)

        if self.resize_float:
            image = cv2.resize(image.astype('float32'), (w_new, h_new))
        else:
            image = cv2.resize(image, (w_new, h_new)).astype('float32')

        # extract keypoints of the original image
        kps_orig_np, descs_orig, scores_orig = self.detector.detect(image)
        kps_orig = KeypointsOnImage([Keypoint(x=kp[0], y=kp[1]) for kp in kps_orig_np], shape=image.shape)

        all_matches = []
        i = 0
        while len(all_matches) < self.min_keypoints and i < 10:

            i += 1

            image_aug1, kps_proj_aug1 = self.aug_seq(image=image, keypoints=kps_orig)
            image_aug2, kps_proj_aug2 = self.aug_seq(image=image, keypoints=kps_orig)

            kps_proj_aug_np1 = np.array([(kp.x, kp.y) for kp in kps_proj_aug1.keypoints])
            kps_proj_aug_np2 = np.array([(kp.x, kp.y) for kp in kps_proj_aug2.keypoints])

            # extract keypoints from augmented images
            kp_det_aug_np1, descs_aug1, scores_aug1 = self.detector.detect(image_aug1)
            kp_det_aug_np2, descs_aug2, scores_aug2 = self.detector.detect(image_aug2)

            # image = torch.from_numpy(image / 255.)[None].float()
            image_aug1 = torch.from_numpy(image_aug1 / 255.)[None].float()
            image_aug2 = torch.from_numpy(image_aug2 / 255.)[None].float()

            # finding GT matches
            if len(kp_det_aug_np1) < self.min_keypoints or len(kp_det_aug_np2) < self.min_keypoints:
                continue

            if len(kp_det_aug_np1.shape) != 2 or len(kp_det_aug_np2.shape) != 2 or len(kps_proj_aug_np1.shape) != 2 or len(kps_proj_aug_np2.shape) != 2:
                continue

            matches1 = self.find_matches(kps_proj_aug_np1, kp_det_aug_np1)
            matches2 = self.find_matches(kps_proj_aug_np2, kp_det_aug_np2)

            matches1 = {m[0]: m[1] for m in matches1}
            matches2 = {m[0]: m[1] for m in matches2}
            all_matches = np.array([(v, matches2[k]) for k, v in matches1.items() if k in matches2])



            # width, height = image.shape[:2]
            # corners = np.array([[0, 0], [0, height], [width, 0], [width, height]], dtype=np.float32)
            #
            # # kp1, kp2, all_matches = [], [], []
            # i = 0
            # while i < 100:

            # warp = np.random.randint(-224, 224, size=(4, 2)).astype(np.float32)
            #
            # # get the corresponding warped image
            # M = cv2.getPerspectiveTransform(corners, corners + warp)
            # warped = cv2.warpPerspective(src=image, M=M, dsize=(image.shape[1], image.shape[0]))  # return an image type
            #
            # # extract keypoints of the image pair using SIFT
            # kp1_np, descs1, scores1_np = self.detector.detect(image)
            # kp2_np, descs2, scores2_np = self.detector.detect(warped)
            #
            # # skip this image pair if no keypoints detected in image
            # if len(kp1_np) < self.min_keypoints:
            #     break
            #
            # if len(kp2_np) < self.min_keypoints:
            #     i += 1
            #     continue
            #
            # image = torch.from_numpy(image / 255.)[None].float()
            # warped = torch.from_numpy(warped / 255.)[None].float()
            #
            # # obtain the matching matrix of the image pair
            # # matched = self.matcher.match(descs1, descs2)
            # kp1_projected = cv2.perspectiveTransform(kp1_np.reshape((1, -1, 2)), M)[0, :, :]

            # dists = cdist(kp1_projected, kp2_np)
            #
            # min1 = np.argmin(dists, axis=0)
            # min2 = np.argmin(dists, axis=1)
            #
            # min1v = np.min(dists, axis=1)
            # min1f = min2[min1v < 3]
            #
            # xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0]
            # matches = np.intersect1d(min1f, xx)
            #
            # missing1 = np.setdiff1d(np.arange(kp1_np.shape[0]), min1[matches])
            # missing2 = np.setdiff1d(np.arange(kp2_np.shape[0]), matches)
            #
            # MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]])
            # MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)])
            # MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]])
            # all_matches = MN.T  # np.concatenate([MN, MN2, MN3], axis=1).T
            # all_matches = np.unique(all_matches, axis=0)

            # if len(all_matches) < 2:
            #     continue

            # kp1_np = kp1_np.reshape((2, -1))
            # kp2_np = kp2_np.reshape((2, -1))
            # scores1_np = scores1_np.reshape((1, -1))
            # scores2_np = scores2_np.reshape((1, -1))
            # descs1 = np.pad((descs1 / 256.).T, pad_width=((0, 128), (0, 0)), constant_values=0.0)
            # descs2 = np.pad((descs2 / 256.).T, pad_width=((0, 128), (0, 0)), constant_values=0.0)

            if len(all_matches) < self.min_keypoints:
                continue

            # Padding
            pad_value = - 10 ** 6
            kp1_np, mask1 = self._pad_and_mask(kp_det_aug_np1, axis=0, val=pad_value)
            kp2_np, mask2 = self._pad_and_mask(kp_det_aug_np2, axis=0, val=pad_value)
            descs1, _ = self._pad_and_mask(descs_aug1, axis=1, val=pad_value)
            descs2, _ = self._pad_and_mask(descs_aug2, axis=1, val=pad_value)
            scores1_np = np.pad(scores_aug1, pad_width=((0, self.nfeatures - scores_aug1.shape[0]),), mode='constant',
                                constant_values=pad_value)
            scores2_np = np.pad(scores_aug2, pad_width=((0, self.nfeatures - scores_aug2.shape[0]),), mode='constant',
                                constant_values=pad_value)
            all_matches, all_matches_mask = self._pad_and_mask(all_matches, axis=0, val=0)

            # if np.random.rand(1) > 0.5:
            #     kp1_np, kp2_np = kp2_np, kp1_np
            #     descs1, descs2 = descs2, descs1
            #     scores1_np, scores2_np = scores2_np, scores1_np
            #     mask1, mask2 = mask2, mask1
            #     image, warped = warped, image
            #     all_matches[:, 0], all_matches[:, 1] = np.copy(all_matches[:, 1]), np.copy(all_matches[:, 0])

            return {
                'keypoints0': kp1_np,
                'keypoints1': kp2_np,
                'descriptors0': descs1,
                'descriptors1': descs2,
                'scores0': scores1_np,
                'scores1': scores2_np,
                'mask0': mask1,
                'mask1': mask2,
                'image0': image_aug1,
                'image1': image_aug2,
                'all_matches': all_matches,
                'all_matches_mask': all_matches_mask,
                'file_name': file_name
            }

        # image_aug1 = torch.from_numpy(image_aug1 / 255.)[None].float()
        # image_aug2 = torch.from_numpy(image_aug2 / 255.)[None].float()

        return {
            'keypoints0': np.zeros([self.nfeatures, 2]),
            'keypoints1': np.zeros([self.nfeatures, 2]),
            'descriptors0': np.zeros([self.desc_dim, self.nfeatures]),
            'descriptors1': np.zeros([self.desc_dim, self.nfeatures]),
            'scores0': np.zeros([self.nfeatures, ]),
            'scores1': np.zeros([self.nfeatures, ]),
            'mask0': np.zeros([self.nfeatures, ]),
            'mask1': np.zeros([self.nfeatures, ]),
            'all_matches': np.zeros([self.nfeatures, 2], dtype=int),
            'all_matches_mask': np.zeros([self.nfeatures, ]),
            'image0': image_aug1,
            'image1': image_aug2,
            'file_name': file_name
        }
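
The _pad_and_mask helper used above is not shown in the snippet; a plausible standalone sketch of what it might do, purely an assumption, padding one axis up to nfeatures and returning a validity mask:

import numpy as np

def pad_and_mask(arr, axis, val, nfeatures):
    """Hypothetical stand-in for the _pad_and_mask helper: pad `arr` with
    `val` along `axis` up to `nfeatures` entries and return the padded
    array plus a 0/1 validity mask over those entries."""
    n = arr.shape[axis]
    pad = [(0, 0)] * arr.ndim
    pad[axis] = (0, nfeatures - n)
    padded = np.pad(arr, pad_width=pad, mode="constant", constant_values=val)
    mask = np.zeros(nfeatures)
    mask[:n] = 1.0
    return padded, mask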