Example #1
def image_main():
    import albumentations as A
    import albumentations.pytorch  # required so that A.pytorch.ToTensorV2 resolves
    from image_utils import BirdDataset
    import biases

    INPUT_SIZE = 224
    NORMALIZE_MEANS = [0.485, 0.456, 0.406]
    NORMALIZE_STDS = [0.229, 0.224, 0.225]

    runlog = {}

    data_transforms = {
        'train':
        A.Compose([
            A.RandomResizedCrop(INPUT_SIZE, INPUT_SIZE),
            A.HorizontalFlip(),
            A.Normalize(NORMALIZE_MEANS, NORMALIZE_STDS),
            A.pytorch.ToTensorV2(),
        ],
                  keypoint_params=A.KeypointParams(format='xy')),
        'val':
        A.Compose([
            A.Resize(INPUT_SIZE, INPUT_SIZE),
            A.CenterCrop(INPUT_SIZE, INPUT_SIZE),
            A.Normalize(NORMALIZE_MEANS, NORMALIZE_STDS),
            A.pytorch.ToTensorV2(),
        ],
                  keypoint_params=A.KeypointParams(format='xy')),
    }

    print('\nGenerating stain...')
    train_data = BirdDataset(mode='train', transform=data_transforms['train'])
    biaser = biases.BirdBias(train_data, runlog)

    return
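A usage note (not part of the original snippet): a Compose built with keypoint_params is called with named targets and returns a dict, so applying the pipeline above could look like the sketch below, where img (an H x W x 3 uint8 array) and kps (a list of (x, y) pairs) are assumptions.

# hypothetical inputs: `img` is a NumPy image, `kps` a list of (x, y) tuples
out = data_transforms['train'](image=img, keypoints=kps)
img_tensor, kps_out = out['image'], out['keypoints']  # tensor + surviving keypoints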
Example #2
    def __data_pipline(self, img, ldmarks):
        transform = None
        if self.mode == 'train':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    A.HorizontalFlip(p=0.5),
                    A.VerticalFlip(p=0.5),
                    A.ToFloat(p=1),
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        elif self.mode == 'test':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    A.ToFloat(p=1),
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        transformed = transform(image=img, keypoints=ldmarks)

        return transformed
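Note that the Crop runs after the Resize, so its box (x from 40 to the output width minus 76) is expressed in resized coordinates; keypoints that fall outside the crop are silently dropped, since KeypointParams defaults to remove_invisible=True.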
Example #3
def test_perspective_keep_size():
    h, w = 100, 100
    img = np.zeros([h, w, 3], dtype=np.uint8)
    h, w = img.shape[:2]
    bboxes = []
    for _ in range(10):
        x1 = np.random.randint(0, w - 1)
        y1 = np.random.randint(0, h - 1)
        x2 = np.random.randint(x1 + 1, w)
        y2 = np.random.randint(y1 + 1, h)
        bboxes.append([x1, y1, x2, y2])
    keypoints = [(np.random.randint(0, w), np.random.randint(0, h), np.random.random()) for _ in range(10)]

    transform_1 = A.Compose(
        [A.Perspective(keep_size=True, p=1)],
        keypoint_params=A.KeypointParams("xys"),
        bbox_params=A.BboxParams("pascal_voc", label_fields=["labels"]),
    )
    transform_2 = A.Compose(
        [A.Perspective(keep_size=False, p=1), A.Resize(h, w)],
        keypoint_params=A.KeypointParams("xys"),
        bbox_params=A.BboxParams("pascal_voc", label_fields=["labels"]),
    )

    set_seed()
    res_1 = transform_1(image=img, bboxes=bboxes, keypoints=keypoints, labels=[0] * len(bboxes))
    set_seed()
    res_2 = transform_2(image=img, bboxes=bboxes, keypoints=keypoints, labels=[0] * len(bboxes))

    assert np.allclose(res_1["bboxes"], res_2["bboxes"])
    assert np.allclose(res_1["keypoints"], res_2["keypoints"])
Example #4
    def __call__(self, sample):
        assert isinstance(sample, dict)
        # first, add the object crops to the sample dict
        sample = self._generate_obj_crops(sample, self.crop_height)
        # now, for each crop, apply the seeded transform list
        output_crops, output_keypoints = [], []
        shared_seed = np.random.randint(np.iinfo(np.int32).max)

        # if self.enable_augmentation:  # might be disabled for evaluation or validation
        flip = random.choice([True, False])

        for crop_idx in range(len(sample["OBJ_CROPS"])):
            if self.shared_transform:
                np.random.seed(
                    shared_seed
                )  # the wrappers will use numpy to re-seed themselves internally

            if self.seed_wrap_augments:
                assert "POINTS" not in sample, "missing impl"
                aug_crop = self.augment_transform(
                    sample["OBJ_CROPS"][crop_idx])
            else:
                aug_crop = self.augment_transform(
                    image=sample["OBJ_CROPS"][crop_idx],
                    keypoints=sample["POINTS"][crop_idx],
                    # the "xy" format somehow breaks when we have 2-coord kpts, this is why we pad to 4...
                    keypoint_params=albumentations.KeypointParams(
                        format="xysa", remove_invisible=False),
                )

                if self.sync_hflip and flip:
                    aug_crop = self.sync_hflip_transform(
                        image=aug_crop["image"],
                        keypoints=aug_crop["keypoints"],
                        keypoint_params=albumentations.KeypointParams(
                            format="xysa", remove_invisible=False))
                output_keypoints.append(aug_crop["keypoints"])

            output_crops.append(
                self.convert_transform(PIL.Image.fromarray(aug_crop["image"])))
        sample["OBJ_CROPS"] = output_crops
        # finally, scrap the dumb padding around the 2d keypoints
        sample["POINTS"] = [
            pts
            for pts in np.asarray(output_keypoints)[..., :2].astype(np.float32)
        ]
        if self.drop_orig_image:
            del sample["IMAGE"]
            del sample["CENTROID_2D_IM"]
        return sample
Example #5
def main():
    image = cv2.imread("images/image_1.jpg")

    keypoints = cv2.goodFeaturesToTrack(
        cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), maxCorners=100, qualityLevel=0.5, minDistance=5
    ).squeeze(1)

    bboxes = [(kp[0] - 10, kp[1] - 10, kp[0] + 10, kp[1] + 10) for kp in keypoints]

    disp_image = visualize(image, keypoints, bboxes)
    plt.figure(figsize=(10, 10))
    plt.imshow(cv2.cvtColor(disp_image, cv2.COLOR_RGB2BGR))
    plt.tight_layout()
    plt.show()

    aug = A.Compose(
        [A.ShiftScaleRotate(scale_limit=0.1, shift_limit=0.2, rotate_limit=10, always_apply=True)],
        bbox_params=A.BboxParams(format="pascal_voc", label_fields=["bbox_labels"]),
        keypoint_params=A.KeypointParams(format="xy"),
    )

    for _i in range(10):
        data = aug(image=image, keypoints=keypoints, bboxes=bboxes, bbox_labels=np.ones(len(bboxes)))

        aug_image = data["image"]
        aug_image = visualize(aug_image, data["keypoints"], data["bboxes"])

        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(aug_image, cv2.COLOR_RGB2BGR))
        plt.tight_layout()
        plt.show()
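visualize is not shown in the snippet; a minimal stand-in (an assumption, not the original helper) that draws keypoints and boxes with OpenCV could be:

def visualize(image, keypoints, bboxes):
    canvas = image.copy()
    for x, y in keypoints:
        cv2.circle(canvas, (int(x), int(y)), 3, (0, 255, 0), -1)  # keypoints
    for x_min, y_min, x_max, y_max in bboxes:
        cv2.rectangle(canvas, (int(x_min), int(y_min)),
                      (int(x_max), int(y_max)), (255, 0, 0), 2)  # boxes
    return canvas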
Example #6
def random_rotate_90(image: np.ndarray, annotations: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    image_height, image_width = image.shape[:2]

    boxes = annotations[:, :4]
    keypoints = annotations[:, 4:-1].reshape(-1, 2)
    labels = annotations[:, -1:]

    invalid_index = keypoints.sum(axis=1) == -2

    keypoints[:, 0] = np.clip(keypoints[:, 0], 0, image_width - 1)
    keypoints[:, 1] = np.clip(keypoints[:, 1], 0, image_height - 1)

    keypoints[invalid_index] = 0

    category_ids = list(range(boxes.shape[0]))

    transform = albu.Compose(
        [albu.RandomRotate90(p=1)],
        keypoint_params=albu.KeypointParams(format="xy"),
        bbox_params=albu.BboxParams(format="pascal_voc", label_fields=["category_ids"]),
    )
    transformed = transform(
        image=image, keypoints=keypoints.tolist(), bboxes=boxes.tolist(), category_ids=category_ids
    )

    keypoints = np.array(transformed["keypoints"])
    keypoints[invalid_index] = -1

    keypoints = keypoints.reshape(-1, 10)
    boxes = np.array(transformed["bboxes"])
    image = transformed["image"]

    annotations = np.hstack([boxes, keypoints, labels])

    return image, annotations
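Each annotation row packs a pascal_voc box (4 values), five xy keypoints (10 values), and a label (1 value); (-1, -1) marks a missing keypoint, which is zeroed before the transform and restored to -1 afterwards. A shape sketch with hypothetical values:

# 4 box coords + 5 keypoints * 2 + 1 label = 15 columns per face
annotations = np.array([[10, 10, 50, 50,            # pascal_voc box
                         20, 20, 40, 20, 30, 30,    # keypoints 1-3
                         25, 40, 35, 40,            # keypoints 4-5
                         1]], dtype=np.float32)     # class label
image, annotations = random_rotate_90(np.zeros((100, 100, 3), np.uint8), annotations)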
Example #7
    def __init__(self, tfms: Sequence[A.BasicTransform]):
        self.bbox_params = A.BboxParams(format="pascal_voc",
                                        label_fields=["labels"])
        self.keypoint_params = A.KeypointParams(format="xy")
        super().__init__(tfms=A.Compose(tfms,
                                        bbox_params=self.bbox_params,
                                        keypoint_params=self.keypoint_params))
Example #8
    def setup_keypoints(self, record_component):
        self.adapter._compose_kwargs["keypoint_params"] = A.KeypointParams(
            format="xy",
            remove_invisible=False,
            label_fields=["keypoints_labels"])

        # not compatible with some transforms
        flat_tfms_list_ = _flatten_tfms(self.adapter.tfms_list)
        if get_transform(flat_tfms_list_,
                         "RandomSizedBBoxSafeCrop") is not None:
            raise RuntimeError(
                "RandomSizedBBoxSafeCrop is not supported for keypoints")

        self._kpts = record_component.keypoints
        self._kpts_xy = [xy for o in self._kpts for xy in o.xy]
        self._kpts_labels = [
            label for o in self._kpts for label in o.metadata.labels
        ]
        self._kpts_visible = [
            visible for o in self._kpts for visible in o.visible
        ]
        assert len(self._kpts_xy) == len(self._kpts_labels) == len(
            self._kpts_visible)

        self.adapter._albu_in["keypoints"] = self._kpts_xy
        self.adapter._albu_in["keypoints_labels"] = self._kpts_labels

        self.adapter._collect_ops.append(CollectOp(self.collect))
Example #9
def build_transform(cfg):
    tfs = []

    for icfg in cfg:
        if TRANSFORMS.get(icfg.get('type')) is not None:
            tf = build_from_cfg(icfg, TRANSFORMS)
        elif hasattr(alb, icfg.get('type')):
            if icfg.get('interpolation') and icfg.get(
                    'interpolation') in CV2_MODE:
                icfg['interpolation'] = CV2_MODE[icfg.get('interpolation')]
            if icfg.get('border_mode') and icfg.get(
                    'border_mode') in CV2_BORDER_MODE:
                icfg['border_mode'] = CV2_BORDER_MODE[icfg.get('border_mode')]
            tf = build_from_cfg(icfg, alb, mode='module')

        else:
            raise AttributeError(f"Invalid class {icfg.get('type')}")

        tfs.append(tf)

    aug = alb.Compose(
        transforms=tfs,
        p=1,
        keypoint_params=alb.KeypointParams(format='xy',
                                           remove_invisible=False),
    )

    return aug
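A hedged usage sketch: the config entries below are hypothetical, and CV2_MODE / CV2_BORDER_MODE are assumed to map the string names to OpenCV constants as the lookups above imply.

cfg = [
    dict(type='ShiftScaleRotate', shift_limit=0.1, scale_limit=0.1,
         rotate_limit=15, interpolation='linear', border_mode='constant'),
    dict(type='HorizontalFlip', p=0.5),
]
aug = build_transform(cfg)
out = aug(image=img, keypoints=kps)  # `img` and `kps` are assumed inputs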
Example #10
    def __init__(self, root_dir, is_train):
        super(FaceDataset, self).__init__()

        #self.local_rank = local_rank
        self.is_train = is_train
        self.input_size = 256
        self.num_kps = 68
        transform_list = []
        if is_train:
            transform_list += \
                [
                    A.ColorJitter(brightness=0.8, contrast=0.5, p=0.5),
                    A.ToGray(p=0.1),
                    A.ISONoise(p=0.1),
                    A.MedianBlur(blur_limit=(1,7), p=0.1),
                    A.GaussianBlur(blur_limit=(1,7), p=0.1),
                    A.MotionBlur(blur_limit=(5,12), p=0.1),
                    A.ImageCompression(quality_lower=50, quality_upper=90, p=0.05),
                    A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=40, interpolation=cv2.INTER_LINEAR,
                        border_mode=cv2.BORDER_CONSTANT, value=0, mask_value=0, p=0.8),
                    A.HorizontalFlip(p=0.5),
                    RectangleBorderAugmentation(limit=0.33, fill_value=0, p=0.2),
                ]
        transform_list += \
            [
                A.geometric.resize.Resize(self.input_size, self.input_size, interpolation=cv2.INTER_LINEAR, always_apply=True),
                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ToTensorV2(),
            ]
        self.transform = A.ReplayCompose(transform_list,
                                         keypoint_params=A.KeypointParams(
                                             format='xy',
                                             remove_invisible=False))
        self.root_dir = root_dir
        with open(osp.join(root_dir, 'annot.pkl'), 'rb') as f:
            annot = pickle.load(f)
            self.X, self.Y = annot
        train_size = int(len(self.X) * 0.99)

        if is_train:
            self.X = self.X[:train_size]
            self.Y = self.Y[:train_size]
        else:
            self.X = self.X[train_size:]
            self.Y = self.Y[train_size:]
        #if local_rank==0:
        #    logging.info('data_transform_list:%s'%transform_list)
        flip_parts = ([1, 17], [2, 16], [3, 15], [4, 14], [5, 13], [6, 12],
                      [7, 11], [8, 10], [18, 27], [19, 26], [20, 25],
                      [21, 24], [22, 23], [32, 36], [33, 35], [37, 46],
                      [38, 45], [39, 44], [40, 43], [41, 48], [42, 47],
                      [49, 55], [50, 54], [51, 53], [62, 64], [61, 65],
                      [68, 66], [59, 57], [60, 56])
        self.flip_order = np.arange(self.num_kps)
        for pair in flip_parts:
            self.flip_order[pair[1] - 1] = pair[0] - 1
            self.flip_order[pair[0] - 1] = pair[1] - 1
        logging.info('len:%d' % len(self.X))
        print('!!!len:%d' % len(self.X))
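The flip_parts table exists because A.HorizontalFlip only mirrors coordinates: the semantic identities of the 68 facial landmarks (left eye vs. right eye, and so on) still have to be swapped, presumably by indexing the flipped keypoints with self.flip_order whenever the replayed transform included the flip.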
Example #11
    def __init__(self, width=512, height=512):
        super(ImgAugment_Albumentations, self).__init__()
        self.width = width
        self.height = height
        # Data augmentation pipeline; see the albumentations GitHub repo for more transforms.
        self.transform_train = A.Compose(
            [
                # A.PadIfNeeded(min_height=1080, min_width=1980, border_mode=1, p=0.6),
                # A.IAACropAndPad(percent=(-0.8, 0.8), p=0.6),
                # A.OneOf([A.RandomScale(scale_limit=(0.005, 0.5), p=0.3),
                #          A.RandomScale(scale_limit=(0.1, 1), p=0.2),
                #          A.RandomScale(scale_limit=(1, 3), p=0.3)]),

                # A.RandomResizedCrop(height, width),
                A.Rotate(limit=60, p=0.3),
                # A.IAAAffine(p=0.3),
                # A.IAAPerspective(p=0.5),
                # A.OneOf([
                #     A.IAAAdditiveGaussianNoise(p=0.5),
                #     A.GaussNoise(p=0.6),
                # ]),
                A.Resize(height, width)
            ],
            bbox_params=A.BboxParams(format='pascal_voc',
                                     min_area=256,
                                     min_visibility=0.5,
                                     label_fields=["bbox_classes"]),
            keypoint_params=A.KeypointParams(
                format='xy',
                remove_invisible=False,
                label_fields=["keypoints_classes"]))
        self.augment_imgaug = ImgAugTransform()
Example #12
def test_compare_crop_and_pad(img_dtype, px, percent, pad_mode, pad_cval, keep_size):
    h, w, c = 100, 100, 3
    mode_mapping = {
        cv2.BORDER_CONSTANT: "constant",
        cv2.BORDER_REPLICATE: "edge",
        cv2.BORDER_REFLECT101: "reflect",
        cv2.BORDER_WRAP: "wrap",
    }
    pad_mode_iaa = mode_mapping[pad_mode]

    bbox_params = A.BboxParams(format="pascal_voc")
    keypoint_params = A.KeypointParams(format="xy", remove_invisible=False)

    keypoints = np.random.randint(0, min(h, w), [10, 2])

    bboxes = []
    for i in range(10):
        x1, y1 = np.random.randint(0, min(h, w) - 2, 2)
        x2 = np.random.randint(x1 + 1, w - 1)
        y2 = np.random.randint(y1 + 1, h - 1)
        bboxes.append([x1, y1, x2, y2, 0])

    transform_albu = A.Compose(
        [
            A.CropAndPad(
                px=px,
                percent=percent,
                pad_mode=pad_mode,
                pad_cval=pad_cval,
                keep_size=keep_size,
                p=1,
                interpolation=cv2.INTER_AREA
                if (px is not None and px < 0) or (percent is not None and percent < 0)
                else cv2.INTER_LINEAR,
            )
        ],
        bbox_params=bbox_params,
        keypoint_params=keypoint_params,
    )
    transform_iaa = A.Compose(
        [A.IAACropAndPad(px=px, percent=percent, pad_mode=pad_mode_iaa, pad_cval=pad_cval, keep_size=keep_size, p=1)],
        bbox_params=bbox_params,
        keypoint_params=keypoint_params,
    )

    if img_dtype == np.uint8:
        img = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)
    else:
        img = np.random.random((h, w, c)).astype(img_dtype)

    res_albu = transform_albu(image=img, keypoints=keypoints, bboxes=bboxes)
    res_iaa = transform_iaa(image=img, keypoints=keypoints, bboxes=bboxes)

    for key, item in res_albu.items():
        if key == "bboxes":
            bboxes = np.array(res_iaa[key])
            h = bboxes[:, 3] - bboxes[:, 1]
            w = bboxes[:, 2] - bboxes[:, 0]
            res_iaa[key] = bboxes[(h > 0) & (w > 0)]
        assert np.allclose(item, res_iaa[key]), f"{key} are not equal"
Example #13
    def sequence_augmentation(self,
                              p_apply=0.5,
                              limit_rotation=40,
                              limit_translation=0.1,
                              limit_scale=(-0.2, 0.2)):
        if self.rand_choice == 1:
            augm = A.Lambda(image=self.aug_dilate, keypoint=self.aug_keypoints)
        elif self.rand_choice == 2:
            augm = A.Lambda(image=self.aug_erode, keypoint=self.aug_keypoints)
        else:
            augm = A.NoOp()
        transform = A.Compose([
            A.Lambda(image=self.aug_morph_close,
                     keypoint=self.aug_keypoints,
                     p=1.0), augm,
            A.Downscale(scale_min=0.5,
                        scale_max=0.9,
                        p=p_apply,
                        interpolation=cv2.INTER_NEAREST_EXACT),
            A.ShiftScaleRotate(limit_translation,
                               limit_scale,
                               limit_rotation,
                               p=p_apply,
                               border_mode=cv2.BORDER_REFLECT101,
                               value=-1.0)
        ],
                              additional_targets={
                                  'image1': 'image',
                                  'image2': 'image'
                              },
                              keypoint_params=A.KeypointParams(
                                  "xy", remove_invisible=False))

        return transform
Example #14
def augmentation(p_apply=0.5,
                 limit_rotation=40,
                 limit_translation=0.1,
                 limit_scale=(-0.2, 0.2)):
    transform = A.Compose(
        [
            A.Lambda(image=aug_morph_close, keypoint=aug_keypoints, p=1.0),
            A.OneOf([
                A.Lambda(image=aug_dilate, keypoint=aug_keypoints),
                A.Lambda(image=aug_erode, keypoint=aug_keypoints),
                A.NoOp()
            ],
                    p=p_apply),
            # A.Lambda(image=aug_erode_or_dilate, keypoint=aug_keypoints, p=p_apply),
            A.Downscale(scale_min=0.5,
                        scale_max=0.9,
                        p=p_apply,
                        interpolation=cv2.INTER_NEAREST_EXACT),
            A.ShiftScaleRotate(limit_translation,
                               limit_scale,
                               limit_rotation,
                               p=p_apply,
                               border_mode=cv2.BORDER_REFLECT101,
                               value=-1.0),
            A.Lambda(image=cropout, keypoint=aug_keypoints, p=p_apply),
        ],
        keypoint_params=A.KeypointParams("xy", remove_invisible=False))

    return transform
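The aug_* callbacks in Examples #13 and #14 are project-specific. Minimal stand-ins that satisfy A.Lambda's hooks (assumptions, not the original helpers) could look like:

def aug_morph_close(image, **kwargs):
    # morphological closing; the 3x3 kernel size is an assumption
    kernel = np.ones((3, 3), np.uint8)
    return cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)

def aug_keypoints(keypoint, **kwargs):
    # A.Lambda calls the keypoint hook once per keypoint; morphology does
    # not move points, so each one passes through unchanged
    return keypoint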
Example #15
def main():
    root = r'D:\DB\express-data\images'
    name = '001_4304989519209_20200403_140644412.jpg'
    image = cv2.imread(os.path.join(root, name))
    lines = parse_txt(
        os.path.join(r'D:\DB\express-data\train_gts', name + '.txt'))
    polys = [line[:-1] for line in lines]
    aug1 = albumentations.HorizontalFlip(p=1)
    aug2 = albumentations.Rotate(limit=360,
                                 border_mode=cv2.BORDER_CONSTANT,
                                 value=0,
                                 p=1)

    new_loc = []
    for i in range(0, len(polys[0]), 2):
        new_loc.append((polys[0][i], polys[0][i + 1]))
    ceshi = dict(image=image, keypoints=new_loc)
    out = albumentations.Compose(
        [aug1, aug2],
        p=1,
        keypoint_params=albumentations.KeypointParams(format='xy'))(**ceshi)
    out = aug1(image=image, keypoints=new_loc)
    # params = aug1.get_params()
    # params = {'col': 486, 'rows': 917}
    # image = aug1.apply(image, **params)
    # new_loc = aug1.apply_to_keypoint(keypoint=new_loc, **params)

    img = image
    cv2.polylines(img, [np.array(new_loc, dtype=np.int32).reshape(-1, 1, 2)],
                  True, (0, 255, 0))
    cv2.imshow('im', img)
    cv2.waitKey()
    # out = aug(image=image, keypoints=polys[0])
    # augment_and_show(aug, image)
    print('done')
Example #16
def test_symmetric(augmentation_cls, params, keypoints, result_keypoints):
    img = np.zeros([100, 100, 3], dtype=np.uint8)
    aug = A.Compose([augmentation_cls(**params, p=1)],
                    keypoint_params=A.KeypointParams("xy"))

    res = aug(image=img, keypoints=keypoints)

    assert np.allclose(result_keypoints, res["keypoints"])
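One plausible parametrization (an assumption, using the legacy convention that a horizontal flip maps x to (width - 1) - x):

test_symmetric(
    A.HorizontalFlip, {},
    keypoints=[(20, 30)],
    result_keypoints=[(79, 30)],  # 100-wide image: x' = 99 - 20
)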
Example #17
def valid_transformers(img_size):
    return A.Compose(
        [
            A.CenterCrop(img_size, img_size, p=1.0),
            # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
        ],
        bbox_params=A.BboxParams(format='pascal_voc',
                                 label_fields=['category_ids']),
        keypoint_params=A.KeypointParams('xy'))
Example #18
def get_transform(train=True):
    if train:
        # training branch; augmentations can be added here (currently identical to the eval pipeline)
        return A.Compose([
            A.Resize(width=224, height=336, p=1.0),
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            ToTensorV2(),
        ],
                         keypoint_params=A.KeypointParams(
                             format='xy', remove_invisible=True))

    return A.Compose([
        A.Resize(width=224, height=336, p=1.0),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ],
                     keypoint_params=A.KeypointParams(format='xy',
                                                      remove_invisible=True))
Example #19
    def __data_pipline(self, img, ldmarks):
        transform = None
        if self.mode == 'train':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),  # /8--->(356, 536)
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    # A.CLAHE(p=1),
                    A.HorizontalFlip(p=0.5),
                    A.VerticalFlip(p=0.5),
                    A.ToFloat(p=1),  # (0 ~ 1)
                    # A.Normalize(max_pixel_value=1, p=1)
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        elif self.mode == 'test':
            # import random
            # random.seed(2020)
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),  # /8--->(356, 536)
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    # (356, 460)
                    # A.CLAHE(p=1),
                    A.ToFloat(p=1),  # (0 ~ 1)
                    # A.Normalize(max_pixel_value=1, p=1)
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        transformed = transform(image=img, keypoints=ldmarks)

        return transformed
Example #20
def get_validation_augmentations(size=(256, 256), with_keypoints=False):
    h, w = size
    test_transform = [
        ResizeWithKp(h, w),
    ]
    if with_keypoints:
        return albu.Compose(test_transform,
                            keypoint_params=albu.KeypointParams(
                                format='xy', remove_invisible=False))
    else:
        return albu.Compose(test_transform)
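ResizeWithKp is project-specific; assuming it only resizes while carrying keypoints through, the plain library transform would behave the same:

def get_validation_augmentations_albu(size=(256, 256)):
    h, w = size
    # albu.Resize natively supports keypoint targets
    return albu.Compose([albu.Resize(h, w)],
                        keypoint_params=albu.KeypointParams(
                            format='xy', remove_invisible=False))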
Example #21
    def __init__(self, tfms: Sequence[A.BasicTransform]):
        self.tfms_list = tfms
        self.bbox_params = A.BboxParams(format="pascal_voc",
                                        label_fields=["labels"])
        self.keypoint_params = A.KeypointParams(
            format="xy",
            remove_invisible=False,
            label_fields=["keypoints_labels"])
        super().__init__(tfms=A.Compose(tfms,
                                        bbox_params=self.bbox_params,
                                        keypoint_params=self.keypoint_params))
Example #22
    def __call__(self, input_sample):
        result = albu.Compose(self.transformations,
                              p=1,
                              keypoint_params=albu.KeypointParams(
                                  format='xy', remove_invisible=False))(
                                      image=input_sample['image'],
                                      keypoints=input_sample['landmarks'])
        sample = {
            'image': np.array(result['image']),
            'landmarks': torch.as_tensor(np.array(result['keypoints']))
        }
        return sample
Example #23
def transformers(img_size):
    return A.Compose(
        [
            ## A.RandomResizedCrop(img_size, img_size),
            A.CenterCrop(img_size, img_size, p=1.0),
            A.HorizontalFlip(p=1.0),
            A.ShiftScaleRotate(p=1.0),
            A.ColorJitter(p=1.0),
            A.Cutout(p=1.0),
        ],
        bbox_params=A.BboxParams('pascal_voc', label_fields=['category_ids']),
        keypoint_params=A.KeypointParams('xy'))
Example #24
    def process(self, raw_data, input_data, ground_truth, piped_params=None):
        if isinstance(raw_data, list):
            # TODO: Use the t-1 frame
            _, input_data, ground_truth, _ = super().process(raw_data[0], input_data, ground_truth, piped_params)
            assert(False and "Not implemented yet")
        else:
            _, input_data, ground_truth, _ = super().process(raw_data, input_data, ground_truth, piped_params)
            # make input_data a dict since we will have multiple input channels
            input_data = {"img": input_data}

            # create previous heatmap
            prev_heatmap = self.gen_prev_heatmap((ground_truth.shape[0], ground_truth.shape[1]), piped_params["gt_2d_info"], piped_params["roi"])

            # copy as prev image
            prev_img = input_data["img"].copy()

            # Translate and scale the image as well as the heatmap to mimic a t-1 frame and add to input_data
            trans = A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.15, rotate_limit=0, always_apply=True, border_mode=cv2.BORDER_CONSTANT)
            transform = A.ReplayCompose(
                [trans],
                keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
                additional_targets={'prev_heatmap': 'image'}
            )
            keypoints = list(np.array(piped_params["gt_2d_info"])[:,:2] * self.params.R)
            transformed = transform(image=prev_img, prev_heatmap=prev_heatmap, keypoints=keypoints)
            input_data["prev_img"] = transformed["image"]
            input_data["prev_heatmap"] = transformed["prev_heatmap"]

            # Add to ground_truth: Regression param offset to t-1 object using the scale and dx, dy changes from the previous image transformation
            # applied_params = transformed["replay"]["transforms"][0]["params"]
            # scale = applied_params["scale"]
            # shift_x = applied_params["dx"] * prev_img.shape[0]
            # shift_y = applied_params["dy"] * prev_img.shape[0]

            for i, [center_x, center_y, width, height] in enumerate(piped_params["gt_2d_info"]):
                if self.params.REGRESSION_FIELDS["track_offset"].active:
                    prev_center_x = transformed["keypoints"][i][0]
                    prev_center_y = transformed["keypoints"][i][1]

                    if prev_center_x < 0 or prev_center_x > prev_img.shape[1] or prev_center_y < 0 or prev_center_y > prev_img.shape[0]:
                        # previous center is outside of heatmap bounds, set offset to 0
                        offset_x = 0
                        offset_y = 0
                    else:
                        offset_x = prev_center_x - (center_x * self.params.R)
                        offset_y = prev_center_y - (center_y * self.params.R)
                    ground_truth[center_y][center_x][self.params.start_idx("track_offset"):self.params.end_idx("track_offset")] = [offset_x, offset_y]
                else:
                    assert(False and "I mean, why even use CenterTracker when we dont regress the t-1 track offset?")

        return raw_data, input_data, ground_truth, piped_params
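A.ReplayCompose is used here because it records the parameters each transform actually sampled; the commented-out block shows how the applied scale and shift could be read back from transformed["replay"] to derive additional regression targets.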
Example #25
    def __init__(self, names, image_dir, point_dir, transform, cache_size,
                 patch_size, inter_dist):
        # store filenames
        self.image_names = [os.path.join(image_dir, f + '.jpg') for f in names]
        self.point_names = [os.path.join(point_dir, f + '.txt') for f in names]
        self.transform = transform
        self.random_crop = A.Compose(
            [A.RandomCrop(*patch_size)],
            keypoint_params=A.KeypointParams(format='xy'))
        self.det_crop = A.Compose(
            [A.Crop()], keypoint_params=A.KeypointParams(format='xy'))
        self.patch_size = patch_size
        self.center = (patch_size[1] // 2, patch_size[0] // 2)
        self.inter_dist = inter_dist
        self._cache_patch = []
        self._cache_points = []
        self._cache_size = cache_size
        self.start_index = 0
        # fill cache
        self.update_n_samples = 20
        for _ in range(
                (cache_size + self.update_n_samples - 1) // self.update_n_samples):
            self.update_cache()
Example #26
    def transform_from_dict(self, **kwargs):
        if 'transforms' in kwargs:
            kwargs['transforms'] = [
                obj_from_dict(transform, A)
                for transform in kwargs['transforms']
            ]
        if 'bbox_params' in kwargs:
            kwargs['bbox_params'] = A.BboxParams(**kwargs['bbox_params'])
        if 'keypoint_params' in kwargs:
            kwargs['keypoint_params'] = A.KeypointParams(
                **kwargs['keypoint_params'])

        transform = A.Compose(**kwargs)
        return transform
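A hypothetical call from inside the owning class (the values are placeholders, and obj_from_dict is assumed to instantiate an albumentations class from a {'type': ..., **kwargs} mapping, as the loop implies):

transform = self.transform_from_dict(
    transforms=[{'type': 'HorizontalFlip', 'p': 0.5}],
    keypoint_params={'format': 'xy', 'remove_invisible': False},
)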
Example #27
def test_coarse_dropout():
    aug = A.Compose(
        [
            A.CoarseDropout(min_holes=1,
                            max_holes=1,
                            min_height=128,
                            max_width=128,
                            min_width=128,
                            max_height=128,
                            p=1)
        ],
        keypoint_params=A.KeypointParams(format="xy"),
    )

    result = aug(image=np.zeros((128, 128)), keypoints=((10, 10), (20, 30)))
    assert len(result["keypoints"]) == 0
Example #28
    def get_aug(self):
        """## Defining Augmentations"""

        MUL_AUGMENTATION = A.Compose([
            A.HorizontalFlip(p=0.7),
            A.OneOf([
                A.HueSaturationValue(p=0.5),
                A.RGBShift(p=0.7),
                A.Rotate(limit=10, p=0.5),
                A.ShiftScaleRotate(rotate_limit=10, shift_limit=0, p=0.5),
            ], p=1),
            A.Blur(p=0.3),
            A.ChannelShuffle(p=0.3),
            A.RandomBrightnessContrast(p=0.5)
        ], keypoint_params=A.KeypointParams(format='xy'))

        return MUL_AUGMENTATION
Example #29
    def __init__(self,
                 transform=A.Compose([
                     A.Rotate(always_apply=False,
                              p=0.3,
                              limit=(-30, 30),
                              interpolation=0,
                              border_mode=1,
                              value=(0, 0, 0),
                              mask_value=None),
                     A.RandomBrightnessContrast(p=0.2),
                     A.GaussianBlur(p=0.2),
                     A.HorizontalFlip(p=0.5)
                 ],
                                      keypoint_params=A.KeypointParams(
                                          format='xy',
                                          remove_invisible=False))):
        self.transform = transform
Example #30
def train_transformers(img_size):
    return A.Compose(
        [
            A.RandomResizedCrop(img_size, img_size),
            # A.Transpose(p=0.5),
            A.HorizontalFlip(p=0.5),
            # A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            # A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
            A.ColorJitter(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                       contrast_limit=(-0.1, 0.1),
                                       p=0.5),
            # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
            A.Cutout(p=0.5),
        ],
        bbox_params=A.BboxParams('pascal_voc', label_fields=['category_ids']),
        keypoint_params=A.KeypointParams('xy'))
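A closing usage sketch for the pipeline above; the image, box, and keypoint values are assumptions:

tf = train_transformers(img_size=224)
out = tf(
    image=np.zeros((256, 256, 3), dtype=np.uint8),
    bboxes=[(10, 10, 120, 120)], category_ids=[0],  # pascal_voc box + label
    keypoints=[(60, 60)],
)
aug_img, aug_boxes, aug_kps = out['image'], out['bboxes'], out['keypoints']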