Example 1
def test_different_crop_modes(crop_mode, img_2x2, mask_2x2):
    if crop_mode == "d":
        with pytest.raises(ValueError):
            slt.Crop(crop_to=2, crop_mode=crop_mode)
    else:
        stream = slc.Stream([slt.Pad(pad_to=20), slt.Crop(crop_to=2, crop_mode=crop_mode)])
        img, mask = img_2x2, mask_2x2
        dc = slc.DataContainer((img, mask,), "IM")
        dc_res = stream(dc, return_torch=False)

        for el in dc_res.data:
            assert el.shape[0] == 2
            assert el.shape[1] == 2
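
A minimal standalone sketch of the same idea, applying slt.Crop directly to a tiny image. It assumes a hypothetical 2x2 single-channel array and that "c" (center) and "r" (random) are the supported crop modes, as the surrounding tests suggest, while an unknown mode such as "d" is rejected with a ValueError at construction time:

import numpy as np
import solt.core as slc
import solt.transforms as slt

# Hypothetical 2x2 single-channel image, just for illustration.
img = np.arange(4, dtype=np.uint8).reshape(2, 2, 1)

# Pad up to 20x20, then crop back to 2x2 (crop_mode="c" assumed to mean center crop).
stream = slc.Stream([slt.Pad(pad_to=20), slt.Crop(crop_to=2, crop_mode="c")])
dc = slc.DataContainer((img,), "I")
res = stream(dc, return_torch=False)
print(res.data[0].shape)  # expected: (2, 2, 1)
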
Example 2
    def __init__(self, crop_size, img_size=256):
        super(Crop, self).__init__(img_size)

        self.crop_size = crop_size

        self.solt_pipeline = slt.Crop(crop_size, crop_mode="r")

        self.albumentations_pipeline = albu.Compose([
            albu.RandomCrop(height=crop_size, width=crop_size),
            ToTensor(normalize={
                "mean": [0.485, 0.456, 0.406],
                "std": [0.229, 0.224, 0.225]
            }),
        ])

        self.torchvision_pipeline = tv_transforms.Compose([
            tv_transforms.RandomCrop(crop_size),
            tv_transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])

        _augm_ppl = augmentor.Pipeline()

        _augm_ppl.crop_random(probability=1,
                              percentage_area=crop_size / float(self.img_size))
        self.augmentor_pipeline = tv_transforms.Compose([
            _augm_ppl.torch_transform(),
            tv_transforms.transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])
Example 3
def test_2x2_pad_to_20x20_center_crop_2x2(pad_size, crop_size, img_2x2,
                                          mask_2x2):
    # Setting up the data
    kpts_data = np.array([[0, 0], [0, 1], [1, 1], [1, 0]]).reshape((4, 2))
    kpts = slc.Keypoints(kpts_data, 2, 2)
    img, mask = img_2x2, mask_2x2

    dc = slc.DataContainer((
        img,
        mask,
        kpts,
    ), "IMP")

    stream = slc.Stream(
        [slt.Pad(pad_to=pad_size),
         slt.Crop(crop_to=crop_size)])
    res = stream(dc, return_torch=False)

    assert (res[0][0].shape[0] == 2) and (res[0][0].shape[1] == 2)
    assert (res[1][0].shape[0] == 2) and (res[1][0].shape[1] == 2)
    assert (res[2][0].height == 2) and (res[2][0].width == 2)

    assert np.array_equal(res[0][0], img)
    assert np.array_equal(res[1][0], mask)
    assert np.array_equal(res[2][0].data, kpts_data)
Example 4
    def __init__(self):
        self.imgaug_transform = iaa.CropToFixedSize(width=64, height=64)
        self.augmentor_op = Operations.Crop(probability=1,
                                            width=64,
                                            height=64,
                                            centre=False)
        self.solt_stream = slc.Stream(
            [slt.Crop(crop_to=(64, 64), crop_mode="r")])
Example 5
def test_deserialize_from_dict(serialized):
    trfs = slc.Stream([slt.Pad(34), slt.Crop(32, "r"), slt.CutOut(2)])

    serialized_trfs = json.dumps(trfs.to_dict())
    serialized_from_deserialized = json.dumps(
        slu.from_dict(serialized).to_dict())

    assert serialized_trfs == serialized_from_deserialized
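
A minimal sketch of the same round trip outside of pytest, assuming slu refers to solt.utils as in these tests; the YAML part mirrors the complex-transform test in Example 13 below:

import json
import solt.core as slc
import solt.transforms as slt
import solt.utils as slu

stream = slc.Stream([slt.Pad(34), slt.Crop(32, "r"), slt.CutOut(2)])

# Dict round trip: rebuild the stream from its dict form and compare serializations.
rebuilt = slu.from_dict(stream.to_dict())
assert json.dumps(stream.to_dict()) == json.dumps(rebuilt.to_dict())

# YAML round trip, mirroring Example 13: deserializing and re-serializing
# the same YAML should be deterministic.
yaml_repr = stream.to_yaml()
assert slu.from_yaml(yaml_repr).to_yaml() == slu.from_yaml(yaml_repr).to_yaml()
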
Example 6
def test_crop_or_cutout_size_are_too_big(img_2x2, cutout_crop_size):
    dc = slc.DataContainer((img_2x2, ), "I")
    trf = slt.Crop(crop_to=cutout_crop_size)
    with pytest.raises(ValueError):
        trf(dc)

    trf = slt.CutOut(p=1, cutout_size=cutout_crop_size)
    with pytest.raises(ValueError):
        trf(dc)
Example 7
def my_transforms():
    train_trf = solt.Stream([
        slt.Pad(pad_to=(36, 36)),
        slt.Rotate(10),
        slt.Crop((32, 32)),
        slt.CutOut((8, 8)),
        slt.Flip(p=0.5)
    ])

    test_trf = solt.Stream([])

    return {'train': train_trf, 'eval': test_trf}
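
A minimal sketch of how the returned training stream might be applied, reusing the DataContainer pattern from the tests above; the 32x32 input image here is hypothetical:

import numpy as np
import solt.core as slc

trfs = my_transforms()

# Hypothetical 32x32 grayscale image.
img = np.random.randint(0, 255, size=(32, 32, 1), dtype=np.uint8)

dc = slc.DataContainer((img,), "I")
out = trfs['train'](dc, return_torch=False)
print(out.data[0].shape)  # expected: (32, 32, 1) after pad -> rotate -> crop -> cutout -> flip
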
Example 8
def my_transforms():
    train_trf = solt.Stream([
        # slt.Scale(range_x=(0.9, 1.1), range_y=(0.9, 1.1), same=True, p=0.5),
        # slt.Translate(range_x=(-0.05, 0.05), range_y=(-0.05, 0.05), p=0.5),
        # slt.GammaCorrection(gamma_range=0.1, p=0.5),
        slt.Pad(pad_to=(36, 36)),
        slt.Rotate((-5, 5), p=0.5),
        slt.Crop((32, 32)),
        # slt.Noise(gain_range=0.1, p=0.8),
        slt.CutOut((8, 8))
    ])

    test_trf = solt.Stream([])

    custom_trf = solt.Stream([
        slt.Pad(pad_to=(36, 36)),
        slt.Rotate((-5, 5), p=0.5),
        slt.Crop((32, 32)),
        # slt.Noise(gain_range=0.1, p=0.8),
        slt.CutOut((8, 8))
    ])

    return {'train': train_trf, 'eval': test_trf, 'transforms': custom_trf}
Example 9
def test_3x3_pad_to_20x20_center_crop_3x3_shape_stays_unchanged(img_3x3, mask_3x3):
    # Setting up the data
    kpts_data = np.array([[0, 0], [0, 2], [2, 2], [2, 0]]).reshape((4, 2))
    kpts = slc.Keypoints(kpts_data, 3, 3)
    img, mask = img_3x3, mask_3x3

    dc = slc.DataContainer((img, mask, kpts,), "IMP")

    stream = slc.Stream([slt.Pad((20, 20)), slt.Crop((3, 3))])
    res = stream(dc, return_torch=False)

    assert (res[0][0].shape[0] == 3) and (res[0][0].shape[1] == 3)
    assert (res[1][0].shape[0] == 3) and (res[1][0].shape[1] == 3)
    assert (res[2][0].height == 3) and (res[2][0].width == 3)
Example 10
def test_6x6_pad_to_20x20_center_crop_6x6_kpts_img(img):
    # Setting up the data
    kpts_data = np.array([[0, 0], [0, 5], [1, 3], [2, 0]]).reshape((4, 2))
    kpts = slc.Keypoints(kpts_data, frame=(6, 6))

    dc = slc.DataContainer((kpts, img), "PI")

    stream = slc.Stream([slt.Pad((20, 20)), slt.Crop((6, 6))])
    res = stream(dc, return_torch=False)

    assert (res[1][0].shape[0] == 6) and (res[1][0].shape[1] == 6)
    assert (res[0][0].frame[0] == 6) and (res[0][0].frame[1] == 6)

    assert np.array_equal(res[1][0], img)
    assert np.array_equal(res[0][0].data, kpts_data)
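
Note that Example 3 above uses the older positional Keypoints(data, height, width) constructor together with .height/.width, whereas this example uses the frame= form. A minimal sketch of the frame-based API, assuming frame is given as (height, width):

import numpy as np
import solt.core as slc

pts = np.array([[0, 0], [0, 5], [5, 5]])
kpts = slc.Keypoints(pts, frame=(6, 6))  # frame assumed to be (height, width)

print(kpts.frame)       # (6, 6)
print(kpts.data.shape)  # (3, 2)
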
Example 11
    def __init__(self, img_size=256):
        super(VHFlipRotateCrop, self).__init__(img_size)

        self.solt_pipeline = solt.Stream([
            slt.Flip(p=self.probablity, axis=0),
            slt.Flip(p=self.probablity, axis=1),
            slt.Rotate(angle_range=(0, 20)),
            slt.Crop(224, crop_mode="r"),
        ])

        self.albumentations_pipeline = albu.Compose([
            albu.VerticalFlip(p=self.probablity),
            albu.HorizontalFlip(p=self.probablity),
            albu.Rotate(limit=(0, 20),
                        p=self.probablity,
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0),
            albu.RandomCrop(height=224, width=224),
            ToTensor(normalize={
                "mean": [0.485, 0.456, 0.406],
                "std": [0.229, 0.224, 0.225]
            }),
        ])

        self.torchvision_pipeline = tv_transforms.Compose([
            tv_transforms.RandomHorizontalFlip(p=self.probablity),
            tv_transforms.RandomRotation(degrees=(0, 20)),
            tv_transforms.RandomCrop(224),
            tv_transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])

        _augm_ppl = augmentor.Pipeline()
        _augm_ppl.flip_top_bottom(probability=self.probablity)
        _augm_ppl.flip_left_right(probability=self.probablity)
        _augm_ppl.rotate(probability=self.probablity,
                         max_left_rotation=0,
                         max_right_rotation=20)
        _augm_ppl.crop_random(probability=1,
                              percentage_area=224 / float(self.img_size))

        self.augmentor_pipeline = tv_transforms.Compose([
            _augm_ppl.torch_transform(),
            tv_transforms.transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])
Example 12
    def __init__(self):
        self.augmentor_pipeline = Pipeline()
        self.augmentor_pipeline.add_operation(
            Operations.Crop(probability=1, width=64, height=64, centre=False))
        self.augmentor_pipeline.add_operation(
            Operations.Resize(probability=1,
                              width=512,
                              height=512,
                              resample_filter="BILINEAR"))
        self.imgaug_transform = iaa.Sequential([
            iaa.CropToFixedSize(width=64, height=64),
            iaa.Scale(size=512, interpolation="linear")
        ])
        self.solt_stream = slc.Stream([
            slt.Crop(crop_to=(64, 64), crop_mode="r"),
            slt.Resize(resize_to=(512, 512))
        ])
Example 13
def test_complex_transform_serialization():
    stream = slc.Stream([
        slt.Flip(axis=1, p=0.5),
        slc.SelectiveStream([
            slt.Rotate(angle_range=(-45, -45), p=1, padding="r"),
            slt.Rotate90(1, p=1),
            slt.Rotate(angle_range=(45, 45), p=1, padding="r"),
        ]),
        slt.Crop((350, 350)),
        slc.SelectiveStream([
            slt.GammaCorrection(gamma_range=0.5, p=1),
            slt.Noise(gain_range=0.1, p=1),
            slt.Blur()
        ],
                            n=3),
        slt.Projection(
            affine_transforms=slc.Stream([
                slt.Rotate(angle_range=(-45, 45), p=1),
                slt.Scale(range_x=(0.8, 1.5),
                          range_y=(0.8, 1.5),
                          p=1,
                          same=False),
            ]),
            v_range=(1e-4, 1e-3),
            p=1,
        ),
        slc.SelectiveStream(
            [
                slt.CutOut(40, p=1),
                slt.CutOut(30, p=1),
                slt.CutOut(20, p=1),
                slt.CutOut(40, p=1),
                slc.Stream(),
                slc.Stream(),
                slc.Stream(),
            ],
            n=3,
        ),
    ])

    assert slu.from_yaml(stream.to_yaml()).to_yaml() == slu.from_yaml(
        stream.to_yaml()).to_yaml()
Example 14
    def __init__(self, img_size=256):
        super(HFlipCrop, self).__init__(img_size)

        self.solt_pipeline = solt.Stream([
            slt.Flip(p=self.probablity, axis=1),
            slt.Crop(224, crop_mode="r")
        ])

        self.albumentations_pipeline = albu.Compose([
            albu.HorizontalFlip(p=self.probablity),
            albu.RandomCrop(height=224, width=224),
            ToTensor(normalize={
                "mean": [0.485, 0.456, 0.406],
                "std": [0.229, 0.224, 0.225]
            }),
        ])

        self.torchvision_pipeline = tv_transforms.Compose([
            tv_transforms.RandomHorizontalFlip(p=self.probablity),
            tv_transforms.RandomCrop(224),
            tv_transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])

        _augm_ppl = augmentor.Pipeline()
        _augm_ppl.flip_left_right(probability=self.probablity)
        _augm_ppl.crop_random(probability=1,
                              percentage_area=224 / float(self.img_size))

        self.augmentor_pipeline = tv_transforms.Compose([
            _augm_ppl.torch_transform(),
            tv_transforms.transforms.ToTensor(),
            tv_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
        ])
Example 15
                             },
                             {
                                 "cutout": {
                                     "cutout_size": 2,
                                     "p": 0.5
                                 }
                             },
                         ],
                     },
                 },
             ]
         }
     },
     slc.Stream([
         slt.Pad(34),
         slt.Crop(32, "r"),
         slt.CutOut(2),
         slc.Stream([slt.Pad(34),
                     slt.Crop(32, "r"),
                     slt.CutOut(2)]),
     ]),
 ],
 [
     {
         "stream": {
             "transforms": [
                 {
                     "stream": {
                         "interpolation":
                         None,
                         "padding":
Example 16
def test_wrong_crop_size_types(cutout_crop_size):
    with pytest.raises(TypeError):
        slt.Crop(crop_to=cutout_crop_size)

    with pytest.raises(TypeError):
        slt.CutOut(cutout_size=cutout_crop_size)
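
Read together with Example 6, this suggests that invalid argument types are rejected with TypeError when the transform is constructed, while sizes larger than the input only fail with ValueError once the transform is applied. A minimal sketch under that assumption, using a hypothetical 2x2 image and a string as the wrong type:

import numpy as np
import pytest
import solt.core as slc
import solt.transforms as slt

img = np.zeros((2, 2, 1), dtype=np.uint8)
dc = slc.DataContainer((img,), "I")

# Wrong argument type (a string is assumed to be among the rejected types):
# fails at construction time.
with pytest.raises(TypeError):
    slt.Crop(crop_to="32")

# Crop larger than the 2x2 image: fails when the transform is applied.
with pytest.raises(ValueError):
    slt.Crop(crop_to=64)(dc)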