Example #1
def init_mnist_transforms():
    train_trf = Compose([
        wrap2solt,
        slc.Stream([
            slt.ResizeTransform(resize_to=(64, 64), interpolation='bilinear'),
            slt.RandomScale(range_x=(0.9, 1.1), same=False, p=0.5),
            slt.RandomShear(range_x=(-0.05, 0.05), p=0.5),
            slt.RandomRotate(rotation_range=(-10, 10), p=0.5),
            # slt.RandomRotate(rotation_range=(-5, 5), p=0.5),
            slt.PadTransform(pad_to=70),
            slt.CropTransform(crop_size=64, crop_mode='r'),
            slt.ImageAdditiveGaussianNoise(p=1.0)
        ]),
        unpack_solt,
        ApplyTransform(Normalize((0.5, ), (0.5, )))
    ])

    test_trf = Compose([
        wrap2solt,
        slt.ResizeTransform(resize_to=(64, 64), interpolation='bilinear'),
        # slt.PadTransform(pad_to=64),
        unpack_solt,
        ApplyTransform(Normalize((0.5, ), (0.5, ))),
    ])

    return train_trf, test_trf
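A minimal call sketch (not from the original project): the composed pipeline is applied to an (image, label) pair, mirroring the tr((img, 0)) call in Example #2 below; the 28x28 placeholder image and the label value are assumptions.

import numpy as np

train_trf, test_trf = init_mnist_transforms()

# Placeholder uint8 image standing in for an MNIST digit (shape is an assumption).
img = np.random.randint(0, 256, size=(28, 28, 1), dtype=np.uint8)
img_tr, target = train_trf((img, 7))  # resized to 64x64, augmented, normalized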
Example #2
def init_transforms(nc=1):
    if nc == 1:
        norm_mean_std = Normalize((0.1307, ), (0.3081, ))
    elif nc == 3:
        norm_mean_std = Normalize((0.4914, 0.4822, 0.4465),
                                  (0.247, 0.243, 0.261))
    else:
        raise ValueError("Not support channels of {}".format(nc))

    train_trf = Compose([
        wrap2solt,
        slc.Stream([
            slt.ResizeTransform(resize_to=(32, 32), interpolation='bilinear'),
            slt.RandomScale(range_x=(0.9, 1.1), same=False, p=0.5),
            slt.RandomShear(range_x=(-0.05, 0.05), p=0.5),
            slt.RandomRotate(rotation_range=(-10, 10), p=0.5),
            # slt.RandomRotate(rotation_range=(-5, 5), p=0.5),
            slt.PadTransform(pad_to=36),
            slt.CropTransform(crop_size=32, crop_mode='r'),
            slt.ImageAdditiveGaussianNoise(p=1.0)
        ]),
        unpack_solt,
        ApplyTransform(norm_mean_std)
    ])

    test_trf = Compose([
        wrap2solt,
        slt.ResizeTransform(resize_to=(32, 32), interpolation='bilinear'),
        unpack_solt,
        ApplyTransform(norm_mean_std)
    ])

    def custom_augment(img):
        tr = Compose([
            wrap2solt,
            slc.Stream([
                slt.ResizeTransform(resize_to=(32, 32),
                                    interpolation='bilinear'),
                slt.RandomScale(range_x=(0.9, 1.1), same=False, p=0.5),
                slt.RandomShear(range_x=(-0.05, 0.05), p=0.5),
                slt.RandomRotate(rotation_range=(-10, 10), p=0.5),
                # slt.RandomRotate(rotation_range=(-5, 5), p=0.5),
                slt.PadTransform(pad_to=36),
                slt.CropTransform(crop_size=32, crop_mode='r'),
                slt.ImageAdditiveGaussianNoise(p=1.0)
            ]),
            unpack_solt,
            ApplyTransform(norm_mean_std)
        ])

        img_tr, _ = tr((img, 0))
        return img_tr

    return train_trf, test_trf, custom_augment
Example #3
def test_resize_img_to_arbitrary_size(img, mask, resize_to):
    # Setting up the data
    # Keypoints at the corners of a small square in the top-left of the image
    kpts_data = np.array([[0, 0], [0, 2], [2, 2], [2, 0]]).reshape((4, 2))
    kpts = sld.KeyPoints(kpts_data.copy(), img.shape[0], img.shape[1])

    dc = sld.DataContainer((
        img,
        mask,
        kpts,
    ), 'IMP')
    transf = slt.ResizeTransform(resize_to=resize_to)
    res = transf(dc).data

    if isinstance(resize_to, int):
        resize_to = (resize_to, resize_to)

    scale_x = resize_to[0] / img.shape[1]
    scale_y = resize_to[1] / img.shape[0]

    assert transf._resize_to == resize_to
    assert (res[0].shape[0] == resize_to[1]) and (res[0].shape[1] == resize_to[0])
    assert (res[1].shape[0] == resize_to[1]) and (res[1].shape[1] == resize_to[0])
    assert (res[2].H == resize_to[1]) and (res[2].W == resize_to[0])

    kpts_data = kpts_data.astype(float)
    kpts_data[:, 0] *= scale_x
    kpts_data[:, 1] *= scale_y
    kpts_data = kpts_data.astype(int)
    assert np.array_equal(res[2].data, kpts_data)
Example #4
def init_data_processing():
    kvs = GlobalKVS()
    train_augs = init_train_augs()

    dataset = OAProgressionDataset(dataset=kvs['args'].dataset_root, split=kvs['metadata'], trf=train_augs)

    mean_vector, std_vector = init_mean_std(snapshots_dir=kvs['args'].snapshots,
                                            dataset=dataset, batch_size=kvs['args'].bs,
                                            n_threads=kvs['args'].n_threads)

    print(colored('====> ', 'red') + 'Mean:', mean_vector)
    print(colored('====> ', 'red') + 'Std:', std_vector)

    norm_trf = tv_transforms.Normalize(mean_vector.tolist(), std_vector.tolist())
    train_trf = tv_transforms.Compose([
        train_augs,
        partial(apply_by_index, transform=norm_trf, idx=0)
    ])

    val_trf = tv_transforms.Compose([
        img_labels2solt,
        slc.Stream([
            slt.ResizeTransform((310, 310)),
            slt.CropTransform(crop_size=(300, 300), crop_mode='c'),
            slt.ImageColorTransform(mode='gs2rgb'),
        ], interpolation='bicubic'),
        unpack_solt_data,
        partial(apply_by_index, transform=tv_transforms.ToTensor(), idx=0),
        partial(apply_by_index, transform=norm_trf, idx=0)
    ])

    kvs.update('train_trf', train_trf)
    kvs.update('val_trf', val_trf)
    kvs.save_pkl(os.path.join(kvs['args'].snapshots, kvs['snapshot_name'], 'session.pkl'))
Example #5
def init_loader(metadata, args, snapshots_root):
    mean_vector, std_vector = session.init_mean_std(snapshots_root, None, None, None)

    norm_trf = tv_transforms.Normalize(mean_vector.tolist(), std_vector.tolist())

    tta_trf = tv_transforms.Compose([
        img_labels2solt,
        slc.Stream([
            slt.PadTransform(pad_to=(700, 700), padding='z'),
            slt.CropTransform(crop_size=(700, 700), crop_mode='c'),
            slt.ResizeTransform(resize_to=(310, 310), interpolation='bicubic'),
            slt.ImageColorTransform(mode='gs2rgb'),
        ], interpolation='bicubic'),
        unpack_solt_data,
        partial(apply_by_index, transform=tv_transforms.ToTensor(), idx=0),
        partial(apply_by_index, transform=norm_trf, idx=0),
        partial(apply_by_index, transform=partial(five_crop, size=300), idx=0),
    ])

    dataset = OAProgressionDataset(dataset=args.dataset_root,
                                   split=metadata, trf=tta_trf)

    loader = DataLoader(dataset,
                        batch_size=args.bs,
                        sampler=SequentialSampler(dataset),
                        num_workers=args.n_threads)

    return loader
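A minimal inference sketch (assumed, not part of the snippet): averaging model outputs over the five crops produced by the TTA pipeline. The batch key 'img', the crop stacking, and the resulting [bs, 5, C, 300, 300] shape are assumptions.

import torch

def predict_tta(model, loader, device='cuda'):
    # Hypothetical TTA loop over the loader returned by init_loader.
    model.eval()
    preds = []
    with torch.no_grad():
        for batch in loader:
            imgs = batch['img'].to(device)                 # assumed shape: [bs, 5, C, 300, 300]
            bs, n_crops, c, h, w = imgs.shape
            logits = model(imgs.view(-1, c, h, w))         # fold the crops into the batch dim
            logits = logits.view(bs, n_crops, -1).mean(1)  # average over the five crops
            preds.append(logits.cpu())
    return torch.cat(preds)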
Example #6
    def custom_augment(img):

        tr = Compose([
            wrap2solt,
            slc.Stream([
                slt.ResizeTransform(resize_to=(32, 32),
                                    interpolation='bilinear'),
                slt.RandomScale(range_x=(0.9, 1.1), same=False, p=0.5),
                slt.RandomFlip(axis=1, p=0.5),
                # slt.RandomShear(range_x=(-0.05, 0.05), p=0.5),
                # slt.RandomRotate(rotation_range=(-10, 10), p=0.5),
                slt.RandomRotate(rotation_range=(-5, 5), p=0.5),
                slt.PadTransform(pad_to=36),
                slt.CropTransform(crop_size=32, crop_mode='r'),
                slt.ImageAdditiveGaussianNoise(p=1.0)
            ]),
            unpack_solt,
            ApplyTransform(norm_mean_std)
        ])

        if len(img.shape) == 3:
            imgs = np.expand_dims(img, axis=0)
        elif len(img.shape) == 4:
            imgs = img
        else:
            raise ValueError('Expected 3 or 4 dimensions, but got {}'.format(
                len(img.shape)))

        out_imgs = []
        for b in range(imgs.shape[0]):
            _img = imgs[b, :].astype(np.uint8)
            _img, _ = tr((_img, 0))
            out_imgs.append(_img)

        return torch.stack(out_imgs, dim=0)
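A small call sketch (assumed): custom_augment applied to a batch of HWC uint8 images, assuming it is in scope (e.g., returned alongside the other transforms as in Example #2). Per the branches above, a single 3-D image is promoted to a batch of one and the result is a stacked torch tensor.

import numpy as np

# Hypothetical call; the 32x32 single-channel shape is a placeholder.
batch = np.random.randint(0, 256, size=(8, 32, 32, 1), dtype=np.uint8)
batch_aug = custom_augment(batch)      # torch.Tensor with one entry per input image
single_aug = custom_augment(batch[0])  # a single HWC image is expanded to a batch of one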
Example #7
def init_mnist_transforms():
    return Compose([
        wrap2solt,
        slt.ResizeTransform(resize_to=(64, 64), interpolation='bilinear'),
        unpack_solt,
        ApplyTransform(Normalize((0.5, ), (0.5, )))
    ])
Example #8
    def __init__(self):
        self.imgaug_transform = iaa.Scale(size=512, interpolation="linear")
        self.solt_stream = slc.Stream(
            [slt.ResizeTransform(resize_to=(512, 512))])
        self.augmentor_op = Operations.Resize(probability=1,
                                              width=512,
                                              height=512,
                                              resample_filter="BILINEAR")
Example #9
    def __init__(self):
        self.augmentor_pipeline = Pipeline()
        self.augmentor_pipeline.add_operation(
            Operations.Crop(probability=1, width=64, height=64, centre=False))
        self.augmentor_pipeline.add_operation(
            Operations.Resize(probability=1, width=512, height=512, resample_filter="BILINEAR")
        )
        self.imgaug_transform = iaa.Sequential(
            [iaa.CropToFixedSize(width=64, height=64), iaa.Scale(size=512, interpolation="linear")]
        )
        self.solt_stream = slc.Stream(
            [slt.CropTransform(crop_size=(64, 64), crop_mode="r"), slt.ResizeTransform(resize_to=(512, 512))]
        )
Example #10
def test_different_interpolations_per_item_per_transform(
        img_6x6, transform_settings):
    dc = sld.DataContainer((img_6x6, ),
                           'I',
                           transform_settings=transform_settings)
    dc_res = slt.ResizeTransform(resize_to=(10, 15),
                                 interpolation='bilinear')(dc)

    interp = allowed_interpolations['bilinear']
    if transform_settings is not None:
        interp = allowed_interpolations[
            transform_settings[0]['interpolation'][0]]
    assert np.array_equal(
        cv2.resize(img_6x6, (10, 15), interpolation=interp).reshape(15, 10, 1),
        dc_res.data[0])
Example #11
def init_train_augs():
    trf = transforms.Compose([
        img_labels2solt,
        slc.Stream([
            slt.PadTransform(pad_to=(700, 700)),
            slt.CropTransform(crop_size=(700, 700), crop_mode='c'),
            slt.ResizeTransform((310, 310)),
            slt.ImageAdditiveGaussianNoise(p=0.5, gain_range=0.3),
            slt.RandomRotate(p=1, rotation_range=(-10, 10)),
            slt.CropTransform(crop_size=(300, 300), crop_mode='r'),
            slt.ImageGammaCorrection(p=0.5, gamma_range=(0.5, 1.5)),
            slt.ImageColorTransform(mode='gs2rgb')
        ], interpolation='bicubic', padding='z'),
        unpack_solt_data,
        partial(apply_by_index, transform=transforms.ToTensor(), idx=0),
    ])
    return trf
Example #12
    def custom_augment(img):
        tr = Compose([
            wrap2solt,
            slc.Stream([
                slt.ResizeTransform(resize_to=(32, 32),
                                    interpolation='bilinear'),
                slt.RandomScale(range_x=(0.9, 1.1), same=False, p=0.5),
                slt.RandomShear(range_x=(-0.05, 0.05), p=0.5),
                slt.RandomRotate(rotation_range=(-10, 10), p=0.5),
                # slt.RandomRotate(rotation_range=(-5, 5), p=0.5),
                slt.PadTransform(pad_to=36),
                slt.CropTransform(crop_size=32, crop_mode='r'),
                slt.ImageAdditiveGaussianNoise(p=1.0)
            ]),
            unpack_solt,
            ApplyTransform(norm_mean_std)
        ])

        img_tr, _ = tr((img, 0))
        return img_tr
Example #13
def test_resize_does_not_change_labels():
    trf = slt.ResizeTransform(resize_to=(5, 5))
    dc = sld.DataContainer((1, ), 'L')
    dc = trf(dc)
    assert dc.data[0] == 1
Example #14
def test_wrong_resize_types(resize_to):
    with pytest.raises(TypeError):
        slt.ResizeTransform(resize_to=resize_to)
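The parametrization is not shown in the snippet; a plausible setup (an assumption, not the project's actual test data) feeds ResizeTransform values of the wrong type and expects a TypeError:

import pytest

# Hypothetical parametrization; the exact invalid values the project uses are not shown.
@pytest.mark.parametrize('resize_to', ['64', 2.5, (2.5, 2)])
def test_wrong_resize_types(resize_to):
    with pytest.raises(TypeError):
        slt.ResizeTransform(resize_to=resize_to)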
Example #15
def init_transforms(mean_vector, std_vector):
    kvs = GlobalKVS()

    if mean_vector is not None:
        mean_vector = torch.from_numpy(mean_vector).float()
        std_vector = torch.from_numpy(std_vector).float()
        norm_trf = partial(normalize_channel_wise,
                           mean=mean_vector,
                           std=std_vector)
        norm_trf = partial(apply_by_index, transform=norm_trf, idx=[0, 1, 2])
    else:
        norm_trf = None

    if kvs['args'].siamese:
        resize_train = slc.Stream()
        crop_train = slt.CropTransform(crop_size=(kvs['args'].imsize,
                                                  kvs['args'].imsize),
                                       crop_mode='c')
    else:
        resize_train = slt.ResizeTransform(
            (kvs['args'].inp_size, kvs['args'].inp_size))
        crop_train = slt.CropTransform(crop_size=(kvs['args'].crop_size,
                                                  kvs['args'].crop_size),
                                       crop_mode='r')

    train_trf = [
        wrap2solt,
        slc.Stream([
            slt.PadTransform(pad_to=(kvs['args'].imsize, kvs['args'].imsize)),
            slt.CropTransform(crop_size=(kvs['args'].imsize,
                                         kvs['args'].imsize),
                              crop_mode='c'),
            resize_train,
            slt.ImageAdditiveGaussianNoise(p=0.5, gain_range=0.3),
            slt.RandomRotate(p=1, rotation_range=(-10, 10)),
            crop_train,
            slt.ImageGammaCorrection(p=0.5, gamma_range=(0.5, 1.5)),
        ]),
        unpack_solt_data,
        partial(pack_tensors, no_kl=kvs['args'].no_kl),
    ]

    if not kvs['args'].siamese:
        resize_val = slc.Stream([
            slt.ResizeTransform((kvs['args'].inp_size, kvs['args'].inp_size)),
            slt.CropTransform(crop_size=(kvs['args'].crop_size,
                                         kvs['args'].crop_size),
                              crop_mode='c'),
        ])
    else:
        resize_val = slc.Stream()

    val_trf = [
        wrap2solt,
        slc.Stream([
            slt.PadTransform(pad_to=(kvs['args'].imsize, kvs['args'].imsize)),
            slt.CropTransform(crop_size=(kvs['args'].imsize,
                                         kvs['args'].imsize),
                              crop_mode='c'),
            resize_val,
        ]),
        unpack_solt_data,
        partial(pack_tensors, no_kl=kvs['args'].no_kl),
    ]

    if norm_trf is not None:
        train_trf.append(norm_trf)
        val_trf.append(norm_trf)

    train_trf = transforms.Compose(train_trf)
    val_trf = transforms.Compose(val_trf)

    return train_trf, val_trf