Example #1
    def __data_pipline(self, img, ldmarks):
        transform = None
        if self.mode == 'train':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    A.HorizontalFlip(p=0.5),
                    A.VerticalFlip(p=0.5),
                    A.ToFloat(p=1),
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        elif self.mode == 'test':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    A.ToFloat(p=1),
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        transformed = transform(image=img, keypoints=ldmarks)

        return transformed
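A minimal usage sketch (assuming img is an HxWx3 uint8 array and ldmarks a list of (x, y) pairs, the layout A.KeypointParams(format='xy') expects); the Compose call returns a dict:

        transformed = self.__data_pipline(img, ldmarks)
        img_aug = transformed['image']      # float32 in [0, 1] after A.ToFloat
        kps_aug = transformed['keypoints']  # remapped through Resize/Crop/flips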
Example #2
    def __init__(self, outputs=6):
        super().__init__()
        self.net = models.resnet34(pretrained=True)
        self.linear = Linear(1000, outputs)

        df = pd.read_csv("/home/dipet/kaggle/prostate/input/prostate-cancer-grade-assessment/train.csv")
        self.train_df, self.valid_df = train_test_split(df, test_size=0.2)
        self.data_dir = "/datasets/panda/train_64_100"

        self.train_transforms = A.Compose(
            [
                A.Compose(
                    [
                        A.OneOf([A.GaussNoise(), A.MultiplicativeNoise(elementwise=True)]),
                        A.RandomBrightnessContrast(brightness_limit=0.02,
                                                   contrast_limit=0.02),
                        A.HueSaturationValue(hue_shift_limit=0,
                                             sat_shift_limit=10,
                                             val_shift_limit=10),
                        A.Flip(),
                        A.RandomGridShuffle(grid=(10, 10)),
                        A.GridDistortion(),
                        A.Rotate()
                    ],
                    p=0.5,
                ),
                A.ToFloat(),
            ]
        )
        self.valid_transforms = A.Compose([A.ToFloat()])
Example #3
    def __data_generation(self, file_path_list_temp):
        'generate data containing batch_size samples'
        X = []
        y = []

        for filename in file_path_list_temp:
            # Load the image and its color-coded mask
            if filename[-4:] == ".png":
                image = (plt.imread(self.dir_x + filename) * 255).astype(
                    np.uint8)
                image_masked = plt.imread(self.dir_y + filename[:-4] +
                                          "-MASK.png")
            else:
                image = plt.imread(self.dir_x + filename).astype(np.uint8)
                image_masked = plt.imread(self.dir_y + filename[:-4] +
                                          "-MASK.tif") / 255
            # Replace fully transparent pixels with white, then drop alpha
            if image.shape[2] > 3:
                image[image[:, :, 3] == 0] = [255, 255, 255, 0]
                image = image[:, :, :-1]
            # Binary mask: pixels within tolerance of the target color
            target = np.array(self.target) / 255
            mask = image_masked.copy()
            mask = np.all(np.abs(mask[:, :] - target) < 1e-1,
                          axis=2).astype(float)
            mask = np.expand_dims(mask, 2)

            # Define the augmentation pipelines
            augmentations_train = alb.Compose([
                alb.CLAHE(p=0.1),
                alb.Blur(p=0.1),
                alb.RandomBrightnessContrast(p=0.1,
                                             brightness_limit=0.2,
                                             contrast_limit=0.2),
                alb.Rotate(p=0.1, limit=5),
                alb.ToFloat(max_value=255)
            ])

            augmentations_val = alb.Compose([alb.ToFloat(max_value=255)])

            # Augmentation
            if self.train:
                augmented = augmentations_train(image=image, mask=mask)
            else:
                augmented = augmentations_val(image=image, mask=mask)

            image = augmented['image']
            if self.color_space == "lab":
                image = color.rgb2lab(image)
                image[:, :, 0] = image[:, :, 0] / 100
                image[:, :, 1] = image[:, :, 1] / 128
                image[:, :, 2] = image[:, :, 2] / 128
            X.append(image)
            y.append(augmented['mask'])

        X = np.array(X).reshape(self.batch_size, self.img_size, self.img_size,
                                3)
        y = np.array(y).reshape(self.batch_size, self.img_size, self.img_size,
                                1)

        return X, y
Example #4
def fn():
    """Default transformations"""
    transform = {
        'train': [
            A.Resize(448, 448),
            A.ToFloat(),
            ToTensorV2()
        ],
        'val': [
            A.Resize(448, 448),
            A.ToFloat(),
            ToTensorV2()
        ]
    }
    return transform
Example #5
def get_train_augm(size: Tuple[int, int],
                   p=0.5):
    # NOTE: size is accepted but unused here; compare get_valid_augm in the
    # next example, which applies albu.Resize(*size)
    return albu.Compose([
        albu.Flip(p=p),
        albu.ToFloat(255),
        ToTensorV2()  # albu.Lambda(image=to_tensor)
    ])
Example #6
def get_valid_augm(size: Tuple[int, int],
                   p=0.5):
    return albu.Compose([
        albu.Resize(*size),
        albu.ToFloat(255),
        ToTensor()  # albu.Lambda(image=to_tensor)
    ])
Example #7
def common_aug(mode, params, mean, p=1.):
    '''
    :param mode: one of 'more', 'inference', 'inference+flip', 'basic'
    '''
    augs_list = []
    assert mode in {
        'more',
        'inference',
        'inference+flip',
        'basic',
    }
    assert max(params.augmented_image_size,
               params.padded_image_size) >= params.nn_image_size
    augs_list += [
        albumentations.Resize(params.augmented_image_size,
                              params.augmented_image_size),
    ]
    if params.padded_image_size:
        augs_list += [
            albumentations.PadIfNeeded(min_height=params.padded_image_size,
                                       min_width=params.padded_image_size,
                                       border_mode=cv2.BORDER_REFLECT_101),
        ]
    if mode != 'inference':
        if mode == 'inference+flip':
            augs_list += [
                albumentations.HorizontalFlip(p=1.),
            ]
        else:
            augs_list += [
                albumentations.HorizontalFlip(),
            ]
    if mode == 'more':
        augs_list += [
            albumentations.RandomScale(0.1),
        ]
    if mode in ['inference', 'inference+flip']:
        augs_list += [
            albumentations.CenterCrop(params.nn_image_size,
                                      params.nn_image_size),
        ]
    else:
        augs_list += [
            albumentations.RandomCrop(params.nn_image_size,
                                      params.nn_image_size),
        ]
    augs_list += [
        albumentations.ToFloat(),
        # `mean` packs (mean, std); std is scaled by params.norm_sigma_k
        albumentations.Normalize(mean=mean[0],
                                 std=mean[1] * params.norm_sigma_k,
                                 max_pixel_value=1.0),
    ]
    if mode == 'more':
        augs_list += [
            albumentations.Blur(),
            # albumentations.Rotate(limit=5),
            albumentations.RandomBrightness(),
            albumentations.RandomContrast(),
        ]
    return albumentations.Compose(augs_list, p=p)
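A usage sketch for common_aug, assuming a params object exposing the fields referenced above (augmented_image_size, padded_image_size, nn_image_size, norm_sigma_k) and mean packed as a (mean, std) pair; the values here are hypothetical:

    aug = common_aug('inference', params, mean=(0.5, 0.25))
    out = aug(image=img)['image']  # resized, center-cropped, normalized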
Example #8
    def val_dataloader(self):
        hparams = self.hparams

        dataset = TransformDataset(
            self.valid_dataset,
            albu.Compose(
                [
                    albu.Resize(
                        hparams.img_height // hparams.scale_ratio,
                        hparams.img_width // hparams.scale_ratio,
                    ),
                    albu.ToFloat(255),
                    ToTensorV2(),
                ],
                bbox_params=AIEdgeDataset.bbox_params,
            ),
        )

        if hparams.gpus > 1:
            sampler = data.distributed.DistributedSampler(dataset,
                                                          shuffle=True)
        else:
            sampler = None

        return data.DataLoader(
            dataset,
            batch_size=hparams.batch_size,
            collate_fn=CollateFn(self.net.stride, hparams, "valid"),
            num_workers=hparams.num_workers,
            shuffle=(sampler is None),
            sampler=sampler,
            pin_memory=(hparams.gpus > 0),
        )
Example #9
    def test_dataloader(self):
        hparams = self.hparams

        dataset = TransformDataset(
            self.test_dataset,
            albu.Compose([
                albu.Resize(
                    hparams.img_height // hparams.scale_ratio,
                    hparams.img_width // hparams.scale_ratio,
                ),
                albu.ToFloat(255),
                ToTensorV2(),
            ]),
        )

        if self.hparams.gpus > 1:
            sampler = data.distributed.DistributedSampler(dataset,
                                                          shuffle=False)
        else:
            sampler = None

        return data.DataLoader(
            dataset,
            batch_size=hparams.batch_size,
            collate_fn=AIEdgeTestDataset.collate_fn,
            num_workers=hparams.num_workers,
            sampler=sampler,
            pin_memory=(hparams.gpus > 0),
        )
Example #10
    def __getitem__(self, idx):
        # NOTE: this pipeline is built but never applied; the call that used
        # it is commented out below
        aug = albu.Compose([
            albu.Flip(p=0.3),
            albu.Rotate(p=0.9),
            albu.Blur(p=0.4),
            albu.ToFloat(p=1.)
        ])
        # image = self.crop_im[idx]
        # segment = self.crop_mask[idx]
        image = self.images[idx]

        # mask = self.masks[idx]
        segment = self.labels_dict[self.files_names[idx]]['segment']
        segment = preprocessing.rleToMask(segment).reshape((4, 256, 1600))

        # mask = np.transpose(mask, (2, 1, 0))
        # if image.shape != (200, 256, 4):
        #     image = cv2.resize(image, dsize=(200, 256))
        #     mask = cv2.resize(mask, dsize=(200, 256))

        # image = preprocessing.one_augment(aug, image)

        segment = np.transpose(segment, (1, 2, 0))
        image = ToTensor()(image).float()

        segment = ToTensor()(segment).float()

        # mask = ToTensor()(mask).float()
        return (image, segment)
Example #11
    def __data_generation(self, file_path_list_temp):
        'generate data containing batch_size samples'
        X = []
        y = []

        for filename in file_path_list_temp:
            image = plt.imread(self.dir_x + filename)
            mask = plt.imread(self.dir_y + filename)

            if np.max(image) <= 1:
                image *= 255
                mask *= 255

            if image.shape[2] > 3:
                image[image[:, :, 3] == 0] = [255, 255, 255, 0]
                image = image[:, :, :-1]

            image = image.astype(np.uint8)
            mask = mask.astype(np.uint8)

            # Augmentation pipeline
            augmentations_train = alb.Compose([
                alb.CLAHE(p=0.1),
                alb.Blur(p=0.1),
                alb.RandomBrightnessContrast(p=0.1, brightness_limit=0.2, contrast_limit=0.2),
                alb.Rotate(p=0.1, limit=5),
                alb.ToFloat(max_value=255)
            ])

            augmentations_val = alb.Compose([
                alb.ToFloat(max_value=255)
            ])

            # Augmentation
            if self.train:
                augmented = augmentations_train(image=image, mask=mask)
            else:
                augmented = augmentations_val(image=image, mask=mask)

            image = augmented['image']
            X.append(image)
            y.append(augmented['mask'])

        X = np.array(X).reshape(self.batch_size, self.img_size, self.img_size, 3)
        y = np.array(y).reshape(self.batch_size, self.img_size, self.img_size, 1)

        return X, y
Example #12
    def _default_test_transforms(self) -> A.Compose:
        transform_ls: List[AlbumentationsTform] = [A.ToFloat()]
        if self.norm_values is not None:
            transform_ls.append(
                A.Normalize(mean=self.norm_values.mean,
                            std=self.norm_values.std))
        transform_ls.append(ToTensorV2())
        return A.Compose(transform_ls)
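A sketch of applying the returned pipeline, assuming img is an HxWxC uint8 array:

    tf = self._default_test_transforms()
    tensor = tf(image=img)['image']  # CxHxW torch.Tensor after ToTensorV2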
Example #13
def test_mydataset():
    train_transforms = A.Compose([A.ToFloat(max_value=255)])
    data_dirpath = f"{DIR_PATH}/data"
    dataset = Mydataset(data_dirpath, train_transforms, "train")
    assert len(dataset) == 9
    _, img_tensor, label_tensor = dataset[0]
    img = img_tensor.numpy()
    assert img.shape == (3, 28, 28)
Example #14
def get_byol_transforms(width, height):
    byol_transforms = A.Compose([
        # A.Resize takes (height, width); pass them in that order
        A.Resize(height, width),
        A.ToFloat(max_value=1.0),
        ToTensorV2(),
    ])

    return get_wrapper(byol_transforms)
Example #15
    def __init__(self):
        self.Compose = albu.Compose([
            albu.PadIfNeeded(min_height=40, min_width=40, border_mode=0,
                             value=[0, 0, 0], always_apply=True),
            albu.Cutout(num_holes=3, max_h_size=4, max_w_size=4, p=0.5),
            albu.HorizontalFlip(p=0.5),
            albu.RandomCrop(height=32, width=32, always_apply=True),
            albu.ToFloat(max_value=None, always_apply=True),
            ToTensor(normalize={'mean': [0.5071, 0.4867, 0.4408],
                                'std': [0.2675, 0.2565, 0.2761]})
        ])
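A usage sketch, assuming img is a 32x32 uint8 RGB array (CIFAR-style); the legacy albumentations.pytorch ToTensor used here converts the image to a normalized CHW tensor inside the Compose:

        out = self.Compose(image=img)
        tensor = out['image']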
Example #16
def get_test_transforms(width, height):
    test_transforms = A.Compose([
        A.ToFloat(max_value=1.0),
        # A.CenterCrop takes (height, width)
        A.CenterCrop(height, width),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ])

    return get_wrapper(test_transforms)
Example #17
    def __data_pipline(self, img, ldmarks):
        transform = None
        if self.mode == 'train':
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),  # /8--->(356, 536)
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    # A.CLAHE(p=1),
                    A.HorizontalFlip(p=0.5),
                    A.VerticalFlip(p=0.5),
                    A.ToFloat(p=1),  # (0 ~ 1)
                    # A.Normalize(max_pixel_value=1, p=1)
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        elif self.mode == 'test':
            # import random
            # random.seed(2020)
            transform = A.Compose(
                [
                    A.Resize(height=self.output_size[0],
                             width=self.output_size[1],
                             p=1),  # /8--->(356, 536)
                    A.Crop(x_min=40,
                           y_min=0,
                           x_max=self.output_size[1] - 76,
                           y_max=self.output_size[0],
                           p=1),
                    # (356, 460)
                    # A.CLAHE(p=1),
                    A.ToFloat(p=1),  # (0 ~ 1)
                    # A.Normalize(max_pixel_value=1, p=1)
                ],
                keypoint_params=A.KeypointParams(format='xy'))
        transformed = transform(image=img, keypoints=ldmarks)

        return transformed
Example #18
def get_common_transforms(shape, scale_ratio, pad):
    h, w = shape
    ph, pw = pad

    return [
        albu.Resize(h // scale_ratio, w // scale_ratio),
        # albu.Normalize(),
        albu.ToFloat(255),
        PadConstant(0, ph, 0, pw),  # PadConstant is project-specific
    ]
Example #19
def get_train_augm(size: Tuple[int, int],
                   p=0.5):
    return albu.Compose([
        albu.Resize(*size),
        albu.OneOf([albu.CLAHE(6, (4, 4), always_apply=True),
                    albu.Equalize(always_apply=True)], p=0.99),
        albu.HorizontalFlip(p=p),
        albu.VerticalFlip(p=p),
        albu.ToFloat(255),
        ToTensor()  # albu.Lambda(image=to_tensor)
    ])
Example #20
    def __init__(
        self,
        ann_file: str,
        img_dir: str,
        stage: str = "train",
        transforms=A.Compose([A.ToFloat()]),
    ):
        super().__init__()
        self.coco = coco.COCO(ann_file)
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.img_dir = img_dir
        self.stage = stage
        self.transforms = transforms
Example #21
def compose_transforms(transforms=None):
    if transforms is None:
        transforms = []
    transforms.append(A.ToFloat(max_value=255.0, always_apply=True))
    transforms.append(ToTensorV2(always_apply=True))

    c = A.Compose(
        transforms,
        bbox_params=A.BboxParams(format="pascal_voc",
                                 label_fields=["class_labels"]),
    )

    return c
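Because the returned Compose carries bbox_params, every call must pass bboxes along with the declared class_labels field; a sketch with hypothetical values:

    tf = compose_transforms()
    out = tf(image=img,
             bboxes=[[10, 20, 100, 120]],  # pascal_voc: x_min, y_min, x_max, y_max
             class_labels=[1])
    img_t, boxes = out['image'], out['bboxes']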
Example #22
def get_train_transforms(width, height):
    train_transforms = A.Compose([
        # A.RandomResizedCrop takes (height, width)
        A.RandomResizedCrop(height, width, scale=(0.1, 0.8)),
        A.ColorJitter(p=0.5),
        A.ToFloat(max_value=1.0),
        A.ShiftScaleRotate(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        A.CoarseDropout(p=0.5),
        ToTensorV2(),
    ])

    return get_wrapper(train_transforms)
Example #23
    def __init__(
            self,
            root_dir: Path,
            glob_search_word: str = "*.png",
            crop_size: Tuple[int, int] = (224, 224),
    ):
        assert root_dir.is_dir(), f"{root_dir} is not a directory"
        self.paths = list(map(str, root_dir.glob(glob_search_word)))
        assert len(self.paths) > 0, f"Not found: {root_dir}/{glob_search_word}"
        self.labels = list(ChannelOrder)
        self.transforms = alb.Compose([
            alb.Resize(height=crop_size[0] * 2, width=crop_size[1] * 2),
            alb.RandomCrop(height=crop_size[0], width=crop_size[1]),
            alb.VerticalFlip(),
            alb.RandomRotate90(),
            alb.ToFloat(),
            ToTensorV2(),
        ])
Example #24
    def __init__(self, img_dir, mask_dir, resize=None, n_channels=3, classes=1, train=False, first_channel=True):
        'Initialization'
        self.img_paths = glob.glob(img_dir + '/*')

        self.img_dir = img_dir
        self.mask_dir = mask_dir

        self.resize = resize
        self.n_channels = n_channels
        self.first_channel = first_channel
        self.classes = classes
        self.train = train
        self.entries = None
        self.augment = albm.Compose([
            albm.GaussNoise(p=0.3),
            albm.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.3),
            albm.ToFloat(max_value=1)
        ], p=1)
Example #25
    def train_dataloader(self):
        hparams = self.hparams

        augs = []
        if hparams.aug_flip:
            augs.append(albu.HorizontalFlip(p=1.0))
        if hparams.aug_brightness:
            augs.append(albu.RandomBrightness(limit=0.2, p=1.0))
        if hparams.aug_randomcrop:
            aug = albu.RandomSizedBBoxSafeCrop(hparams.img_height,
                                               hparams.img_width)
            augs.append(aug)

        dataset = TransformDataset(
            self.train_dataset,
            albu.Compose(
                [
                    *augs,
                    albu.Resize(hparams.img_height // 4,
                                hparams.img_width // 4),
                    albu.ToFloat(255),
                    ToTensorV2(),
                ],
                bbox_params=AIEdgeDataset.bbox_params,
            ),
        )

        if self.hparams.gpus > 1:
            sampler = data.distributed.DistributedSampler(dataset,
                                                          shuffle=True)
        else:
            sampler = None

        return data.DataLoader(
            dataset,
            batch_size=hparams.batch_size,
            collate_fn=CollateFn(self.net.stride, hparams, "train"),
            num_workers=hparams.num_workers,
            shuffle=(sampler is None),
            sampler=sampler,
            pin_memory=(hparams.gpus > 0),
            drop_last=True,
        )
Example #26
def main():
    cfg = Config()
    data_man = DataManager(cfg)
    tf = A.Compose([
        A.Resize(width=cfg.IMG_WIDTH * 2, height=cfg.IMG_HEIGHT * 2),
        A.ToFloat(),
        ToTensorV2(),
    ])

    im_folder = ImageFolder(data_man.data, tf)

    loader = DataLoader(im_folder,
                        batch_size=64,
                        num_workers=cfg.NUM_WORKERS,
                        pin_memory=cfg.PIN_MEMORY,
                        shuffle=False)

    mean, std = get_mean_std(loader, cfg)

    print('mean', mean)
    print('std', std)
Example #27
def test_mydataset_loader():
    train_transforms = A.Compose([A.ToFloat(max_value=255)])
    data_dirpath = f"{DIR_PATH}/data"
    dataset = Mydataset(data_dirpath, train_transforms, "train")
    loader = torch.utils.data.DataLoader(dataset, batch_size=2)
    device = "cpu"
    for _, feature, label in loader:
        label = label.view(-1)  # reshape [[0], [3]] -> [0, 3]
        feature_np = feature.numpy()
        label_np = label.numpy()
        feature, label = feature.to(device), label.to(device)
        break
    model_output_np = numpy.array(
        [
            [3, 0, 0, 0, 0, 0, 0, 0, 0, 0.01],
            [3, 0, 0, 0, 0, 0, 0, 0, 0, 0.01],
        ]
    )
    model_output = torch.from_numpy(model_output_np)
    criterion = nn.CrossEntropyLoss()
    loss = criterion(model_output, label)
    assert loss.numpy() >= 0
Example #28
def get_train_transforms(input_size=256):
    return A.Compose(
        [
            A.RandomCrop(input_size, input_size),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.OneOf(
                [
                    A.HueSaturationValue(
                        hue_shift_limit=0.2,
                        sat_shift_limit=0.2,
                        val_shift_limit=0.2,
                        p=0.9,
                    ),
                    A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.15, p=0.9),
                ],
                p=0.9,
            ),
            A.ToFloat(255),
            ToTensorV2(),
        ],
        additional_targets={"image1": "image"},
    )
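Since additional_targets maps 'image1' to 'image', a second aligned image goes through the identical random crop/flip/color parameters; a sketch assuming two registered uint8 images img_a and img_b:

    tf = get_train_transforms(256)
    out = tf(image=img_a, image1=img_b)
    t_a, t_b = out['image'], out['image1']  # same random params applied to both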
Example #29
runner.train(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    callbacks=callbacks,
    loaders=loaders,
    logdir=logdir,
    num_epochs=num_epochs,
    verbose=1,
    scheduler=scheduler,
    main_metric='accuracy01',
    minimize_metric=False,
)

## Step 2. FT with HFlip

train_dataset.augmentations = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.ToFloat(max_value=1),
], p=1)

# Discriminative learning rates: deeper layers get progressively larger LRs
optimizer = torch.optim.Adam([
    {'params': model.layer1.parameters(), 'lr': LR / 10},
    {'params': model.layer2.parameters(), 'lr': LR / 5},
    {'params': model.layer3.parameters(), 'lr': LR / 2},
    {'params': model.layer4.parameters(), 'lr': LR / 1},
], lr=LR)

scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, cooldown=1, min_lr=1e-7)

runner.train(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
Example #30
ocr_file = '%s/OCR/output.txt' % path
metadata_length = 35
tam_max = 3
L1_layer = Lambda(lambda tensor: K.abs(tensor[0] - tensor[1]))
train_augs = [[], [], [], [], [], []]
test_augs = [[], [], [], []]
keys = ['Set01', 'Set02', 'Set03', 'Set04', 'Set05']

seq_car = albu.Compose([
    albu.IAACropAndPad(px=(0, 8)),
    albu.IAAAffine(scale=(0.8, 1.2),
                   shear=(-8, 8),
                   order=[0, 1],
                   cval=0,
                   mode='constant'),
    albu.ToFloat(max_value=255)
], p=0.7)

seq_car2 = albu.Compose([
    albu.IAACropAndPad(px=(0, 8)),
    albu.IAAAffine(scale=(0.8, 1.2),
                   shear=(-8, 8),
                   order=[0, 1],
                   cval=0,
                   mode='constant'),
], p=0.7)

seq_plate = albu.Compose(
    [