Example #1
def get_augmentations(resize: int = 224, augmentation_intensity: Optional[str] = None) -> Optional[A.Compose]:
    fix_seeds(24)
    crop_limits = (int(resize * 0.85), resize)

    if augmentation_intensity == "slight":
        p_augment = 0.15
        p_scale = 0.15
        p_blur = 0.05
        p_dropout = 0.05
        p_flip = 0.15
        p_noise = 0.15
        gauss_limit = 0.005

    elif augmentation_intensity == "light":
        p_augment = 0.25
        p_scale = 0.2
        p_blur = 0.1
        p_dropout = 0.05
        p_flip = 0.2
        p_noise = 0.2
        gauss_limit = 0.01

    elif augmentation_intensity == "medium":
        p_augment = 0.5
        p_scale = 0.2
        p_blur = 0.2
        p_dropout = 0.1
        p_flip = 0.2
        p_noise = 0.2
        gauss_limit = 0.015

    elif augmentation_intensity == "heavy":
        p_augment = 0.5
        p_scale = 0.35
        p_blur = 0.35
        p_dropout = 0.15
        p_flip = 0.35
        p_noise = 0.35
        gauss_limit = 0.02

    elif augmentation_intensity is None:
        return None
    else:
        raise ValueError("Improper augmentation flag: should be None, slight, light, medium, or heavy")

    augmentation = A.Compose(
        [
            A.OneOf(
                [A.HorizontalFlip(), A.VerticalFlip()],
                p=p_flip,
            ),
            A.OneOf(
                [A.Rotate(p=1.0, limit=30), A.RandomRotate90(p=1.0)],
                p=p_scale,
            ),
            A.OneOf(
                [
                    A.ShiftScaleRotate(p=1.0, rotate_limit=30),
                    A.RandomSizedCrop(
                        min_max_height=crop_limits,
                        height=resize,
                        width=resize,
                        w2h_ratio=1.0,
                        interpolation=cv2.INTER_CUBIC,
                    ),
                ],
                p=p_scale,
            ),
            A.Blur(blur_limit=3, p=p_blur),
            A.CoarseDropout(max_height=7, max_width=7, p=p_dropout),
            A.GaussNoise(var_limit=(0.0, gauss_limit), p=p_noise),
        ],
        p=p_augment,
    )

    return augmentation
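A quick usage sketch (hedged: the image variable stands for an illustrative HxWx3 uint8 numpy array):

# Build a medium-strength pipeline and apply it to a single image.
aug = get_augmentations(resize=224, augmentation_intensity="medium")
if aug is not None:
    augmented = aug(image=image)["image"]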
# Faster than torchvision
# good for segmentation, instance segmentation

import cv2
import albumentations as A
import numpy as np
from PIL import Image
from utils import plot_examples

image = Image.open("images/elon.jpeg")
mask = Image.open("images/mask.jpeg")

transform = A.Compose([
    A.Resize(width=1920, height=1080),
    A.RandomCrop(width=1280, height=720),
    A.Rotate(limit=40, p=0.9, border_mode=cv2.BORDER_CONSTANT),
    A.HorizontalFlip(p=0.3),
    A.RGBShift(r_shift_limit=25, g_shift_limit=25, b_shift_limit=25, p=0.9),
    A.OneOf([A.Blur(blur_limit=3, p=0.5),
             A.ColorJitter(p=0.5)])
])

images_list = [image]
image = np.array(image)
mask = np.array(mask)

for i in range(15):
    augmentations = transform(image=image, mask=mask)
    augmented_img = augmentations["image"]
    augmented_mask = augmentations["mask"]
    images_list.append(augmented_img)
    images_list.append(augmented_mask)
plot_examples(images_list)
Example #2
df['Filename'] = img_dir[0] + df['Filename'].astype(str)
for extra in range(1, len(labels)):
    extra_df = pd.read_csv(labels[extra])
    extra_df['Filename'] = img_dir[extra] + '/' + extra_df['Filename']  # .astype(str)
    df = pd.concat([df, extra_df], ignore_index=True)

# Exclude all entries whose 'Died' status is "Missing"
df = df[~df['Died'].isin(['Missing'])]
df['Died'] = pd.to_numeric(df['Died'])

# Augmentations
A_transform = A.Compose([
    A.Flip(p=1),
    A.RandomRotate90(p=1),
    A.Rotate(p=1, limit=45, interpolation=3),
    A.RandomResizedCrop(input_size[0],
                        input_size[1],
                        scale=(0.8, 1.0),
                        ratio=(0.8, 1.2),
                        interpolation=3,
                        p=1),
    A.OneOf([
        A.IAAAdditiveGaussianNoise(),
        A.GaussNoise(),
    ], p=0.25),
    A.OneOf([
        A.MotionBlur(p=0.25),
        A.MedianBlur(blur_limit=3, p=0.25),
        A.Blur(blur_limit=3, p=0.25),
        A.GaussianBlur(p=0.25)
    ], p=0.25),
])
Example #4
def aug_rot(angle, image, mask):
    aug = A.Compose([
        A.Rotate(limit=(angle, angle), interpolation=1, border_mode=4,
                 value=None, mask_value=None, always_apply=False, p=1)
    ])
    return aug(image=image, mask=mask)
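A short usage sketch (hedged: image and mask are illustrative numpy arrays):

# Rotate an image/mask pair by exactly 15 degrees.
result = aug_rot(15, image, mask)
rotated_image, rotated_mask = result["image"], result["mask"]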
Example #5
from ml_stratifiers import MultilabelStratifiedKFold
import os
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from classification_models.resnet.models import ResNet34
import albumentations as A

MODEL_PATH = 'Christof/models/GAPNet/5_SE/'
exp_suffix = '_2'

SIZE = 256

# Load dataset info
path_to_train = 'Christof/assets/train_rgb_256/'
data = pd.read_csv('Christof/assets/train.csv')

normal_aug = A.Compose([A.Rotate((0,30),p=0.75),
                        A.RandomRotate90(p=1),
                        A.HorizontalFlip(p=0.5),
                        A.RandomBrightness(0.05),
                        A.RandomContrast(0.05),
                        A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.13704, 0.10145, 0.15313),
                                    max_pixel_value=255.)
                        ])

val_aug = A.Compose([A.HorizontalFlip(p=0.5),
                     A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.13704, 0.10145, 0.15313),
                                 max_pixel_value=255.)])

train_dataset_info = []
for name, labels in zip(data['Id'], data['Target'].str.split(' ')):
    train_dataset_info.append({
        'path': os.path.join(path_to_train, name),
        'labels': np.array([int(label) for label in labels])
    })
Example #6
import os
import warnings
warnings.filterwarnings("ignore")
import cv2
import numpy as np
import pandas as pd
import albumentations as A
from classification_models.resnet.models import ResNet34

MODEL_PATH = 'Christof/models/ResNet34/tests/19/'
exp_suffix = '2albub'

SIZE = 256

# Load dataset info
path_to_train = 'Christof/assets/train_rgb_256/'
data = pd.read_csv('Christof/assets/train.csv').sample(frac=0.5)

normal_aug = A.Compose(
    [A.Flip(p=0.5),
     A.Rotate((-180, 180), border_mode=cv2.BORDER_CONSTANT)])

train_dataset_info = []
for name, labels in zip(data['Id'], data['Target'].str.split(' ')):
    train_dataset_info.append({
        'path':
        os.path.join(path_to_train, name),
        'labels':
        np.array([int(label) for label in labels])
    })
train_dataset_info = np.array(train_dataset_info)

counts = np.zeros(28)
for item in train_dataset_info:
    for l in item['labels']:
        counts[l] = counts[l] + 1
Example #7
import albumentations

from albumentations import torch as AT

train_transforms = albumentations.Compose([
    albumentations.Resize(256, 256),
    albumentations.HorizontalFlip(),
    albumentations.Rotate(limit=10),
    albumentations.JpegCompression(80),
    albumentations.HueSaturationValue(),
    albumentations.Normalize(),
    AT.ToTensor()
])

val_transforms = albumentations.Compose([
    albumentations.Resize(256, 256),
    albumentations.Normalize(),
    AT.ToTensor()
])

test_norm_transforms = albumentations.Compose([
    albumentations.Resize(256, 256),
    albumentations.Normalize(),
    AT.ToTensor()
])

test_flip_transforms = albumentations.Compose([
    albumentations.Resize(256, 256),
    albumentations.HorizontalFlip(p=1.0),
    albumentations.Normalize(),
    AT.ToTensor()
])
Example #8
#%%
chosen_image = cv2.imread(image_path)
plt.imshow(cv2.cvtColor(chosen_image, cv2.COLOR_BGR2RGB))  # cv2 loads BGR; convert for display

#%%
# ref)
# Albumentations part adapted from my good friend Hongnan's notebook in the Global Wheat Detection competition (https://www.kaggle.com/reighns/augmentations-data-cleaning-and-bounding-boxes#Bounding-Boxes-with-Albumentations)

#%%
albumentation_list = [
    A.RandomSunFlare(p=1),
    A.RandomFog(p=1),
    A.RandomBrightness(p=1),
    A.RandomCrop(p=1, height=512, width=512),
    A.Rotate(p=1, limit=90),
    A.RGBShift(p=1),
    A.RandomSnow(p=1),
    A.HorizontalFlip(p=1),
    A.VerticalFlip(p=1),
    A.RandomContrast(limit=0.5, p=1),
    A.HueSaturationValue(p=1,
                         hue_shift_limit=20,
                         sat_shift_limit=30,
                         val_shift_limit=50),
    A.Cutout(p=1),
    A.Transpose(p=1),
    A.JpegCompression(p=1),
    A.CoarseDropout(p=1),
    A.IAAAdditiveGaussianNoise(loc=0,
                               scale=(2.55, 12.75),
                               p=1),
]
Example #9
#                         transforms.ToTensor(),
#                         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
#                                              0.229, 0.224, 0.225])
#                         ])


# valid_transform=transforms.Compose([
# #                         transforms.Resize((256,256)),
#                         transforms.ToTensor(),
#                         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
#                                              0.229, 0.224, 0.225])])


train_transform = A.Compose([
                            A.JpegCompression(p=0.5),
                            A.Rotate(limit=80, p=1.0),
                            A.OneOf([
                                A.OpticalDistortion(),
                                A.GridDistortion(),
                                A.IAAPiecewiseAffine(),
                            ]),
                            A.RandomSizedCrop(min_max_height=(int(resolution*0.7), input_res),
                                                height=resolution, width=resolution, p=1.0),
                            A.HorizontalFlip(p=0.5),
                            A.VerticalFlip(p=0.5),
                            A.GaussianBlur(p=0.3),
                            A.OneOf([
                                A.RandomBrightnessContrast(),
                                A.HueSaturationValue(),
                            ]),
                            # A.OneOf([...]),  # off in most cases; remaining entries not shown
                            ])

Example #10
 def __call__(self, image, boxes=None, labels=None):
     #initialize the format for lib albumentations
     if boxes.shape[0] == 0:
         return image, boxes, labels
     bbox = []
     for i in boxes:
         bbox.append(list(i))
     #create annotations
     annotations = {
         'image': image,
         'bboxes': boxes,
         'category_id': list(labels)
     }
     #create the transforms
     #Color_Level Change
     if self.cfg.DATA_LOADER.AUGMENTATION_WEATHER:
         trans_color_level = A.Compose([
             A.Cutout(num_holes=20,
                      max_h_size=64,
                      max_w_size=64,
                      fill_value=255,
                      always_apply=False,
                      p=0.8),
             A.Equalize(p=1),
             A.HueSaturationValue(hue_shift_limit=50,
                                  sat_shift_limit=50,
                                  val_shift_limit=50,
                                  always_apply=False,
                                  p=0.8),
             A.OneOf([
                 A.RandomFog(fog_coef_lower=0.3,
                             fog_coef_upper=0.7,
                             alpha_coef=0.08,
                             always_apply=False,
                             p=0.5),
                 A.RandomSnow(snow_point_lower=0.1,
                              snow_point_upper=0.3,
                              brightness_coeff=2.5,
                              always_apply=False,
                              p=0.5),
                 A.RandomSunFlare(flare_roi=(0, 0, 1, 0.5),
                                  angle_lower=0,
                                  angle_upper=1,
                                  num_flare_circles_lower=6,
                                  num_flare_circles_upper=10,
                                  src_radius=400,
                                  src_color=(255, 255, 255),
                                  always_apply=False,
                                  p=0.5),
                 A.RandomRain(slant_lower=-10,
                              slant_upper=10,
                              drop_length=20,
                              drop_width=1,
                              drop_color=(200, 200, 200),
                              blur_value=7,
                              brightness_coefficient=0.7,
                              rain_type=None,
                              always_apply=False,
                              p=0.5)
             ]),
             A.OneOf([
                 A.RandomSizedBBoxSafeCrop(720,
                                           960,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
                 A.RandomSizedBBoxSafeCrop(480,
                                           640,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
                 A.RandomSizedBBoxSafeCrop(240,
                                           320,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
             ]),
         ])
     else:
         trans_color_level = A.Compose([
             A.Cutout(num_holes=20,
                      max_h_size=64,
                      max_w_size=64,
                      fill_value=255,
                      always_apply=False,
                      p=0.5),
             A.Equalize(p=1),
             A.HueSaturationValue(hue_shift_limit=50,
                                  sat_shift_limit=50,
                                  val_shift_limit=50,
                                  always_apply=False,
                                  p=0.5),
             A.OneOf([
                 A.RandomSizedBBoxSafeCrop(720,
                                           960,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
                 A.RandomSizedBBoxSafeCrop(480,
                                           640,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
                 A.RandomSizedBBoxSafeCrop(240,
                                           320,
                                           erosion_rate=0.0,
                                           interpolation=1,
                                           always_apply=False,
                                           p=0.5),
             ]),
         ])
     #Spatial_Level
     if self.cfg.DATA_LOADER.AUGMENTATION_SPATIAL_LEVEL:
         trans_rotate_level = A.Compose([
             A.OneOf([
                 A.Rotate(limit=90,
                          interpolation=1,
                          border_mode=4,
                          value=None,
                          mask_value=None,
                          always_apply=False,
                          p=0.5),
                 A.RandomRotate90(always_apply=False, p=0.5),
                 A.VerticalFlip(always_apply=False, p=0.5),
                 A.HorizontalFlip(always_apply=False, p=0.5)
             ]),
         ])
     #Apply the trans
     aug = get_aug(trans_color_level)
     augmented = aug(**annotations)
     img = augmented['image']
     bbox = augmented['bboxes']
     bbox = np.array(bbox)
     label = augmented['category_id']
     #try rotate
     if self.cfg.DATA_LOADER.AUGMENTATION_SPATIAL_LEVEL:
         aug1 = get_aug(trans_rotate_level)
         augmented1 = aug1(**augmented)
         img1 = augmented1['image']
         bbox1 = augmented1['bboxes']
         bbox1 = np.array(bbox1)
         label1 = augmented1['category_id']
         #if rotate fail
         if bbox1.shape[0] == 0:
             return img, bbox.astype(np.float32), np.array(label)
         else:
             return img1, bbox1.astype(np.float32), np.array(label1)
     else:
         return img, bbox.astype(np.float32), np.array(label)
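The get_aug helper is not shown in this excerpt; a plausible sketch, assuming pascal_voc pixel boxes and the 'category_id' label field used above:

def get_aug(aug, min_area=0.0, min_visibility=0.0):
    # Hypothetical reconstruction of the missing helper: wrap the transform(s)
    # so 'bboxes' and 'category_id' stay in sync with the image.
    transforms = aug if isinstance(aug, list) else [aug]
    return A.Compose(transforms,
                     bbox_params=A.BboxParams(format='pascal_voc',
                                              min_area=min_area,
                                              min_visibility=min_visibility,
                                              label_fields=['category_id']))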
Example #11
    csv_file = f'GIZ_SIZE_{img_size}_arch_{arch}_n_folds_{n_folds}_num_epochs_{n_epochs}_train_bs_{train_batch_size}.csv'
    submission.to_csv(os.path.join(submissions_folder, csv_file), index=False)

    print(f'[INFO] Submission file saved to {os.path.join(submissions_folder, csv_file)}')

if __name__ == '__main__':
    args = parser.parse_args()

    _ = seed_everything(args.seed_value)
    # data augmentations
    data_transforms = {
        'train': al.Compose([
                al.Resize(args.img_size, args.img_size),
                al.Cutout(p=.6, max_h_size=15, max_w_size=10, num_holes=4),
                al.Rotate(limit=35, p=.04),
                al.Normalize((0.1307,), (0.3081,))
        ]),

        'test': al.Compose([
                al.Resize(args.img_size, args.img_size),
                al.Cutout(p=.6, max_h_size=15, max_w_size=10, num_holes=4),
                al.Normalize((0.1307,), (0.3081,))
        ])
    }

    test = pd.read_csv(args.test_csv_path)
    sample = pd.read_csv(args.sample_csv_path)

    # load models
    models = load_models(models_path=args.models_path, n_folds=args.kfold, arch=args.arch)
Example #12
    validation_dataset = make_dataset(validation_jpg_list, tag2label)
    validation_dataset_mask = make_dataset_mask(validation_jpg_list,
                                                tag2label_mask)
    validation_dataset_gender = make_dataset_gender(validation_jpg_list,
                                                    tag2label_gender)
    validation_dataset_age = make_dataset_age(validation_jpg_list,
                                              tag2label_age)

    dataset = make_dataset_age_58(jpg_list, tag2label_age)  # img_path, label

    # Load the data
    batch_size = args.batch

    transform = A.Compose([
        A.Resize(height=256 * 2, width=192 * 2),
        A.OneOf([A.Rotate(5), A.Rotate(10)]),
        A.HorizontalFlip(p=0.5),
        A.Normalize(),
    ])

    train_data = MyDataset(dataset, transform)
    train_iter = DataLoader(train_data,
                            sampler=ImbalancedDatasetSampler(train_data),
                            batch_size=batch_size,
                            num_workers=4)
    validation_data = MyDataset(validation_dataset_age, transform)
    validation_iter = DataLoader(validation_data,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
Example #13
def dataset_selector(train_aug, train_albumentation, val_aug,
                     val_albumentation, args):
    if args.segmentation_problem and args.selected_class == "Grietas":

        train_dataset_longitudinales = SIMEPU_Segmentation_Dataset(
            data_partition='train',
            transform=train_aug,
            fold=args.fold,
            augmentation=train_albumentation,
            selected_class="Grietas longitudinales",
            data_mod=args.data_mod,
        )

        train_albumentation.append(albumentations.Rotate(limit=(90, 90), p=1))
        train_dataset_transversales = SIMEPU_Segmentation_Dataset(
            data_partition='train',
            transform=train_aug,
            fold=args.fold,
            augmentation=train_albumentation,
            selected_class="Grietas transversales",
            rotate=True,
            data_mod=args.data_mod,
        )

        train_dataset = torch.utils.data.ConcatDataset(
            [train_dataset_longitudinales, train_dataset_transversales])

        num_classes = 1

        val_dataset_longitudinales = SIMEPU_Segmentation_Dataset(
            data_partition='validation',
            transform=val_aug,
            fold=args.fold,
            augmentation=val_albumentation,
            selected_class="Grietas longitudinales",
            data_mod=args.data_mod,
        )

        val_albumentation.append(albumentations.Rotate(limit=(90, 90), p=1))
        val_dataset_transversales = SIMEPU_Segmentation_Dataset(
            data_partition='validation',
            transform=val_aug,
            fold=args.fold,
            augmentation=val_albumentation,
            selected_class="Grietas transversales",
            rotate=True,
            data_mod=args.data_mod,
        )

        val_dataset = torch.utils.data.ConcatDataset(
            [val_dataset_longitudinales, val_dataset_transversales])

        train_loader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            pin_memory=True,
            shuffle=True,
            collate_fn=train_dataset_longitudinales.segmentation_collate)
        val_loader = DataLoader(
            val_dataset,
            batch_size=args.batch_size,
            pin_memory=True,
            shuffle=False,
            collate_fn=val_dataset_longitudinales.segmentation_collate)
    elif args.segmentation_problem:

        train_dataset = SIMEPU_Segmentation_Dataset(
            data_partition='train',
            transform=train_aug,
            fold=args.fold,
            augmentation=train_albumentation,
            selected_class=args.selected_class)

        num_classes = train_dataset.num_classes

        val_dataset = SIMEPU_Segmentation_Dataset(
            data_partition='validation',
            transform=val_aug,
            fold=args.fold,
            augmentation=val_albumentation,
            selected_class=args.selected_class,
        )

        train_loader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            pin_memory=True,
            shuffle=True,
            collate_fn=train_dataset.segmentation_collate)
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                pin_memory=True,
                                shuffle=False,
                                collate_fn=val_dataset.segmentation_collate)

        return train_dataset, train_loader, val_dataset, val_loader, num_classes
    else:
        train_dataset = SIMEPU_Dataset_MultiLabel(
            data_partition='train',
            transform=train_aug,
            fold=args.fold,
            augmentation=train_albumentation,
            segmentation_problem=args.segmentation_problem,
            selected_class=args.selected_class)

        num_classes = train_dataset.num_classes

        val_dataset = SIMEPU_Dataset_MultiLabel(
            data_partition='validation',
            transform=val_aug,
            fold=args.fold,
            segmentation_problem=args.segmentation_problem,
            augmentation=val_albumentation,
            selected_class=args.selected_class,
        )

        train_loader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            pin_memory=True,
            shuffle=True,
        )
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                pin_memory=True,
                                shuffle=False)

    return train_dataset, train_loader, val_dataset, val_loader, num_classes
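A hedged call sketch; the args fields below are the ones the function actually reads, with illustrative values:

from types import SimpleNamespace

# train_aug/val_aug and the albumentation lists come from the surrounding script.
args = SimpleNamespace(segmentation_problem=True, selected_class="Grietas",
                       fold=0, data_mod=None, batch_size=8)
train_ds, train_loader, val_ds, val_loader, num_classes = dataset_selector(
    train_aug, train_albumentation, val_aug, val_albumentation, args)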
Example #14
import torch.nn as nn
from training.lrfinder import LRFinder

from gradcam.grad_cam import *

"""**TRANSFORMS**

In this section we can define the AlbumentationTransforms
"""

#Transforms
channel_means = (0.49139968, 0.48215841, 0.44653091)
channel_stdevs = (0.24703223, 0.24348513, 0.26158784)
# Train Phase transformations
train_transforms = AlbumentationTransforms([
                                       A.Rotate((-30.0, 30.0)),
                                       A.HorizontalFlip(),
                                       A.RGBShift(r_shift_limit=50, g_shift_limit=50, b_shift_limit=50, p=0.5),
                                       A.Normalize(mean=channel_means, std=channel_stdevs),
                                       A.Cutout(num_holes=4) # fillvalue is 0 after normalizing as mean is 0
                                       ])

fillmeans = (np.array(channel_means)).astype(np.uint8)

# Test Phase transformations
test_transforms = AlbumentationTransforms([A.Normalize(mean=channel_means, std=channel_stdevs)])

"""**DATA LOADER**

We have loaded the data and plotted a few images
"""
Example #15
import cv2
import torch
import numpy as np
import albumentations as alb
from torch.utils.data import Dataset
import pickle

from conet.config import get_cfg

train_size_aug = alb.Compose([
    # alb.RandomSizedCrop(min_max_height=(300, 500)),
    alb.HorizontalFlip(),
    alb.VerticalFlip(),
    # alb.RandomBrightness(limit=0.01),
    alb.RandomScale(),
    alb.ElasticTransform(),
    alb.Rotate(limit=50),
    alb.PadIfNeeded(530, border_mode=cv2.BORDER_REFLECT101),
    alb.RandomCrop(512, 512),
    # alb.Normalize(),
    # alb.pytorch.ToTensor(),
    # ToTensorV2()
])
train_content_aug = alb.Compose([
    alb.MedianBlur(),
    alb.RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10),
    alb.RandomBrightnessContrast(brightness_limit=0.1),
    alb.Normalize(),
    # ToTensorV2()
])

val_aug = alb.Compose([
Example #16
def get_augmentation(version):
    if version == "v1":
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v2":
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.OneOf([
                    A.RandomCrop(height=256, width=256, p=1),
                    A.RandomCrop(height=384, width=384, p=1),
                    A.RandomCrop(height=512, width=512, p=1),
                    A.RandomCrop(height=640, width=640, p=1),
                ],
                        p=0.5),
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v3":
        return {
            "train":
            A.Compose([
                A.RandomCrop(height=1024, width=1024, p=1),
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v4":
        # for 3x-scaled dataset | 256x256
        return {
            "train":
            A.Compose([
                A.RandomCrop(height=512, width=512, p=1),
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v5":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.RandomCrop(height=512, width=512, p=1),
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v6":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.25),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.25),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.25),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.25),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v7":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.RandomCrop(height=768, width=768, p=1),
                A.Resize(height=256, width=256, p=1.0),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.25),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.25),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.25),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.25),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Resize(height=256, width=256, p=1.0),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v8":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Rotate(p=0.5),
                A.RandomCrop(height=512, width=512, p=1),
                A.Flip(p=0.5),

                # color
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.25),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.25),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.25),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.25),

                # transform
                A.ElasticTransform(p=1, alpha=120, sigma=6, alpha_affine=0.25),
                A.GridDistortion(p=0.25),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=0.25),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v9":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.25),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.25),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.25),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.25),

                # transform
                A.OneOf([
                    A.ElasticTransform(
                        alpha=120, sigma=6, alpha_affine=0.25, p=0.25),
                    A.GridDistortion(p=0.25),
                    A.OpticalDistortion(
                        distort_limit=2, shift_limit=0.5, p=0.25)
                ],
                        p=0.75),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v10":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),

                # size
                A.OneOf([
                    A.CropNonEmptyMaskIfExists(height=128, width=128, p=1.0),
                    A.CropNonEmptyMaskIfExists(height=256, width=256, p=1.0),
                    A.CropNonEmptyMaskIfExists(height=384, width=384, p=1.0)
                ],
                        p=0.25),
                A.PadIfNeeded(min_height=512,
                              min_width=512,
                              border_mode=4,
                              p=1),

                # array shuffle
                A.OneOf([A.MaskDropout(p=1),
                         A.RandomGridShuffle(p=1)], p=0.5),

                # quality
                A.Downscale(scale_min=0.25,
                            scale_max=0.75,
                            interpolation=0,
                            always_apply=False,
                            p=0.5),
                A.GaussNoise(p=0.5),
                A.OneOf([
                    A.GlassBlur(p=1),
                    A.GaussianBlur(p=1),
                ], p=0.5),

                # colors
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.5),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.5),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.5),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.5),
                A.CLAHE(p=0.5),
                A.Equalize(p=0.5),
                A.ChannelShuffle(p=0.5),

                # transform
                A.OneOf([
                    A.ElasticTransform(
                        alpha=120, sigma=6, alpha_affine=0.25, p=1),
                    A.GridDistortion(p=1),
                    A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
                ],
                        p=0.5),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v11":
        # custom normalization (see other/img_normalization.py)
        return {
            "train":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    elif version == "v12":
        # for 3x-scaled dataset | 512x512
        return {
            "train":
            A.Compose([
                A.Flip(p=0.5),
                A.Rotate(p=0.5),

                # size / quality
                A.OneOf([
                    A.CropNonEmptyMaskIfExists(height=128, width=128, p=1.0),
                    A.CropNonEmptyMaskIfExists(height=256, width=256, p=1.0),
                    A.CropNonEmptyMaskIfExists(height=384, width=384, p=1.0),
                    A.Downscale(scale_min=0.25,
                                scale_max=0.75,
                                interpolation=0,
                                always_apply=False,
                                p=1.0),
                ],
                        p=0.25),
                A.PadIfNeeded(min_height=512,
                              min_width=512,
                              border_mode=4,
                              p=1),

                # array shuffle
                A.OneOf([A.MaskDropout(p=1),
                         A.RandomGridShuffle(p=1)], p=0.15),

                # noise
                A.OneOf([
                    A.GaussNoise(p=1),
                    A.GlassBlur(p=1),
                    A.GaussianBlur(p=1),
                ],
                        p=0.15),

                # colors
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=30,
                                     val_shift_limit=20,
                                     always_apply=False,
                                     p=0.15),
                A.RandomBrightness(limit=0.4, always_apply=False, p=0.15),
                A.RandomContrast(limit=0.2, always_apply=False, p=0.15),
                A.RandomShadow(shadow_roi=(0, 0, 1, 1),
                               num_shadows_lower=1,
                               num_shadows_upper=50,
                               shadow_dimension=5,
                               always_apply=False,
                               p=0.15),
                A.OneOf([
                    A.CLAHE(p=1),
                    A.Equalize(p=1),
                    A.ChannelShuffle(p=1),
                ],
                        p=0.15),

                # transform
                A.OneOf([
                    A.ElasticTransform(
                        alpha=120, sigma=6, alpha_affine=0.25, p=1),
                    A.GridDistortion(p=1),
                    A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
                ],
                        p=0.15),
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
            "valid":
            A.Compose([
                A.Normalize(mean=(0.623, 0.520, 0.650),
                            std=(0.278, 0.305, 0.274),
                            max_pixel_value=255.0,
                            p=1.0)
            ]),
        }

    else:
        raise Exception(f"Augmentation version '{version}' is UNKNOWN!")
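A minimal usage sketch (hedged: the image array is illustrative):

# Pick a version and pull the train/valid pipelines.
augs = get_augmentation("v6")
train_aug, valid_aug = augs["train"], augs["valid"]
out = train_aug(image=image)["image"]  # image: HxWx3 uint8 numpy array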
Example #17
import os
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import albumentations as A
from classification_models.resnet.models import ResNet34

MODEL_PATH = 'Christof/models/ResNet34/tests/24/'
exp_suffix = '0'

SIZE = 256

# Load dataset info
path_to_train = 'Christof/assets/train_rgb_256/'
data = pd.read_csv('Christof/assets/train.csv').sample(frac=0.3)

normal_aug = A.Compose(
    [A.HorizontalFlip(p=0.5),
     A.VerticalFlip(p=0.5),
     A.Rotate((-180, 180))])

train_dataset_info = []
for name, labels in zip(data['Id'], data['Target'].str.split(' ')):
    train_dataset_info.append({
        'path':
        os.path.join(path_to_train, name),
        'labels':
        np.array([int(label) for label in labels])
    })
train_dataset_info = np.array(train_dataset_info)

counts = np.zeros(28)
for item in train_dataset_info:
    for l in item['labels']:
        counts[l] = counts[l] + 1
Example #18
import os
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import albumentations as A
from classification_models.resnet.models import ResNet34

MODEL_PATH = 'Christof/models/train_vs_test/ResNet34/1/'

SIZE = 256

# Load dataset info
path_to_train = 'Christof/assets/train_rgb_256/'
path_to_test = 'Christof/assets/test_rgb_256/'

train_data = pd.read_csv('Christof/assets/train.csv')
test_data = pd.read_csv('Christof/assets/sample_submission.csv')

normal_aug = A.Compose([A.Flip(p=0.75), A.Rotate((-180, 180))])

train_dataset_info = []
for name in train_data['Id']:
    train_dataset_info.append({
        'path': os.path.join(path_to_train, name),
        'labels': np.array(0)
    })
train_dataset_info = np.array(train_dataset_info)

test_dataset_info = []
for name in test_data['Id']:
    test_dataset_info.append({
        'path': os.path.join(path_to_test, name),
        'labels': np.array(1)
    })
Example #19
def get_filenames_of_path(path: pathlib.Path, ext: str = '*'):
    """Returns a list of files in a directory/path. Uses pathlib."""
    filenames = [file for file in path.glob(ext) if file.is_file()]
    return filenames

# input and target files
images_train = get_filenames_of_path(root / 'train_frames')
masks_train = get_filenames_of_path(root / 'train_masks')
images_valid = get_filenames_of_path(root / 'val_frames')
masks_valid = get_filenames_of_path(root / 'val_masks')

# training transformations and augmentations
transforms = Compose([
    Resize(input_size=(480, 720), target_size=(240, 360)),
    AlbuSeg2d(albu=albumentations.HorizontalFlip(p=0.5)),
    AlbuSeg2d(albu=albumentations.Rotate(limit=20, p=0.2)),
    DenseTarget(),
    Normalize_to_01(),
    FixGreyScale()
])

# dataset training
dataset_train = dataset.EyeDataset(inputs=images_train,
                                    targets=masks_train,
                                    transform=transforms)

# dataset validation
dataset_valid = dataset.EyeDataset(inputs=images_valid,
                                    targets=masks_valid,
                                    transform=transforms)
Example #20
def sample_custom_augmentations_constructor(
        num_features: int, window_radius: int) -> albumentations.Compose:
    """
    This function returns a custom augmentations object for use with sequences via the load_sequences function in
    data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
    order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
    probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
    problem.

    Args:
        num_features:  number of features used in the model
        window_radius:  half the window_size from the data configs

    Returns:
        custom augmentations function for use with sequences
    """
    max_kernel = int(round(0.1 * window_radius))
    max_hole_size = int(round(0.1 * window_radius))
    additional_targets = [
        ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)
    ]

    return albumentations.Compose(
        [
            # The augmentations assume an image is RGB between 0 and 1
            albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),
            # These augmentations should be order independent, toss 'em up front
            albumentations.Flip(p=0.5),
            albumentations.Transpose(p=0.5),
            albumentations.Rotate(limit=90, p=0.5),
            # Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front
            albumentations.RandomFog(
                fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08,
                p=0.5),
            # Color modifications
            albumentations.OneOf(
                [
                    albumentations.RandomBrightnessContrast(
                        brightness_limit=0.2,
                        contrast_limit=0.6,
                        brightness_by_max=True,
                        p=1.0),
                    albumentations.RGBShift(r_shift_limit=0.2,
                                            g_shift_limit=0.2,
                                            b_shift_limit=0.2,
                                            p=1.0),
                ],
                p=0.25,
            ),
            # Distortions
            albumentations.OneOf(
                [
                    albumentations.ElasticTransform(
                        alpha=1, sigma=50, alpha_affine=50, p=1.0),
                    albumentations.GridDistortion(
                        num_steps=5, distort_limit=0.4, p=1.0),
                    albumentations.OpticalDistortion(
                        distort_limit=0.1, shift_limit=0.1, p=1.0),
                ],
                p=0.25,
            ),
            albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),
            # Noise
            albumentations.OneOf(
                [
                    albumentations.CoarseDropout(max_holes=8,
                                                 max_height=max_hole_size,
                                                 max_width=max_hole_size,
                                                 fill_value=np.nan,
                                                 p=1.0),
                    albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),
                ],
                p=0.25,
            ),
            # Scaling, adding last so that other augmentations are applied at a consistent resolution
            albumentations.RandomScale(scale_limit=0.05, p=0.25),
            # Augmentations may not return images of the same size, images can be both smaller and larger than expected, so
            # these two augmentations are added to keep things consistent
            albumentations.PadIfNeeded(
                2 * window_radius, 2 * window_radius, always_apply=True,
                p=1.0),
            albumentations.CenterCrop(
                2 * window_radius, 2 * window_radius, always_apply=True,
                p=1.0),
            # Return the data to its original scale
            albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),
        ],
        p=1.0,
        additional_targets={target: "image"
                            for target in additional_targets},
    )
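A usage sketch, assuming ADDITIONAL_TARGETS_KEY formats names like "image_1", "image_2" (its actual value lives in data_core.py, not shown here):

# Apply one pipeline jointly to three co-registered frames of a sequence.
aug = sample_custom_augmentations_constructor(num_features=3, window_radius=64)
out = aug(image=frame0, image_1=frame1, image_2=frame2)
frames = [out["image"], out["image_1"], out["image_2"]]  # each 128x128 after pad/crop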
Example #21
    def __init__(self,
                 resize=(0, 0),
                 padding=(0, 0),
                 crop=(0, 0),
                 horizontal_flip_prob=0.0,
                 vertical_flip_prob=0.0,
                 gaussian_blur_prob=0.0,
                 rotate_degree=0.0,
                 cutout_prob=0.0,
                 cutout_dim=(8, 8),
                 hue_saturation_prob=0.0,
                 contrast_prob=0.0,
                 mean=(0.5, 0.5, 0.5),
                 std=(0.5, 0.5, 0.5),
                 normalize=True,
                 train=True):
        """Create data transformation pipeline.
        
        Args:
            resize (tuple, optional): Resize the input to the given height and
                width. (default: (0, 0))
            padding (tuple, optional): Pad the image if the image size is less
                than the specified dimensions (height, width). (default= (0, 0))
            crop (tuple, optional): Randomly crop the image with the specified
                dimensions (height, width). (default: (0, 0))
            horizontal_flip_prob (float, optional): Probability of an image
                being horizontally flipped. (default: 0)
            vertical_flip_prob (float, optional): Probability of an image
                being vertically flipped. (default: 0)
            gaussian_blur_prob (float, optional): Probability of applying
                gaussian blur. (default: 0)
            rotate_degree (float, optional): Angle of rotation for image
                augmentation. (default: 0)
            cutout_prob (float, optional): Probability that cutout will be
                performed. (default: 0)
            cutout_dim (tuple, optional): Dimensions of the cutout box (height, width).
                (default: (8, 8))
            hue_saturation_prob (float, optional): Probability of randomly changing hue,
                saturation and value of the input image. (default: 0)
            contrast_prob (float, optional): Randomly changing contrast of the input image.
                (default: 0)
            mean (float or tuple, optional): Dataset mean. (default: 0.5 for each channel)
            std (float or tuple, optional): Dataset standard deviation. (default: 0.5 for each channel)
            normalize (bool, optional): Whether to normalize with the given mean
                and std. (default: True)
            train (bool, optional): Whether the pipeline is for training; the
                augmentations above are applied only when True. (default: True)
        """
        transforms_list = []

        if sum(resize) > 0:
            transforms_list += [
                A.Resize(height=resize[0], width=resize[1], always_apply=True)
            ]
        if train:
            if sum(padding) > 0:
                transforms_list += [
                    A.PadIfNeeded(min_height=padding[0],
                                  min_width=padding[1],
                                  always_apply=True)
                ]
            if sum(crop) > 0:
                transforms_list += [
                    A.RandomCrop(crop[0], crop[1], always_apply=True)
                ]
            if horizontal_flip_prob > 0:  # Horizontal Flip
                transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]
            if vertical_flip_prob > 0:  # Vertical Flip
                transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]
            if gaussian_blur_prob > 0:  # Patch Gaussian Augmentation
                transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]
            if rotate_degree > 0:  # Rotate image
                transforms_list += [A.Rotate(limit=rotate_degree)]
            if cutout_prob > 0:  # CutOut
                if isinstance(mean, float):
                    fill_value = mean * 255.0
                else:
                    fill_value = tuple([x * 255.0 for x in mean])
                transforms_list += [
                    A.CoarseDropout(p=cutout_prob,
                                    max_holes=1,
                                    fill_value=fill_value,
                                    max_height=cutout_dim[0],
                                    max_width=cutout_dim[1])
                ]
            if hue_saturation_prob > 0:  # Hue Saturation
                transforms_list += [
                    A.HueSaturationValue(p=hue_saturation_prob)
                ]
            if contrast_prob > 0:  # Random Contrast
                transforms_list += [A.RandomContrast(p=contrast_prob)]
        if normalize:
            # normalize the data with mean and standard deviation to keep values in range [-1, 1]
            # since there are 3 channels for each image,
            # we have to specify mean and std for each channel
            transforms_list += [
                A.Normalize(mean=mean, std=std, always_apply=True),
            ]

        # convert the data to torch.FloatTensor
        transforms_list += [ToTensor()]

        self.transform = A.Compose(transforms_list)
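A usage sketch; the enclosing class is not named in this excerpt, so Transformations below is a placeholder for it:

# Illustrative CIFAR-style training pipeline built through this __init__.
train_transform = Transformations(resize=(32, 32),
                                  horizontal_flip_prob=0.5,
                                  rotate_degree=10.0,
                                  cutout_prob=0.3,
                                  cutout_dim=(8, 8),
                                  mean=(0.491, 0.482, 0.447),
                                  std=(0.247, 0.243, 0.262),
                                  train=True)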
Example #22
txt = glob(
    "D:/Usuarios/diego/Documentos/GitHub/Annotation_Tools/annotation/*.txt")

#List where bounding boxes and labels will be saved
bbs = []
category_ids = []

##We will use the mapping from category_id to the class name
#to visualize the class label for the bounding box on the image
category_id_to_name = {"Person": "Person"}

#Transformation to be applied
transform = A.Compose(
    [
        A.HorizontalFlip(p=1.0),
        A.Rotate(limit=25, border_mode=cv2.BORDER_CONSTANT, value=1, p=1),
        A.ISONoise(color_shift=(0.01, 0.01), intensity=(0.1, 0.1), p=1.0),
        A.Blur(blur_limit=5, p=1.0),
        A.RandomBrightnessContrast(brightness_limit=0.2,
                                   contrast_limit=0.2,
                                   brightness_by_max=True,
                                   p=1),
        A.GaussNoise(
            var_limit=(100.0, 300.0), mean=40, always_apply=False, p=1)
    ],
    bbox_params=A.BboxParams(format='pascal_voc',
                             label_fields=['category_ids']),
)

i = 1
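A hedged application sketch (image, bbs and category_ids would be filled from the annotation files globbed above):

# Apply the pipeline; boxes use pascal_voc [x_min, y_min, x_max, y_max] format.
transformed = transform(image=image, bboxes=bbs, category_ids=category_ids)
aug_image = transformed["image"]
aug_bboxes = transformed["bboxes"]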
Example #23
def main():
    train_transform = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ], )

    val_transforms = A.Compose([
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ], )

    model = ESNet(in_ch=3, n_class=1).to(DEVICE)
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train_loader, val_loader = get_loaders(
        TRAIN_IMG_DIR,
        TRAIN_MASK_DIR,
        VAL_IMG_DIR,
        VAL_MASK_DIR,
        BATCH_SIZE,
        train_transform,
        val_transforms,
        NUM_WORKERS,
        PIN_MEMORY,
    )

    if LOAD_MODEL:
        load_checkpoint(torch.load("model.pth.tar"), model)

    check_accuracy(val_loader, model, device=DEVICE)
    scaler = torch.cuda.amp.GradScaler()

    for epoch in range(NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler)

        # save model
        checkpoint = {
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        save_checkpoint(checkpoint)

        # check accuracy
        check_accuracy(val_loader, model, device=DEVICE)

        # print some examples to a folder
        save_predictions_as_imgs(val_loader,
                                 model,
                                 folder=SAVING_PATH,
                                 device=DEVICE)
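train_fn is imported from elsewhere in this project; given the GradScaler above, a hedged sketch of the usual mixed-precision step such a function performs:

# Sketch only: the real train_fn lives in the project's utilities.
# Shape handling assumes binary segmentation (n_class=1), as above.
def train_fn(loader, model, optimizer, loss_fn, scaler):
    for data, targets in loader:
        data = data.to(DEVICE)
        targets = targets.float().unsqueeze(1).to(DEVICE)

        with torch.cuda.amp.autocast():    # forward pass in mixed precision
            loss = loss_fn(model(data), targets)

        optimizer.zero_grad()
        scaler.scale(loss).backward()      # scale loss to avoid fp16 underflow
        scaler.step(optimizer)
        scaler.update()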
Example No. 24
import os
import random

import albumentations as A
import cv2
import pandas as pd
from pascal_voc_writer import Writer  # from the pascal-voc-writer package


def augment(source, images_output_path, size):
    images_path = images_output_path + "/JPEGImages/"
    os.makedirs(images_path, exist_ok=True)

    xml_path = images_output_path + "/Annotations/"
    os.makedirs(xml_path, exist_ok=True)

    transform = A.Compose(
        [
            # A.CLAHE(),
            # A.RandomScale(scale_limit=[0.5, 1]),
            # A.RandomCrop(width=450, height=450),
            A.OneOf([
                A.Sequential(
                    [A.RandomCrop(width=800, height=600),
                     A.RandomRotate90()]),
                # A.Sequential(
                #     [
                #         A.RandomSizedBBoxSafeCrop(width=800, height=600),
                #         A.RandomRotate90(),
                #     ]
                # ),
                A.Sequential([
                    A.RandomScale(scale_limit=0.2),
                    A.Flip(),
                    A.RandomRotate90(),
                ],
                             # p=0.3,
                             ),
                A.Sequential(
                    [
                        A.Rotate(),
                    ],
                    p=0.3,
                ),
            ])
            # A.Transpose(),
            # A.Resize(0.9, 0.9),
            # A.Blur(blur_limit=3),
            # A.OpticalDistortion(),
            # A.GridDistortion(),
            # A.HueSaturationValue(),
        ],
        bbox_params=A.BboxParams(format="pascal_voc",
                                 min_visibility=0.5,
                                 label_fields=["class_labels"]),
    )

    rows = []
    random.seed(42)

    images_index = 1
    for name, group in source.groupby("filename"):
        row = group.iloc[0]
        print(row["filename"])
        image = cv2.imread(row["filename"])
        same = set()

        bboxes = []
        class_labels = []

        already_box = {}
        for _, vrow in group.iterrows():
            bboxes.append(
                [vrow["xmin"], vrow["ymin"], vrow["xmax"], vrow["ymax"]])
            class_labels.append(vrow["class"])
            already_box[vrow["class"]] = set()
        all_count = 0
        print(already_box)
        while int(all_count) < size:
            augmented = transform(
                image=image,
                bboxes=bboxes,
                class_labels=class_labels,
            )
            file_name = f"{images_index}.jpg"

            if len(augmented["bboxes"]) < 1:
                continue

            writer = Writer(file_name, augmented["image"].shape[1],
                            augmented["image"].shape[0])

            findbox = False
            for index, bbox in enumerate(augmented["bboxes"]):
                x_min, y_min, x_max, y_max = map(int, bbox[:4])

                same.add(x_min)
                rows.append({
                    "filename": f"{images_path}/{file_name}",
                    "width": augmented["image"].shape[1],
                    "height": augmented["image"].shape[0],
                    "class": augmented["class_labels"][index],
                    "xmin": x_min,
                    "ymin": y_min,
                    "xmax": x_max,
                    "ymax": y_max,
                    "imageindex": str(images_index),
                })
                writer.addObject(augmented["class_labels"][index], x_min,
                                 y_min, x_max, y_max)
                if len(already_box[augmented["class_labels"][index]]) >= size:
                    continue
                already_box[augmented["class_labels"][index]].add(x_min)
                findbox = True
            if findbox:
                cv2.imwrite(f"{images_path}/{file_name}", augmented["image"])
                writer.save(f"{xml_path}/{images_index}.xml")
                images_index += 1
                print(already_box)

            all_count = sum(min(len(v), size)
                            for v in already_box.values()) / len(already_box)
    df = pd.DataFrame(rows)
    return df
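A hypothetical driver for augment(); the source DataFrame needs the columns the loop reads (filename, class, xmin, ymin, xmax, ymax), and the paths and size below are illustrative:

annotations = pd.read_csv("annotations.csv")        # assumed CSV layout
augmented_df = augment(annotations, "output_voc", size=20)
augmented_df.to_csv("output_voc/augmented_annotations.csv", index=False)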
Example No. 25
    def get_images(self, imgpath):
        # Pick random clone, crypt or fufi
        u01 = np.random.uniform()
        if u01 < self.cpfr_frac[0]:
            img, mask = self.all_svs_opened[imgpath].fetch_clone(
                prop_displ=0.45)
        elif u01 < np.sum(self.cpfr_frac[0:2]):
            img, mask = self.all_svs_opened[imgpath].fetch_partial(
                prop_displ=0.45)
        elif u01 < np.sum(self.cpfr_frac[0:3]):
            img, mask = self.all_svs_opened[imgpath].fetch_fufi(
                prop_displ=0.45)
        else:
            img, mask = self.all_svs_opened[imgpath].fetch_rndmtile()

        if self.dilate_masks:
            st_3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
            n_dil = int(5 / self.um_per_pixel)  # ~5 um of dilation; >= 5 iterations when um_per_pixel <= 1
            # dilate if desired
            for i in range(mask.shape[2]):
                mask[:, :, i] = cv2.morphologyEx(mask[:, :, i].copy(),
                                                 cv2.MORPH_DILATE,
                                                 st_3,
                                                 iterations=n_dil)

        if self.aug:
            composition = A.Compose(
                [
                    A.HorizontalFlip(),
                    A.VerticalFlip(),
                    A.Rotate(border_mode=cv2.BORDER_CONSTANT),
                    A.OneOf(
                        [
                            A.ElasticTransform(alpha=1000,
                                               sigma=30,
                                               alpha_affine=30,
                                               border_mode=cv2.BORDER_CONSTANT,
                                               p=1),
                            A.GridDistortion(border_mode=cv2.BORDER_CONSTANT,
                                             p=1),
                            #                  A.OpticalDistortion(distort_limit=0.5, shift_limit=0.5,
                            #                                      border_mode = cv2.BORDER_CONSTANT, p = 1),
                        ],
                        p=0.7),
                    A.CLAHE(p=0.3),
                    A.HueSaturationValue(hue_shift_limit=12,
                                         sat_shift_limit=12,
                                         val_shift_limit=12,
                                         p=0.6),
                    A.RandomBrightnessContrast(p=0.6),
                    A.Posterize(p=0.2, num_bits=4),
                    A.OneOf([
                        A.JpegCompression(p=1),
                        A.MedianBlur(p=1),
                        A.Blur(p=1),
                        A.GlassBlur(p=1, max_delta=2, sigma=0.4),
                        A.IAASharpen(p=1)
                    ],
                            p=0.3)
                ],
                p=1)
            transformed = composition(image=img, mask=mask)
            img, mask = transformed['image'], transformed['mask']
        mask_list = [mask[:, :, ii] for ii in range(mask.shape[2])]

        if self.stride_bool:
            mask_list = [cv2.pyrDown(mask_ii.copy()) for mask_ii in mask_list]

        mask_list = [
            cv2.threshold(mask_ii, 120, 255, cv2.THRESH_BINARY)[1]
            for mask_ii in mask_list
        ]

        # convert to floating point, normalize, and mask unused clones
        img = img.astype(np.float32) / 255
        mask_list = [mask_ii.astype(np.float32) / 255 for mask_ii in mask_list]
        if self.normalize:
            img = (img - self.norm_mean) / self.norm_std

        return img, np.stack(mask_list, axis=2)
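The dilation step above is the standard OpenCV morphology idiom; a self-contained toy example of what it does to a mask:

import cv2
import numpy as np

mask = np.zeros((9, 9), dtype=np.uint8)
mask[4, 4] = 255                                   # single positive pixel
st_3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
dilated = cv2.morphologyEx(mask, cv2.MORPH_DILATE, st_3, iterations=2)
print((dilated > 0).sum())                         # the pixel grows into a blob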
Example No. 26
    def __init__(self, p=0.2, limit=90):
        self.p = p
        self.limit = limit
        self.augmenter = albu.Rotate(p=self.p, limit=self.limit)
import warnings
warnings.filterwarnings("ignore")
from classification_models.resnet.models import ResNet34
import albumentations as A

MODEL_PATH = 'Christof/models/ResNet34/23/'
exp_suffix = '_base'

SIZE = 256

# Load dataset info
path_to_train = 'Christof/assets/train_rgb_256/'
data = pd.read_csv('Christof/assets/train.csv')

normal_aug = A.Compose([
    A.Rotate((0, 30), p=0.75),
    A.RandomRotate90(p=1),
    A.HorizontalFlip(p=0.5),
    A.RandomBrightness(0.05),
    A.RandomContrast(0.05),
    A.Normalize(mean=(0.08069, 0.05258, 0.05487),
                std=(0.13704, 0.10145, 0.15313),
                max_pixel_value=255.)
])

val_aug = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.Normalize(mean=(0.08069, 0.05258, 0.05487),
                std=(0.13704, 0.10145, 0.15313),
                max_pixel_value=255.)
])
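For reference, A.Normalize with these arguments computes (pixel / max_pixel_value - mean) / std per channel; spelled out in numpy for one illustrative pixel:

import numpy as np

mean = np.array([0.08069, 0.05258, 0.05487])
std = np.array([0.13704, 0.10145, 0.15313])
pixel = np.array([30.0, 20.0, 18.0])               # illustrative RGB value

out = (pixel / 255.0 - mean) / std                 # what A.Normalize produces
print(out)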
Example No. 28
import pickle
with open('Russ/folds2.pkl', 'rb') as f:
    folds = pickle.load(f)

data['fold'] = data['Id'].apply(lambda x: folds[x])

def get_fold_ids(fold_id, data_set_info, shuff=True):
    fold_info = np.array([item['fold'] for item in data_set_info])
    val_ids = np.where(fold_info == fold_id)[0]
    train_ids = np.where(fold_info != fold_id)[0]
    if shuff:
        shuffle(val_ids)
        shuffle(train_ids)
    return train_ids, val_ids

normal_aug = A.Compose([
    A.OneOf([
        A.Rotate((-180, 180)),
        A.Rotate((-180, 180), border_mode=cv2.BORDER_CONSTANT),
    ]),
    A.Flip(p=0.75),
])

train_dataset_info = []
for name, labels in zip(data['Id'], data['Target'].str.split(' ')):
    train_dataset_info.append({
        'path': os.path.join(path_to_train, name),
        'labels': np.array([int(label) for label in labels]),
        'fold': folds[name]})
train_dataset_info = np.array(train_dataset_info)

counts = np.zeros(28)
for item in train_dataset_info:
    for l in item['labels']:
        counts[l] += 1
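A short usage sketch for get_fold_ids (shuffle is assumed to come from the random module elsewhere in the script):

train_ids, val_ids = get_fold_ids(0, train_dataset_info)   # hold out fold 0
train_info = train_dataset_info[train_ids]
val_info = train_dataset_info[val_ids]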
Example No. 29
from glob import glob

import cv2
import imageio
import numpy as np
import albumentations as alb
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset
import pickle

from conet.config import get_cfg

train_aug = alb.Compose([
    # alb.RandomSizedCrop(min_max_height=(300, 500)),
    alb.RandomScale(),
    # alb.HorizontalFlip(),
    alb.VerticalFlip(),
    alb.RandomBrightness(limit=0.01),
    alb.Rotate(limit=30),
    # 224 548
    alb.PadIfNeeded(min_height=224, min_width=548, border_mode=cv2.BORDER_REFLECT101),
    alb.RandomCrop(224, 512),
    alb.Normalize(),
    # alb.pytorch.ToTensor(),
    ToTensorV2()
])

val_aug = alb.Compose([
    alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
    alb.Normalize(),
    # alb.Resize(512, 512),
    alb.CenterCrop(224, 512),
    ToTensorV2(),
])
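A hedged sketch of a Dataset that would consume these pipelines (the class and file handling are illustrative, not from the source project):

class SegmentationDataset(Dataset):
    def __init__(self, image_paths, mask_paths, aug=train_aug):
        self.image_paths = image_paths
        self.mask_paths = mask_paths
        self.aug = aug

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        image = imageio.imread(self.image_paths[idx])
        mask = imageio.imread(self.mask_paths[idx])
        out = self.aug(image=image, mask=mask)     # mask gets the same geometry
        return out["image"], out["mask"]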
Example No. 30
def create_submission(model, model_name, files_dir):
    my_transforms = {
        "base":
        A.Compose([
            A.Resize(height=240, width=240),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
        "horizontal_flip":
        A.Compose([
            A.Resize(height=240, width=240),
            A.HorizontalFlip(p=1.0),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
        "vertical_flip":
        A.Compose([
            A.Resize(height=240, width=240),
            A.VerticalFlip(p=1.0),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
        "coloring":
        A.Compose([
            A.Resize(height=240, width=240),
            A.ColorJitter(p=1.0),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
        "rotate":
        A.Compose([
            A.Resize(height=240, width=240),
            A.Rotate(p=1.0, limit=45),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
        "shear":
        A.Compose([
            A.Resize(height=240, width=240),
            A.IAAAffine(p=1.0),
            A.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
                max_pixel_value=255.0,
            ),
            ToTensorV2(),
        ]),
    }

    for t in [
            "base", "horizontal_flip", "vertical_flip", "coloring", "rotate",
            "shear"
    ]:
        predictions = []
        labels = []
        all_files = []
        test_dataset = MyDataset(root=files_dir, transform=my_transforms[t])
        test_loader = DataLoader(test_dataset,
                                 batch_size=32,
                                 num_workers=4,
                                 shuffle=False,
                                 pin_memory=True)
        model.eval()

        for idx, (x, y, filenames) in enumerate(tqdm(test_loader)):
            x = x.to(config.DEVICE)
            with torch.no_grad():
                outputs = (torch.clip(torch.sigmoid(model(x)), 0.005,
                                      0.995).squeeze(1).cpu().numpy())
                predictions.append(outputs)
                labels += y.numpy().tolist()
                all_files += filenames

        # ids run 1..N, where N = (number of full batches) * batch_size
        # plus the size of the last (possibly partial) batch
        df = pd.DataFrame({
            "id":
            np.arange(
                1,
                (len(predictions) - 1) * predictions[0].shape[0] +
                predictions[-1].shape[0] + 1,
            ),
            "label":
            np.concatenate(predictions, axis=0),
        })
        df.to_csv(f"predictions_test/submission_{model_name}_{t}.csv",
                  index=False)

        model.train()
        print(
            f"Created submission file for model {model_name} and transform {t}"
        )
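The six per-transform CSVs lend themselves to a test-time-augmentation ensemble; a hedged sketch that averages them (model_name is assumed to still be in scope, and file names follow the pattern above):

transforms = ["base", "horizontal_flip", "vertical_flip", "coloring", "rotate", "shear"]
dfs = [
    pd.read_csv(f"predictions_test/submission_{model_name}_{t}.csv")
    for t in transforms
]
tta = dfs[0][["id"]].copy()
tta["label"] = sum(df["label"] for df in dfs) / len(dfs)   # mean over transforms
tta.to_csv(f"predictions_test/submission_{model_name}_tta.csv", index=False)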