Code example #1
File: transformer.py Project: yida2311/HistoDOI
 def __init__(self, crop_size=800):
     self.master = albumentations.Compose([
         albumentations.RandomCrop(crop_size, crop_size),
         albumentations.RandomRotate90(p=0.5),
         albumentations.Transpose(p=0.5),
         albumentations.Flip(p=0.5),
         albumentations.OneOf([
             albumentations.RandomBrightness(),
             albumentations.RandomContrast(),
             albumentations.HueSaturationValue(),
         ], p=0.5),
         albumentations.ElasticTransform(),
         albumentations.ShiftScaleRotate(shift_limit=0.1,
                                         scale_limit=0.02,
                                         rotate_limit=15,
                                         p=0.5),
         albumentations.Normalize(mean=[0.798, 0.621, 0.841],
                                  std=[0.125, 0.228, 0.089]),
     ])
     self.to_tensor = ToTensor()
Code example #2
    def __init__(self, split):
        self.split = split
        self.aug = albumentations.Compose([
            albumentations.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            AT.ToTensor()
        ])

        if self.split == 'train':
            self.aug = albumentations.Compose([
                albumentations.PadIfNeeded(36, 36),
                albumentations.RandomCrop(32, 32),
                albumentations.HorizontalFlip(),
                ##albumentations.RandomBrightness(),
                #albumentations.ShiftScaleRotate(rotate_limit=15, scale_limit=0.10),
                #albumentations.HueSaturationValue(),
                albumentations.Cutout(1, 8, 8, 0.5),
                #albumentations.GaussNoise(),
                #albumentations.ElasticTransform(),
                albumentations.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                AT.ToTensor()
            ])
Code example #3
File: utils.py Project: yujisw/perceptual_computing
def get_training_augmentation():
    train_transform = [
        # albu.Resize(320, 640),
        # albu.HorizontalFlip(p=0.5),
        # albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=0.5, border_mode=0),
        # albu.GridDistortion(p=0.5),
        # albu.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),

        # first prize's augmentations below
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10, shift_limit=0.1, p=0.5, border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.RandomCrop(height=800,width=1200,p=0.5),
        albu.Resize(320, 640),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ]
    return albu.Compose(train_transform)
Code example #4
def get_album_transforms(norm_mean, norm_std):
    """get the train and test transform by albumentations"""
    train_transform = AlbumentationTransforms([
        A.HorizontalFlip(p=0.7),
        A.PadIfNeeded(min_height=70,
                      min_width=70,
                      border_mode=4,
                      always_apply=False,
                      p=1.0),
        A.RandomCrop(64, 64, always_apply=False, p=1.0),
        A.Rotate(limit=30,
                 interpolation=1,
                 border_mode=4,
                 always_apply=False,
                 p=0.5),
        A.Normalize(mean=norm_mean, std=norm_std),
        A.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=0.7)
    ])
    test_transform = AlbumentationTransforms(
        [A.Normalize(mean=norm_mean, std=norm_std)])
    return (train_transform, test_transform)
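
AlbumentationTransforms is a project-specific adapter rather than part of albumentations itself. A minimal sketch of such a wrapper, assuming it is handed PIL images by a torchvision-style dataset (the name and behavior here are assumptions, not taken from the project):

import numpy as np
import albumentations as A

class AlbumentationTransforms:
    """Hypothetical adapter so an albumentations pipeline can be used
    where a torchvision transform is expected."""

    def __init__(self, transforms_list):
        self.transforms = A.Compose(transforms_list)

    def __call__(self, img):
        img = np.array(img)  # albumentations works on numpy arrays of shape (H, W, C)
        return self.transforms(image=img)['image']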
Code example #5
 def __init__(self, image_list, label):
     self.image_list = image_list
     self.label = label
     self.aug = A.Compose([
         A.PadIfNeeded(min_height=40,
                       min_width=40,
                       always_apply=True,
                       border_mode=0,
                       value=[0, 0, 0]),
         A.RandomCrop(32, 32, p=1),
         #torch.FlipLR(),
         A.HorizontalFlip(p=0.5),
         #A.Rotate((-8.0, 8.0), p=0.5),
         A.Cutout(num_holes=8,
                  max_h_size=8,
                  max_w_size=8,
                  fill_value=[0.4914, 0.4822, 0.4465],
                  p=0.5),
         A.Normalize(mean=(0.4914, 0.4822, 0.4465),
                     std=(0.2023, 0.1994, 0.2010))
     ])
Code example #6
File: samplers.py Project: viridityzhu/RubikKeras
def ego_generator(path, size=128, is_random=True):
    random_number = 7
    folders = [f for f in os.listdir(path)]
    while True:
        if is_random:
            folder = random.choice(folders)
            frame_number = random.randint(0, 100)
        else:
            folder = folders[7]
            frame_number = random_number

        frame_name = 'frame_%s.jpg' % str(frame_number).zfill(4)

        image = cv2.imread(os.path.join(path, folder, frame_name))  # read as BGR; converted to grayscale below
        image = cv2.resize(image, (640, 480))
        image = A.RandomCrop(height=size, width=size,
                             p=1.0)(image=image)['image']
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # image = np.reshape(image, (size, size, 1))

        yield image
Code example #7
    def __init__(self, paths, labels=None, size=None, random_crop=False):
        self.labels = labels if labels is not None else dict()
        self.size = size
        self.random_crop = random_crop

        assert "file_path_" not in self.labels
        self.labels["file_path_"] = paths
        self._length = len(paths)

        if self.size is not None and self.size > 0:
            self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
            if not self.random_crop:
                self.cropper = albumentations.CenterCrop(height=self.size,
                                                         width=self.size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.size,
                                                         width=self.size)
            self.preprocessor = albumentations.Compose(
                [self.rescaler, self.cropper])
        else:
            self.preprocessor = lambda **kwargs: kwargs
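
The else branch is the trick worth noting: lambda **kwargs: kwargs is an identity preprocessor with the same keyword-argument calling convention as albumentations.Compose, so the call site never has to branch. A self-contained sketch of both paths (placeholder image, illustrative only):

import albumentations
import numpy as np

size = 256  # or None/0 to disable preprocessing
if size is not None and size > 0:
    preprocessor = albumentations.Compose([
        albumentations.SmallestMaxSize(max_size=size),
        albumentations.CenterCrop(height=size, width=size),
    ])
else:
    preprocessor = lambda **kwargs: kwargs  # identity, same calling convention

image = np.zeros((300, 400, 3), dtype=np.uint8)  # placeholder image
out = preprocessor(image=image)["image"]  # 256x256 crop, or the image unchanged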
Code example #8
File: dataloader.py Project: torewulf/asip
    def __init__(self, scenes, crop_size, downsample=False):
        # Make sure patches with AMSR NaNs are removed 
        patches_folder = '/data/users/twu/ds-2/patches'
        df = pd.read_csv(os.path.join(patches_folder, 'tiles_info.txt'), header=None, names=['path', 'AMSRNaN', 'CT'])
        AMSRNaNPatches = list(df[df['AMSRNaN'] == 1]['path'].values)

        self.patch_paths = [patch for patch in utils.get_patch_paths(scenes) if patch not in AMSRNaNPatches]
        
        self.crop_size = crop_size
        self.downsample = downsample
        
        # Defining a data augmentation pipeline
        self.transform = A.Compose([A.RandomCrop(width=crop_size, height=crop_size, always_apply=True),
                                    A.HorizontalFlip(p=0.5),
                                    A.VerticalFlip(p=0.5)],
                                    additional_targets = {'INC': 'image', 'CT': 'image', 'DST': 'image', 'AMSR': 'image'})
        
        # Initializing data scalers
        self.INC_scaler = pickle.load(open('scalers/INC_scaler.pkl', 'rb'))
        self.DST_scaler = pickle.load(open('scalers/DST_scaler.pkl', 'rb'))
        self.AMSR_scaler = pickle.load(open('scalers/AMSR_scaler.pkl', 'rb'))
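
With additional_targets, every named array is augmented with exactly the same random crop and flips as image, which is what keeps the SAR channels and auxiliary rasters aligned. A self-contained sketch of the call pattern (placeholder data, abbreviated target list):

import albumentations as A
import numpy as np

transform = A.Compose(
    [A.RandomCrop(width=256, height=256), A.HorizontalFlip(p=0.5)],
    additional_targets={'INC': 'image', 'CT': 'image'},
)

sar = np.zeros((512, 512), dtype=np.float32)
out = transform(image=sar, INC=sar.copy(), CT=sar.copy())
# out['image'], out['INC'] and out['CT'] all received the same crop and flip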
Code example #9
def get_data_transform(path):
    mean, stdev = [0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262]
    input_size = 64
    train_albumentation_transform = A.Compose([
        A.PadIfNeeded(min_height=70,
                      min_width=70,
                      border_mode=cv2.BORDER_REPLICATE,
                      always_apply=True,
                      p=1.0),
        A.Cutout(num_holes=1,
                 max_h_size=64,
                 max_w_size=64,
                 always_apply=True,
                 p=0.7,
                 fill_value=[i * 255 for i in mean]),
        # A.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, always_apply=False, p=0.5),
        # A.ChannelShuffle(0.7),
        A.RandomCrop(height=64, width=64, p=1, always_apply=False),
        A.HorizontalFlip(p=0.7, always_apply=True),
        A.ShiftScaleRotate(shift_limit=0.0625,
                           scale_limit=0,
                           rotate_limit=45,
                           p=0.2),
        A.Normalize(mean=tuple(mean),
                    std=tuple(stdev),
                    max_pixel_value=255,
                    always_apply=True,
                    p=1.0),
        A.Resize(input_size, input_size),
        ToTensor()
    ])

    # Test Phase transformation
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(tuple(mean), tuple(stdev))
    ])
    train_transforms = AlbumCompose(train_albumentation_transform)
    #     test_transforms = AlbumCompose(test_transforms)
    return train_transforms, test_transforms
Code example #10
def get_training_augmentation(height=480, width=640):
    train_transform = [
        A.Resize(height=height, width=width),
        A.HorizontalFlip(p=0.5),

        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),

        A.PadIfNeeded(min_height=height, min_width=width, always_apply=True, border_mode=0),
        A.RandomCrop(height=height, width=width, p=0.5),

        A.IAAAdditiveGaussianNoise(p=0.05),
        # A.IAAPerspective(p=0.5),

        # A.OneOf(
        #     [
        #         A.CLAHE(p=1),
        #         A.RandomBrightness(p=1),
        #         A.RandomGamma(p=1),
        #     ],
        #     p=0.9,
        # ),

        # A.OneOf(
        #     [
        #         A.IAASharpen(p=1),
        #         A.Blur(blur_limit=3, p=1),
        #         A.MotionBlur(blur_limit=3, p=1),
        #     ],
        #     p=0.9,
        # ),

        # A.OneOf(
        #     [
        #         A.RandomContrast(p=1),
        #         A.HueSaturationValue(p=1),
        #     ],
        #     p=0.9,
        # ),
    ]
    return A.Compose(train_transform)
Code example #11
File: base_augs.py Project: PiotrowskiD/kidneys-aug
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.5,
                              rotate_limit=0,
                              shift_limit=0.1,
                              p=1,
                              border_mode=0),
        albu.PadIfNeeded(min_height=512,
                         min_width=512,
                         always_apply=True,
                         border_mode=0),
        albu.RandomCrop(height=320, width=320, always_apply=True),
        albu.IAAAdditiveGaussianNoise(p=0.2),
        albu.IAAPerspective(p=0.5),
        albu.OneOf(
            [
                albu.CLAHE(p=1),
                albu.RandomBrightnessContrast(p=1),
                albu.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.IAASharpen(p=1),
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        albu.OneOf(
            [
                albu.RandomContrast(p=1),
                albu.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Code example #12
File: aug.py Project: urevoleg/jupyter_notebooks
def get_transforms(size: int, scope: str = 'geometric', crop='random'):
    augs = {'strong': albu.Compose([albu.HorizontalFlip(),
                                    albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4),
                                    albu.ElasticTransform(),
                                    albu.OpticalDistortion(),
                                    albu.OneOf([
                                        albu.CLAHE(clip_limit=2),
                                        albu.IAASharpen(),
                                        albu.IAAEmboss(),
                                        albu.RandomBrightnessContrast(),
                                        albu.RandomGamma()
                                    ], p=0.5),
                                    albu.OneOf([
                                        albu.RGBShift(),
                                        albu.HueSaturationValue(),
                                    ], p=0.5),
                                    ]),
            'weak': albu.Compose([albu.HorizontalFlip(),
                                  ]),
            'geometric': albu.OneOf([albu.HorizontalFlip(always_apply=True),
                                     albu.ShiftScaleRotate(always_apply=True, scale_limit=.5, rotate_limit=30),
                                     albu.Transpose(always_apply=True),
                                     albu.OpticalDistortion(always_apply=True, distort_limit=0.1, shift_limit=0.1),
                                     albu.ElasticTransform(always_apply=True),
                                     ]),
            'empty': NoOp(),
            }

    aug_fn = augs[scope]
    crop_fn = {'random': albu.RandomCrop(size, size, always_apply=True),
               'center': albu.CenterCrop(size, size, always_apply=True)}[crop]
    pad = albu.PadIfNeeded(size, size)

    pipeline = albu.Compose([aug_fn, crop_fn, pad])

    def process(a):
        r = pipeline(image=a)
        return r['image']

    return process
Code example #13
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0.0,
                              scale_limit=0.2,
                              rotate_limit=0.0,
                              p=0.5,
                              border_mode=0),
        # albu.PadIfNeeded(min_height=1000, min_width=1600, always_apply=True, border_mode=0),
        albu.RandomCrop(height=256, width=400, always_apply=True),
        # albu.IAAAdditiveGaussianNoise(p=0.2),
        # albu.IAAPerspective(p=0.5),
        #
        # albu.OneOf(
        #     [
        #         albu.CLAHE(),
        #         albu.RandomBrightnessContrast(),
        #         albu.HueSaturationValue(),
        #         albu.RandomGamma(),
        #     ],
        #     p=0.9,
        # ),
        # #
        # albu.OneOf(
        #     [
        #         albu.IAASharpen(p=1),
        #         albu.Blur(blur_limit=3, p=1),
        #         albu.MotionBlur(blur_limit=3, p=1),
        #     ],
        #     p=0.9,
        # ),
        #
        # albu.CLAHE(),
        # albu.RandomBrightnessContrast(),
        # albu.HueSaturationValue(),
        albu.Normalize(),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ]
    return albu.Compose(train_transform)
Code example #14
    def train_augment(image, eigenvectors, eigenvalues, pixel_avg, stdev,
                      shift_scale, aug_list):
        """
        Takes an image from the training set, applies a random
        224 x 224 crop and a horizontal flip with 50% probability.
        Additionally it sums a random linear combination of the
        eigenvectors of all the pixels in the training set to all
        of the pixels in the training image. We also normalize the
        pixel values.

        Since we are not generating all of the possible transformation
        combinations (but only randomly returning a single one), do
        we need more epochs or do we sample more times each epoch?
        """

        # Note height comes first since the 0-axis is the y-axis! Made a mistake here before
        height, width = image.shape[0], image.shape[1]

        crop_width = min(224, width)
        crop_height = min(224, height)

        augmenter = albumentations.Compose(
            aug_list.get() +  # We add custom augmentations separately
            [
                # Note height comes first in crop transformations, made a mistake here before
                albumentations.RandomCrop(crop_height, crop_width)
            ])

        alphas = np.random.normal(scale=shift_scale, size=3)
        shift = np.zeros(3)

        for i in range(3):
            shift += alphas[i] * eigenvalues[i] * eigenvectors[i]

        # Shift was normalized but not standardized, hence need to divide it by standard deviation
        # Since standard deviation is less than 1 this will increase the shift and thus increase regularization...
        output_image = ((augmenter(image=image)["image"] - pixel_avg) / 255 +
                        shift) / stdev

        return ImagenetSequence.pad_image(output_image)
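
The docstring describes the PCA color augmentation from the AlexNet paper; eigenvalues and eigenvectors are assumed to be precomputed over the training set. One plausible way to derive them, assuming pixel values scaled to [0, 1] (this helper is not part of the source):

import numpy as np

def compute_pca_color_stats(images):
    """PCA over the RGB values of all training pixels (AlexNet-style).
    `images` is an iterable of float arrays of shape (H, W, 3)."""
    pixels = np.concatenate([img.reshape(-1, 3) for img in images], axis=0)
    pixels = pixels - pixels.mean(axis=0)
    cov = np.cov(pixels, rowvar=False)  # 3x3 covariance of the RGB channels
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    return eigenvalues, eigenvectors.T  # rows are eigenvectors, as indexed above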
Code example #15
File: datapre.py Project: Anniequ/x_unet_res
def get_training_augmentation():
    train_transform = [

        albu.HorizontalFlip(p=0.5),  # horizontal flip

        albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),  # shift, scale and rotate

        albu.PadIfNeeded(min_height=224, min_width=224, always_apply=True, border_mode=0),  # add padding
        albu.RandomCrop(height=224, width=224, always_apply=True),  # random crop

        albu.IAAAdditiveGaussianNoise(p=0.2),  # Add gaussian noise to the input image.
        albu.IAAPerspective(p=0.5),  # Perform a random four point perspective transform of the input

        albu.OneOf(
            [
                albu.CLAHE(p=1),  # contrast-limited adaptive histogram equalization
                albu.RandomBrightnessContrast(p=1),  # Randomly change brightness and contrast
                albu.RandomGamma(p=1),  # gamma transform
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.IAASharpen(p=1), # Sharpen the input image and overlays the result with the original image.
                albu.Blur(blur_limit=3, p=1),
                albu.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),

        albu.OneOf(
            [
                albu.RandomBrightnessContrast(p=1),
                albu.HueSaturationValue(p=1),  # Randomly change hue, saturation and value of the input image.
            ],
            p=0.9,
        ),
    ]
    return albu.Compose(train_transform)
Code example #16
    def _setup_transform(self, cfg):
        # Albumentation example: https://albumentations.readthedocs.io/en/latest/examples.html
        self.img_mask_transform = A.Compose([
            A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=175, p=0.8, border_mode=cv2.BORDER_CONSTANT),
            A.Flip(),
            A.Transpose(),
            A.OneOf([
                A.ElasticTransform(),
                A.OpticalDistortion(),
                A.GridDistortion(),
                A.IAAPiecewiseAffine(),
            ]),
            A.OneOf([
                A.RandomCrop(height=self.size_crop, width=self.size_crop, p=0.5),
                A.CenterCrop(height=self.size_crop, width=self.size_crop, p=0.5)
            ]),
            A.Cutout(num_holes=8, max_h_size=8, max_w_size=8, fill_value=0, p=0.5),
        ], p=0.8)

        self.img_pixel_transform = A.Compose([
            A.OneOf([
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ], p=0.2),
            A.OneOf([
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            A.OneOf([
                A.IAASharpen(),
                A.IAAEmboss(),
                # A.RandomBrightnessContrast(),            
            ], p=0.3),
            A.HueSaturationValue(hue_shift_limit=3, sat_shift_limit=20, val_shift_limit=3, p=0.2),
        ], p=0.5)
        # Torch transform
        self.resize_transform = transforms.Resize(cfg.MODEL.IMAGE_SIZE, Image.NEAREST)
        self.to_tensor_transform = transforms.ToTensor()
        self.normalize_transform = transforms.Normalize(mean=cfg.TRAIN.NORMALIZE_MEAN, std=cfg.TRAIN.NORMALIZE_STD)
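
The split into img_mask_transform and img_pixel_transform is deliberate: geometric transforms must see image and mask together so both receive identical parameters, while photometric transforms must only touch the image. A minimal, self-contained sketch of the call pattern (transform contents abbreviated, placeholder data):

import cv2
import numpy as np
import albumentations as A

img_mask_transform = A.Compose([A.ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT)])
img_pixel_transform = A.Compose([A.GaussNoise(p=1.0)])

image = np.zeros((256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256), dtype=np.uint8)

augmented = img_mask_transform(image=image, mask=mask)  # same geometry for both
image, mask = augmented['image'], augmented['mask']
image = img_pixel_transform(image=image)['image']  # photometric noise, image only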
Code example #17
  def __init__(self):
      self.mean = [0.4890062, 0.47970363, 0.47680542]
      self.std = [0.264582, 0.258996, 0.25643882]

      self.train_transforms = A.Compose([
          A.PadIfNeeded(min_height=40, min_width=40, always_apply=True),
          A.RandomCrop(height=32, width=32, always_apply=True),
          A.HorizontalFlip(p=0.5),
          A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=self.mean, always_apply=False, p=0.5),
          A.Normalize(mean=self.mean, std=self.std),
          ToTensorV2()
          # ToTensor()
      ])

      self.test_transforms = A.Compose([
          A.Normalize(mean=self.mean, std=self.std),
          ToTensorV2()
          # ToTensor()
      ])
Code example #18
File: base.py Project: sankovalev/goznak
    def __init__(self, config):
        super().__init__()
        self.config = config
        aux_params=dict(
            pooling='avg',
            dropout=0.2,
            activation='sigmoid',
            classes=1)
        self.net = smp.Unet(encoder_name=self.config.training.model_encoder,
                            in_channels=1,
                            aux_params=aux_params)  # classification head

        self.metrics = {
            "regression": pl.metrics.MeanSquaredError(),
            "classification": pl.metrics.Accuracy(self.config.prediction.threshold)
        }

        # load the best weights
        state_dict = self._rename_layers(load(os.path.join(config.sources.ckpt_path,
                                                           f"{config.name}.ckpt"))['state_dict'])
        self.net.load_state_dict(state_dict)
        self.net.eval()

        # TODO: properly we should slice the images into tiles here and only then resize, but we don't
        augs_list = [
            albu.PadIfNeeded(480, 80),
            albu.RandomCrop(480, 80),
            albu.Resize(576, 96),  # must be a multiple of 32
            ToTensorV2()
            # no normalization
            ]
        self.transforms = albu.Compose(augs_list)
        self.deferset = AudioDataset(config.sources.val_meta,
                                     config.sources.data_path,
                                     self.transforms,
                                     False,
                                     prob=0.5,
                                     train_ratio=1.0)
        self.deferloader = DataLoader(self.deferset, batch_size=1, shuffle=False)
Code example #19
def da_policy_DA8(img_size, crop_size):
    train_da = albumentations.Compose([
        albumentations.Resize(img_size, img_size),
        albumentations.RandomCrop(p=1, height=crop_size, width=crop_size),
        albumentations.CoarseDropout(p=0.5,
                                     min_holes=1,
                                     max_holes=2,
                                     min_width=32,
                                     min_height=32,
                                     max_width=84,
                                     max_height=84),
        albumentations.RandomBrightnessContrast(p=0.5,
                                                brightness_limit=0.2,
                                                contrast_limit=0.25),
        albumentations.Rotate(p=0.5, limit=17),
        albumentations.GridDistortion(p=0.5, distort_limit=0.2, num_steps=10)
    ])

    val_da = albumentations.Compose(
        [albumentations.Resize(crop_size, crop_size)])

    return train_da, val_da
Code example #20
File: custom_transforms.py Project: aierh/AutoML
    def __call__(self, sample):
        # img, mask = sample.image, sample.annotation
        # w, h, _ = img.shape
        # x1 = random.randint(0, w - self.crop_size)
        # y1 = random.randint(0, h - self.crop_size)
        # img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        # mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        # sample.image = img
        # sample.annotation = mask
        img, annot = sample.image, sample.annotation

        aug = A.RandomCrop(self.crop_size, self.crop_size)

        masks = []
        if sample.masks_and_category is not None:
            for index in sample.masks_and_category:
                masks.append(index[0])

        image_crop = []
        bbox_crop = []
        if masks:  # empty list means no instance masks were provided
            augmented = aug(image=img, bboxes=annot, masks=masks)
            mask_crop = augmented['masks']
            image_crop = augmented['image']
            bbox_crop = augmented['bboxes']

            for i, index in enumerate(sample.masks_and_category):
                index[0] = mask_crop[i]
        else:
            augmented = aug(image=img, bboxes=annot)
            image_crop = augmented['image']
            bbox_crop = augmented['bboxes']

        # the shape has to be at least (0,5)
        if len(bbox_crop) == 0:
            bbox_crop = np.zeros((0, 5))

        sample.image = image_crop
        sample.annotation = bbox_crop
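
Note that this calls a bare A.RandomCrop with bboxes and masks directly; albumentations normally validates and clips boxes only when the transform is wrapped in A.Compose with bbox_params. A minimal sketch of the usual pattern, assuming pascal_voc-style boxes with the class label appended as a fifth element (placeholder data):

import albumentations as A
import numpy as np

crop_size = 256
aug = A.Compose(
    [A.RandomCrop(crop_size, crop_size)],
    bbox_params=A.BboxParams(format='pascal_voc'),  # [x_min, y_min, x_max, y_max, label]
)

img = np.zeros((512, 512, 3), dtype=np.uint8)
boxes = [[10, 20, 200, 220, 0]]  # last element is the class label
masks = [np.zeros((512, 512), dtype=np.uint8)]

out = aug(image=img, bboxes=boxes, masks=masks)
image_crop, bbox_crop, mask_crop = out['image'], out['bboxes'], out['masks']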
Code example #21
def get_transform(args, is_train):
    """
    It takes in the arguments and a boolean value, and returns a transform
    object.
    :param args: the global arguments
    :param is_train: if the transform is for training or evaluating
    :return: transform operations to be performed on the image
    """
    if is_train:
        transform = A.Compose([
            A.RandomCrop(*args.train_size),
            *get_list_of_ops(args.augmentations, A),
            A.Normalize(mean=args.mean, std=args.std),
            ToTensorV2()
        ])
    else:
        transform = A.Compose([
            A.Resize(*args.eval_size),
            A.Normalize(mean=args.mean, std=args.std),
            ToTensorV2()
        ])
    return transform
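
get_list_of_ops is a helper from the surrounding project that builds transforms from the args.augmentations config; its implementation is not shown here. A plausible sketch (the name resolution via getattr is an assumption about how it works, not taken from the source):

def get_list_of_ops(op_configs, module):
    """Hypothetical helper: instantiate transforms from a config list such as
    [{'name': 'HorizontalFlip', 'params': {'p': 0.5}}]."""
    ops = []
    for cfg in op_configs:
        op_class = getattr(module, cfg['name'])  # e.g. A.HorizontalFlip
        ops.append(op_class(**cfg.get('params', {})))
    return ops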
Code example #22
def designValidCrop(img, mask, center_threshold=70):
    crop_img = np.zeros([256, 256, 3])
    crop_mask = np.zeros([256, 256, 4])
    for ch in range(4):
        temp_mask = (mask[:, :, ch] > 0).astype(np.uint8)  # binarize the mask for the current channel
        if (np.sum(temp_mask) == 0):
            aug = albu.RandomCrop(256, 256, always_apply=True)
            augmented = aug(image=img[:, :, ch], mask=mask[:, :, ch])
            crop_img[:, :, ch] = augmented['image']
            crop_mask[:, :, ch] = augmented['mask']
        else:
            chosen_center = randomChooseCenter(temp_mask, center_threshold)
            ###### ====  do crop  ===== ######
            chosen_center = np.array(chosen_center).astype(np.int32)
            if chosen_center[1] <= 128:
                crop_mask[:, :, ch] = temp_mask[:, 0:256]
                crop_img[:, :, ch] = img
            elif chosen_center[1] >= (1600 - 128):
                crop_mask[:, :, ch] = temp_mask[:, 1600 - 256:1600]
            else:
                crop_mask[:, :, ch] = temp_mask[:, chosen_center[1] -
                                                128:chosen_center[1] + 128]
Code example #23
def da_policy_DA6(img_size, crop_size):
    # additional_aug = [*
    #     zoom_crop(scale=(0.85, 1.15), do_rand=True),
    #     cutout(n_holes=(1, 2), length=(32, 84), p=.5),
    #     brightness(change=(0.33, 0.68), p=.5),
    #     contrast(scale=(0.7, 1.4), p=.5),
    # ]
    # return get_transforms(do_flip=False, max_warp=0.25, max_zoom=1.25, max_rotate=17, xtra_tfms=additional_aug)

    train_da = albumentations.Compose([
        albumentations.Resize(img_size, img_size),
        albumentations.RandomCrop(p=1, height=crop_size, width=crop_size),
        albumentations.RandomBrightnessContrast(p=0.5,
                                                brightness_limit=0.2,
                                                contrast_limit=0.25),
        albumentations.Rotate(p=0.5, limit=17)
    ])

    val_da = albumentations.Compose(
        [albumentations.Resize(crop_size, crop_size)])

    return train_da, val_da
Code example #24
File: training.py Project: zhouchengcom/Fall-Guys-AI
def main():
    image = cv2.imread("cuiyan.png")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    def visualize(image):
        plt.figure(figsize=(6, 6))
        plt.axis("off")
        plt.imshow(image)
        # plt.show()

    transform = A.Compose([
        A.RandomCrop(111, 222),
        A.OneOf([A.RGBShift(), A.HueSaturationValue()]),
    ])

    random.seed(42)
    transformed = transform(image=image)
    visualize(transformed["image"])

    A.save(transform, "./transform.json")
    A.save(transform, "./transform.yml", data_format="yaml")
    pprint.pprint(A.to_dict(transform))
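
The saved pipeline can be restored later with A.load, which rebuilds the same Compose from the JSON or YAML file (continuing the example above):

    # Restore the serialized pipeline; data_format defaults to "json".
    restored = A.load("./transform.json")
    restored_yaml = A.load("./transform.yml", data_format="yaml")

    transformed = restored(image=image)  # behaves like the original `transform`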
Code example #25
def get_training_albumentations(size=(256, 256),
                                pad=10,
                                re_prob=0.5,
                                with_keypoints=False,
                                ms_prob=0.5):
    h, w = size
    train_transform = [
        MultiScale(p=ms_prob),
        ResizeWithKp(h, w, interpolation=cv2.INTER_CUBIC),
        albu.PadIfNeeded(h + 2 * pad,
                         w + 2 * pad,
                         border_mode=cv2.BORDER_CONSTANT,
                         value=0),
        albu.RandomCrop(height=h, width=w, always_apply=True),
        AlbuRandomErasing(re_prob),
    ]
    if with_keypoints:
        return albu.Compose(train_transform,
                            keypoint_params=albu.KeypointParams(
                                format='xy', remove_invisible=False))
    else:
        return albu.Compose(train_transform)
Code example #26
def resize_transforms(input_size=256):
    result = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=1),
        A.OneOf([
            A.ShiftScaleRotate(scale_limit=0,
                               rotate_limit=(-45, 45),
                               shift_limit=(-0.1, 0.1),
                               interpolation=0,
                               border_mode=2,
                               p=0.5),
            A.ElasticTransform(alpha_affine=20, sigma=30, border_mode=2, p=0.5)
        ]),
        A.PadIfNeeded(min_height=input_size,
                      min_width=input_size,
                      always_apply=True,
                      border_mode=2),
        A.RandomCrop(input_size, input_size, always_apply=True),
    ])

    return result
Code example #27
File: pic.py Project: ink1/taming-transformers
    def __init__(self,
                 data_csv, data_root, segmentation_root,
                 size=None, random_crop=False, interpolation="bicubic",
                 ):
        self.n_labels = 182
        self.data_csv = data_csv
        self.data_root = data_root
        self.segmentation_root = segmentation_root
        with open(self.data_csv, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, l)
                           for l in self.image_paths],
            "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
                                   for l in self.image_paths]
        }

        size = None if size is not None and size <= 0 else size
        self.size = size
        if self.size is not None:
            self.interpolation = interpolation
            self.interpolation = {
                "nearest": cv2.INTER_NEAREST,
                "bilinear": cv2.INTER_LINEAR,
                "bicubic": cv2.INTER_CUBIC,
                "area": cv2.INTER_AREA,
                "lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
            self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                 interpolation=self.interpolation)
            self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                        interpolation=cv2.INTER_NEAREST)
            self.center_crop = not random_crop
            if self.center_crop:
                self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
            self.preprocessor = self.cropper
Code example #28
def load_data(fold: int) -> Any:
    torch.multiprocessing.set_sharing_strategy('file_system')  # type: ignore
    cudnn.benchmark = True  # type: ignore

    full_df = pd.read_csv('../input/train.csv')
    print('full_df', full_df.shape)
    train_df, val_df = train_val_split(full_df, fold)
    print('train_df', train_df.shape)

    num_ttas = 1

    if num_ttas > 1:
        transform_test = albu.Compose([
            albu.PadIfNeeded(config.model.input_size, config.model.input_size),
            albu.RandomCrop(height=config.model.input_size,
                            width=config.model.input_size),
            # horizontal flip is done by the data loader
        ])
    else:
        transform_test = albu.Compose([
            albu.PadIfNeeded(config.model.input_size, config.model.input_size),
            albu.CenterCrop(height=config.model.input_size,
                            width=config.model.input_size),
        ])

    val_dataset = ImageDataset(val_df,
                               mode='val',
                               config=config,
                               num_ttas=num_ttas,
                               augmentor=transform_test)

    data_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.test.batch_size,
        shuffle=False,
        num_workers=config.general.num_workers,
        drop_last=True)

    return data_loader
Code example #29
def get_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(rotate_limit=0, shift_limit=0.1, p=1,
                           border_mode=0),
        A.PadIfNeeded(min_height=320,
                      min_width=320,
                      always_apply=False,
                      border_mode=0),
        A.RandomCrop(height=320, width=320, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
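
round_clip_0_1 is not shown in this excerpt; its role is to re-binarize mask values that the geometric transforms interpolated. A sketch of a compatible definition (assumed, not taken from the snippet):

def round_clip_0_1(x, **kwargs):
    # A.Lambda passes extra parameters (e.g. rows/cols) to its callbacks, hence **kwargs
    return x.round().clip(0, 1)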
Code example #30
File: augmentation.py Project: katsura-jp/alcon23
def get_train_augmentation(height=336,
                           width=224,
                           scale_height=384,
                           scale_width=256):
    train_augmentation = albu.Compose([
        # albu.HorizontalFlip(),
        albu.OneOf(
            [albu.RandomBrightness(0.1, p=1),
             albu.RandomContrast(0.1, p=1)],
            p=0.9),
        albu.ShiftScaleRotate(shift_limit=0.1,
                              scale_limit=0.0,
                              rotate_limit=15,
                              p=0.3),
        albu.OneOf([
            albu.IAAAdditiveGaussianNoise(p=1.0),
            albu.GaussNoise(var_limit=(10., 50.), p=1),
        ], p=0.7),
        albu.OneOf([
            albu.Compose([
                albu.Resize(scale_height, scale_width, p=1.0),
                albu.RandomCrop(height=height, width=width, p=1.0),
            ]),
            albu.Resize(height, width, p=1.0),
        ], p=1.0),
        albu.MotionBlur(blur_limit=10, p=0.7),
        albu.Normalize(mean=[0.695, 0.658, 0.592],
                       std=[0.191, 0.185, 0.171],
                       max_pixel_value=255),
        albu.Cutout(num_holes=1,
                    max_h_size=height // 2,
                    max_w_size=width // 2,
                    p=0.5),
        ToTensor()
    ])
    return train_augmentation