def setup(self, stage=None):
        self.train_transform = A.Compose(
            [
                A.Resize(width=416, height=416),
                A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=0),
                ToTensor(),
            ],
            bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
        )
        self.test_transform = A.Compose(
            [A.Resize(width=416, height=416), ToTensor()],
            bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
        )
        if stage == "fit" or stage is None:
            train_files = [BASE_PATH / "train2017.txt"]
            val_files = [BASE_PATH / "val2017.txt"]
            self.train_dataset = COCODataset(
                train_files, "train2017", transform=self.train_transform
            )
            self.val_dataset = COCODataset(
                val_files, "val2017", transform=self.test_transform
            )

        if stage == "test":
            test_files = [BASE_PATH / "val2017.txt"]
            self.test_dataset = COCODataset(
                test_files, "val2017", transform=self.test_transform
            )
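
Most snippets on this page assume the same Albumentations imports (a few alias the package as `albu` or import transform names directly); a minimal header for the `A.*` style used above, assuming albumentations <= 0.5.2, where the legacy ToTensor still exists (Example 21 below shows the error newer releases raise):

# Assumed imports for the A.* / ToTensor calls throughout these examples
# (albumentations <= 0.5.2; newer releases only ship ToTensorV2).
import albumentations as A
from albumentations.pytorch import ToTensor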
Example 2
def get_augmentations(p=0.5, image_size=224):
    imagenet_stats = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]
    }
    train_tfms = A.Compose([
        # A.Resize(image_size, image_size),
        A.RandomResizedCrop(image_size, image_size),
        A.ShiftScaleRotate(shift_limit=0.15,
                           scale_limit=0.4,
                           rotate_limit=45,
                           p=p),
        A.Cutout(p=p),
        A.RandomRotate90(p=p),
        A.Flip(p=p),
        A.OneOf(
            [
                A.RandomBrightnessContrast(
                    brightness_limit=0.2,
                    contrast_limit=0.2,
                ),
                A.HueSaturationValue(hue_shift_limit=20,
                                     sat_shift_limit=50,
                                     val_shift_limit=50),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.IAAAdditiveGaussianNoise(),
                A.GaussNoise(),
            ],
            p=p,
        ),
        A.CoarseDropout(max_holes=10, p=p),
        A.OneOf(
            [
                A.MotionBlur(p=0.2),
                A.MedianBlur(blur_limit=3, p=0.1),
                A.Blur(blur_limit=3, p=0.1),
            ],
            p=p,
        ),
        A.OneOf(
            [
                A.OpticalDistortion(p=0.3),
                A.GridDistortion(p=0.1),
                A.IAAPiecewiseAffine(p=0.3),
            ],
            p=p,
        ),
        ToTensor(normalize=imagenet_stats),
    ])

    valid_tfms = A.Compose([
        A.CenterCrop(image_size, image_size),
        ToTensor(normalize=imagenet_stats)
    ])

    return train_tfms, valid_tfms
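
A Compose pipeline is applied by calling it with named arrays and reading the result back from the returned dict; a usage sketch for the pair returned above, where `img` is a placeholder for any HxWx3 uint8 NumPy array at least 224 px on each side (the CenterCrop in valid_tfms needs that):

# Hypothetical usage: `img` stands in for any sufficiently large RGB array.
train_tfms, valid_tfms = get_augmentations(p=0.5, image_size=224)
train_tensor = train_tfms(image=img)["image"]  # CHW tensor, ImageNet-normalized
valid_tensor = valid_tfms(image=img)["image"]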
Example 3
    def setup(self, stage=None):
        BASE_PATH = pathify(configs.BASE_DIR)
        self.train_transform = A.Compose(
            [
                A.Resize(width=448, height=448),
                A.ShiftScaleRotate(
                    shift_limit=0.2, scale_limit=0.2, rotate_limit=0),
                ToTensor(),
            ],
            bbox_params=A.BboxParams(format="yolo",
                                     label_fields=["class_labels"]),
        )
        self.test_transform = A.Compose(
            [A.Resize(width=448, height=448),
             ToTensor()],
            bbox_params=A.BboxParams(format="yolo",
                                     label_fields=["class_labels"]),
        )
        if stage == "fit" or stage is None:
            train_files = [
                BASE_PATH / "2007_train.txt", BASE_PATH / "2012_train.txt"
            ]
            val_files = [
                BASE_PATH / "2007_val.txt", BASE_PATH / "2012_val.txt"
            ]
            self.train_dataset = VOCDataset(train_files,
                                            transform=self.train_transform)
            self.val_dataset = VOCDataset(val_files,
                                          transform=self.test_transform)

        if stage == "test":
            test_files = [BASE_PATH / "2007_test.txt"]
            self.test_dataset = VOCDataset(test_files,
                                           transform=self.test_transform)
Example 4
 def __init__(
     self,
     in_channels=3,
     architecture=None,
     split_size=7,
     num_boxes=2,
     num_classes=20,
 ):
     super(YoloV1Model, self).__init__()
     self.in_channels = in_channels
     self.darknet = self._create_conv(architecture)
     self.fcs = self._create_fcs(split_size, num_boxes, num_classes)
     self.train_transform = A.Compose(
         [
             A.Resize(width=448, height=448),
             A.ShiftScaleRotate(
                 shift_limit=0.2, scale_limit=0.2, rotate_limit=0),
             ToTensor(),
         ],
         bbox_params=A.BboxParams(format="yolo",
                                  label_fields=["class_labels"]),
     )
     self.test_transform = A.Compose(
         [A.Resize(width=448, height=448),
          ToTensor()],
         bbox_params=A.BboxParams(format="yolo",
                                  label_fields=["class_labels"]),
     )
Example 5
def get_transform(mode='train', H=384, W=576):
    if mode == 'train':
        transform = A.Compose([
            A.Flip(p=.5),
            A.OneOf([
                A.CLAHE(clip_limit=2, p=.5),
                A.IAASharpen(p=.25),
            ],
                    p=.35),
            A.OneOf(
                [A.RandomBrightness(),
                 A.RandomContrast(),
                 A.RandomGamma()],
                p=.3),
            A.OneOf([
                A.ElasticTransform(
                    alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                A.GridDistortion(),
                A.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
                    p=.3),
            A.ShiftScaleRotate(scale_limit=0.2,
                               rotate_limit=0,
                               shift_limit=0,
                               border_mode=0,
                               p=.5),
            A.CropNonEmptyMaskIfExists(height=int(1400 * .9),
                                       width=int(2100 * .9),
                                       p=.5),
            A.Resize(H, W),
            ToTensor()
        ])
    else:
        transform = A.Compose([A.Resize(352, 544), ToTensor()])
    return transform
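
Because this train pipeline contains mask-aware transforms (CropNonEmptyMaskIfExists in particular requires a mask), it has to be called with both arrays; a sketch with `img` and `mask` as placeholder NumPy arrays:

# Hypothetical usage: img is H x W x 3 uint8, mask has the same spatial size.
# The crop dimensions above assume ~1400 x 2100 source images.
tfm = get_transform(mode='train')
out = tfm(image=img, mask=mask)
x, y = out['image'], out['mask']  # both end up resized to 384 x 576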
Example 6
def get_transforms(phase, width=1600, height=256):
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            albu.HorizontalFlip(),
            albu.OneOf([
                albu.RandomContrast(),
                albu.RandomGamma(),
                albu.RandomBrightness(),
            ],
                       p=0.3),
            albu.OneOf([
                albu.ElasticTransform(
                    alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                albu.GridDistortion(),
                albu.OpticalDistortion(distort_limit=2, shift_limit=0.5),
            ],
                       p=0.3),
            albu.ShiftScaleRotate(),
        ])
    list_transforms.extend([
        albu.Resize(height, width, always_apply=True),  # Resize takes (height, width)
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        ToTensor(),
    ])
    list_trfms = albu.Compose(list_transforms)
    return list_trfms
Example 7
    def get_input_pair(self, image_info):
        dataset_path = self.dataset_path
        img_path = os.path.join(
            dataset_path, image_info["dataset_folder"], self.images_folder,
            image_info["name"] + '_' + image_info["position"] + '.' +
            self.image_type)

        img = Image.open(img_path)

        img_array = np.array(img)

        augm = Compose([
            RandomCrop(224, 224),
            RandomRotate90(),
            Flip(),
            OneOf([RGBShift(), CLAHE(clip_limit=2)], p=0.4),
            ToTensor()
        ],
                       p=1)

        augmented = augm(image=img_array)

        augmented_img = augmented['image']

        return {"features": augmented_img, "targets": augmented_img}
Example 8
def get_train_transform(img_size):

    train_transform = A.Compose([
        A.Resize(img_size, img_size),
        A.Transpose(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(p=0.5),
        A.HueSaturationValue(hue_shift_limit=0.2,
                             sat_shift_limit=0.2,
                             val_shift_limit=0.2,
                             p=0.5),
        A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                   contrast_limit=(-0.1, 0.1),
                                   p=0.5),
        A.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0),
        A.CoarseDropout(p=0.5),
        A.Cutout(p=0.5),
        ToTensor()
    ])

    return train_transform
Example 9
def make_data(data_folder: str,
              mode: str,
              transform: dict,
              num_workers: int,
              batch_size: int,
              fold: str = None,
              folds_path: str = None,
              positive_ratio_range: Tuple[float, float] = (0.3, 0.8),
              epochs: int = 50):
    img_filenames, mask_filenames, non_emptiness = make_filenames(
        data_folder=data_folder, mode=mode, fold=fold, folds_path=folds_path)
    _transform = A.load(transform[mode], 'yaml')
    _transform.transforms = _transform.transforms + [ToTensor()]
    dataset = PneumothoraxDataset(img_filenames=img_filenames,
                                  mask_filenames=mask_filenames,
                                  transform=_transform,
                                  non_emptiness=non_emptiness)

    sampler = EmptySampler(data_source=dataset,
                           positive_ratio_range=positive_ratio_range,
                           epochs=epochs)
    loader = make_data_loader(dataset=dataset,
                              sampler=sampler if mode == 'train' else None,
                              batch_size=batch_size,
                              num_workers=num_workers)
    return loader
Example 10

    def get_input_pair(self, image_info):
        dataset_path = self.dataset_path
        img_path = os.path.join(dataset_path, image_info["dataset_folder"], self.images_folder,
                                image_info["name"] + '_' + image_info["position"] + '.' + self.image_type)
        mask_path = os.path.join(dataset_path, image_info["dataset_folder"], self.masks_folder,
                                 image_info["name"] + '_' + image_info["position"] + '.' + self.mask_type)

        img = Image.open(img_path)
        mask = Image.open(mask_path)

        img_array = np.array(img)
        mask_array = np.array(mask).astype(np.float32)

        aug = Compose([
            RandomCrop(224, 224),
            RandomRotate90(),
            Flip(),
            OneOf([
                RGBShift(),
                CLAHE(clip_limit=2)
            ], p=0.4),
            ToTensor()
        ], p=1)

        augmented = aug(image=img_array, mask=mask_array)

        augmented_img = augmented['image']
        augmented_mask = augmented['mask']

        augmented_mask = augmented_mask.squeeze().long()

        return {"features": augmented_img, "targets": augmented_mask}
Example 11
    def __init__(self,
                 detect_path='weights/best-checkpoint.bin',
                 model_arc='weights/model_eff_arc.json',
                 weights='weights/model_best_acc.h5'):
        # classifier
        super().__init__()
        with open(model_arc, 'r') as f:
            self.model = tf.keras.models.model_from_json(f.read())
        self.model.load_weights(weights)
        self.mapper = [
            'TH', 'ACB', 'Acecook', 'Addidas', 'Agribank', 'Bidv', 'Big C',
            'Cai Lan', 'Chinsu', 'Colgate', 'FPT', 'Habeco', 'Hai Ha',
            'Jollibee', 'KFC', 'Kinh Do', 'Lotte mart', 'Mbbank new',
            'Mbbank old', 'Neptune', 'Nike', 'Pepsi', 'Petrolimex',
            'Phuc Long', 'Samsung', 'SHB', 'Techcombank', 'The Coffe House',
            'The gioi di dong', 'TPbank', 'Vietcombank', 'Vietinbank',
            'Viettel', 'Vinamilk', 'Vinfast', 'Vinmart', 'Vifon', 'Vnpt',
            'Vpbank'
        ]
        # detector
        self.transform = A.Compose([
            A.Resize(height=512, width=512, p=1.0),
            ToTensor(),
        ])

        self.load_detect(detect_path)
Example 12
 def get_train_transform(self):
     transforms = [] if self.transforms is None else self.transforms
     return A.Compose(transforms + [ToTensor()],
                      bbox_params={
                          'format': 'pascal_voc',
                          'label_fields': ['labels']
                      })
Example 13
 def __init__(self, phase='train', datalabel='', resize=(320, 320),
              imgs_per_video=30, min_frames=0,
              normalize=dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
              frame_interval=10, max_frames=300, augment='augment0'):
     assert phase in ['train', 'val', 'test']
     self.datalabel = datalabel
     self.phase = phase
     self.imgs_per_video = imgs_per_video
     self.frame_interval = frame_interval
     self.num_classes = 2
     self.epoch = 0
     self.max_frames = max_frames
     if min_frames:
         self.min_frames = min_frames
     else:
         self.min_frames = max_frames * 0.3
     self.dataset = []
     self.aug = augmentations[augment]
     resize_ = (int(resize[0] / 0.8), int(resize[1] / 0.8))
     self.resize = resize
     #Resize(*resize_,interpolation=cv2.INTER_CUBIC),
     self.trans = Compose(
         [CenterCrop(*resize),
          ToTensor(normalize=normalize)])
     ###############
     # doing resize and center crop in trans
     if type(datalabel) != str:
         self.dataset = datalabel
         return
     if 'ff-5' in self.datalabel:
         for i, j in enumerate([
                 'Origin', 'Deepfakes', 'NeuralTextures', 'FaceSwap',
                 'Face2Face'
         ]):
             temp = FF_dataset(j, self.datalabel.split('-')[2], phase)
             temp = [[k[0], i] for k in temp]
             self.dataset += temp
     elif 'ff-all' in self.datalabel:
         for i in [
                 'Origin', 'Deepfakes', 'NeuralTextures', 'FaceSwap',
                 'Face2Face'
         ]:
             self.dataset += FF_dataset(i,
                                        self.datalabel.split('-')[2], phase)
         if phase != 'test':
             self.dataset = make_balance(self.dataset)
     elif 'ff' in self.datalabel:
         self.dataset += FF_dataset(
             self.datalabel.split('-')[1],
             self.datalabel.split('-')[2], phase) + FF_dataset(
                 "Origin",
                 self.datalabel.split('-')[2], phase)
     elif 'celeb' in self.datalabel:
         self.dataset = Celeb_test
     elif 'deeper' in self.datalabel:
         self.dataset = deeperforensics_dataset(phase) + FF_dataset(
             'Origin',
             self.datalabel.split('-')[1], phase)
     elif 'dfdc' in self.datalabel:
         self.dataset = dfdc_dataset(phase)
     else:
         raise Exception('no such dataset')
Example 14
def get_transforms(img_size):

    train_transform = A.Compose([
        A.Resize(img_size, img_size),
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.ShiftScaleRotate(),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor()
    ])
    test_transform = A.Compose([
        A.Resize(img_size, img_size),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor()
    ])

    return train_transform, test_transform
Example 15
def get_validation_tfms(dataset_normalize_stats):
    size = getModelInputSize()
    return A.Compose([
        A.Resize(size[0], size[1]),
        # normalize= must be passed by keyword: the legacy ToTensor signature is
        # ToTensor(num_classes=1, sigmoid=True, normalize=None), so a positional
        # argument would bind to num_classes.
        ToTensor(normalize=dataset_normalize_stats),
    ])
#--------------------------------------
Example 16
 def __init__(self, detect_path):
     super().__init__()
     # detector
     self.transform = A.Compose([
         A.Resize(height=384, width=384, p=1.0),
         ToTensor(),
     ])
     self.load_detect(detect_path)
Example 17

 def __init__(self, filelist, train_transforms=None, blur_mask=False):
     self.blur_mask = blur_mask
     self.filelist = filelist
     transform_list = [albu.Normalize(), ToTensor()]
     if train_transforms:
         #train_transforms = [albu.VerticalFlip(), albu.HorizontalFlip(), albu.ShiftScaleRotate()]
         transform_list = train_transforms + transform_list
     self.augs = albu.Compose(transform_list)
Example 18
    def __init__(self, folder, img_size=512):
        self.root = folder
        self.to_tensor = ToTensor()

        df = pd.read_csv('../dataset/train_folds_5.csv')
        self.image_name_list = df[df['exist_labels'] == 1]['fname'].to_list()
        self.img_size = img_size

        print("number of sample: ", self.__len__())
Example 19
def get_transforms2(phase):
    list_transforms = []
    list_transforms.extend([
        HorizontalFlip(p=1),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ])
    list_trfms = Compose(list_transforms)
    return list_trfms
Example 20
def get_valid_transform(img_size):
    # constants from imagenet data
    test_transform = A.Compose([
        A.Resize(img_size, img_size),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor()
    ])

    return test_transform
Example 21
def test_torch_to_tensor_raises_runtime_error():
    with pytest.raises(RuntimeError) as exc_info:
        aug = ToTensor()  # noqa F841
    message = (
        "`ToTensor` is obsolete and it was removed from Albumentations. Please use `ToTensorV2` instead - "
        "https://albumentations.ai/docs/api_reference/pytorch/transforms/"
        "#albumentations.pytorch.transforms.ToTensorV2. "
        "\n\nIf you need `ToTensor` downgrade Albumentations to version 0.5.2."
    )
    assert str(exc_info.value) == message
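
The error message above points at the replacement; a rough migration sketch, keeping in mind that ToTensorV2 only converts HWC arrays to CHW tensors and does not normalize, so the old ToTensor(normalize=...) pattern becomes an explicit A.Normalize step:

import albumentations as A
from albumentations.pytorch import ToTensorV2

# Approximate equivalent of the legacy ToTensor(normalize=imagenet_stats):
transform = A.Compose([
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2(),  # HWC -> CHW torch.Tensor, no scaling of its own
])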
Example 22
    def get_input_pair(self, data_info_row):
        if len(self.channels) == 0:
            raise Exception('You have to specify at least one channel.')

        instance_name = '_'.join(
            [data_info_row['name'],
             str(data_info_row['position'])])
        image_path = get_filepath(self.dataset_path,
                                  data_info_row['name'],
                                  self.images_folder,
                                  instance_name,
                                  file_type=self.image_type)
        mask_path = get_filepath(self.dataset_path,
                                 data_info_row['name'],
                                 self.masks_folder,
                                 instance_name,
                                 file_type=self.mask_type)

        images_array = filter_by_channels(read_tensor(image_path),
                                          self.channels, self.neighbours)

        if images_array.ndim == 2:
            images_array = np.expand_dims(images_array, -1)

        masks_array = read_tensor(mask_path)

        aug = Compose([
            RandomRotate90(),
            Flip(),
            OneOf(
                [
                    RandomSizedCrop(min_max_height=(int(
                        self.image_size * 0.7), self.image_size),
                                    height=self.image_size,
                                    width=self.image_size),
                    RandomBrightnessContrast(brightness_limit=0.15,
                                             contrast_limit=0.15),
                    #MedianBlur(blur_limit=3, p=0.2),
                    MaskDropout(p=0.6),
                    ElasticTransform(alpha=15, sigma=5, alpha_affine=5),
                    GridDistortion(p=0.6)
                ],
                p=0.8),
            ToTensor()
        ])

        augmented = aug(image=images_array, mask=masks_array)
        augmented_images = augmented['image']
        augmented_masks = augmented['mask']
        if self.classification_head:
            masks_class = ((augmented_masks.sum() > 0) * 1).unsqueeze(-1).float()
            return augmented_images, [augmented_masks, masks_class]
        else:
            return {'features': augmented_images, 'targets': augmented_masks}
Example 23
    def get_input_pair(self, data_info_row):
        if len(self.channels) == 0:
            raise Exception('You have to specify at least one channel.')

        instance_name = '_'.join(
            [data_info_row['name'], data_info_row['position']])
        image_path = get_filepath(self.dataset_path,
                                  data_info_row['dataset_folder'],
                                  self.images_folder,
                                  instance_name,
                                  file_type=self.image_type)
        mask_path = get_filepath(self.dataset_path,
                                 data_info_row['dataset_folder'],
                                 self.masks_folder,
                                 instance_name,
                                 file_type=self.mask_type)

        images_array = filter_by_channels(read_tensor(image_path),
                                          self.channels)

        if images_array.ndim == 2:
            images_array = np.expand_dims(images_array, -1)

        masks_array = read_tensor(mask_path)

        if self.channels[0] == 'rgb':
            rgb_tensor = images_array[:, :, :3].astype(np.uint8)

            rgb_aug = Compose(
                [OneOf([RGBShift(), CLAHE(clip_limit=2)], p=0.4)], p=0.9)

            augmented_rgb = rgb_aug(image=rgb_tensor, mask=masks_array)
            images_array = np.concatenate(
                [augmented_rgb['image'], images_array[:, :, 3:]], axis=2)
            masks_array = augmented_rgb['mask']

        aug = Compose([
            RandomRotate90(),
            Flip(),
            OneOf([
                RandomSizedCrop(min_max_height=(int(
                    self.image_size * 0.7), self.image_size),
                                height=self.image_size,
                                width=self.image_size)
            ],
                  p=0.4),
            ToTensor()
        ])

        augmented = aug(image=images_array, mask=masks_array)
        augmented_images = augmented['image']
        augmented_masks = augmented['mask']

        return {'features': augmented_images, 'targets': augmented_masks}
Example 24
def complex_preprocess(normalize=normalize):
    return A.Compose([
        A.Cutout(num_holes=8, max_h_size=3, max_w_size=3, p=0.3),
        A.ShiftScaleRotate(shift_limit=(-0.05, 0.05), scale_limit=(0, .01),
                           rotate_limit=45, border_mode=0),
        A.HorizontalFlip(),
        A.RandomGamma((40, 120)),
        A.GaussNoise(var_limit=0.01),
        A.RandomContrast((-0.2, 0.2)),
        ToTensor(normalize=normalize),
    ])
Example 25
def get_augumentation(phase,
                      width=512,
                      height=512,
                      min_area=0.,
                      min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.augmentations.transforms.LongestMaxSize(max_size=width,
                                                         always_apply=True),
            albu.PadIfNeeded(min_height=height,
                             min_width=width,
                             always_apply=True,
                             border_mode=0,
                             value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(height=height,
                                                            width=width,
                                                            p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20,
                              b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if (phase == 'test' or phase == 'valid'):
        list_transforms.extend([albu.Resize(height=height, width=width)])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        ToTensor()
    ])
    if (phase == 'test'):
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(
                            format='pascal_voc',
                            min_area=min_area,
                            min_visibility=min_visibility,
                            label_fields=['category_id']))
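
Since the train/valid branches are built with BboxParams(format='pascal_voc', label_fields=['category_id']), the composed transform must be called with matching keyword arguments; a usage sketch with placeholder values:

# Hypothetical usage: boxes are [x_min, y_min, x_max, y_max] in pixels,
# and `category_id` must be passed because of label_fields above.
tfm = get_augumentation('train', width=512, height=512)
out = tfm(image=img, bboxes=[[40, 60, 200, 220]], category_id=[3])
img_t, boxes, labels = out['image'], out['bboxes'], out['category_id']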
Example 26
def get_transforms(phase):
    list_transforms = []
    if phase == "train":
        list_transforms.extend([
            HorizontalFlip(p=0.5),  # only horizontal flip as of now
        ])
    list_transforms.extend([
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ])
    list_trfms = Compose(list_transforms)
    return list_trfms
Example 27
    def __init__(self, data_folder, mode, transform=None, folder_index=None, folds_distr_path=None):
        self.transform = transform
        self.mode = mode

        self.train_image_path = os.path.join(data_folder, 'train')
        self.train_mask_path = os.path.join(data_folder, 'mask')
        self.test_image_path = os.path.join(data_folder, 'test')

        self.fold_index = None
        self.folds_distr_path = folds_distr_path
        self.set_mode(mode, folder_index)
        self.to_tensor = ToTensor()
Example 28
def get_training_tfms(dataset_normalize_stats):
    size = getModelInputSize()
    return A.Compose([
        A.Resize(size[0], size[1]),
        A.RandomRotate90(p=0.5),
        A.Flip(p=0.5),
        A.ColorJitter(p=0.5),
        A.RandomGamma(p=0.5),
        A.RandomContrast(p=0.3),
        A.RandomBrightness(p=0.5),
        ToTensor(normalize=dataset_normalize_stats),  # keyword arg; positional would bind to num_classes
    ])
Example 29

 def __init__(self):
     self.transforms = Compose([
         PadIfNeeded(40, 40),
         RandomCrop(32, 32),
         HorizontalFlip(p=.5),
         Cutout(8, 8),
         Normalize(mean=(0.4914, 0.4822, 0.4465),
                   std=(0.2023, 0.1994, 0.2010),
                   max_pixel_value=255.0,
                   always_apply=True,
                   p=1.0),
         ToTensor()
     ])
Example 30

def load_references(content, style, h, w):
    C_orig = plt.imread(content)
    S_orig = plt.imread(style)

    transform = albumentations.Compose([
        albumentations.Resize(h, w, always_apply=True),
        albumentations.Normalize(always_apply=True),
        ToTensor(),
    ])

    C = transform(image=C_orig)["image"].unsqueeze(0)
    S = transform(image=S_orig)["image"].unsqueeze(0)

    return C, S
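
A usage sketch for the loader above; the file names are placeholders:

# Hypothetical usage: both paths point at ordinary RGB image files.
C, S = load_references('content.jpg', 'style.jpg', h=256, w=256)
print(C.shape, S.shape)  # torch.Size([1, 3, 256, 256]) each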