def load_data(traindir, valdir, **kwargs):
    """Build the train and validation datasets; adapt this for your specific task.

    Args:
        traindir (str): train dataset dir
        valdir (str): validation dataset dir

    Returns:
        tuple: the train dataset and the validation dataset
    """
    train_transform = T_seg.Compose([
        T_seg.RandomCrop(512),
        T_seg.RandomHorizontalFlip(),
        T_seg.RandomVerticalFlip(),
        T_seg.ToTensor(),
        T_seg.Normalize(),
    ])
    val_transform = T_seg.Compose([
        T_seg.ToTensor(),
        T_seg.Normalize(),
    ])
    dataset_train = SegmentationDataset(
        traindir,
        extentions=kwargs['extensions'],
        transforms=train_transform,
    )
    dataset_val = SegmentationDataset(
        valdir,
        extentions=kwargs['extensions'],
        transforms=val_transform,
    )
    return dataset_train, dataset_val
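# A minimal usage sketch (not part of the original module): wrapping the datasets
# returned by load_data in torch DataLoaders. The directories, the 'extensions'
# value, batch size, and worker count below are illustrative assumptions.
def _example_build_loaders():
    from torch.utils.data import DataLoader

    dataset_train, dataset_val = load_data(
        'data/train', 'data/val', extensions=('.png', '.jpg'))
    train_loader = DataLoader(dataset_train, batch_size=8, shuffle=True, num_workers=4)
    val_loader = DataLoader(dataset_val, batch_size=1, shuffle=False)
    return train_loader, val_loader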
def test_ToTensor(fp):
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.ToTensor(),
    ])(img, mask)
    assert isinstance(result_img, torch.Tensor)
    assert result_img.ndim == 3
    assert result_img.shape[1:3] == img.shape[0:2]
    assert isinstance(result_mask, torch.Tensor)
    assert torch.all(torch.unique(result_mask) == torch.tensor([0, 1, 2, 3]))
def test_Pad(fp):
    img = read_img(fp)
    mask = read_img(mask_file)

    # constant fill: the mask must be padded with 0, not with the image fill value
    result_img, result_mask = transforms_seg.Pad(10, fill=1)(img, mask)
    if result_mask.ndim == 2:
        assert result_mask[0, 0] == 0
    else:
        assert result_mask[0, 0, 0] == 0

    # reflect padding
    result_img, result_mask = transforms_seg.Pad(20, padding_mode='reflect')(img, mask)
    assert result_mask.shape[0:2] == (mask.shape[0] + 40, mask.shape[1] + 40)
    assert result_mask[0, 0] == mask[20, 20]
    assert result_mask.dtype == mask.dtype

    # every supported padding mode, with int, 2-tuple, and 4-tuple padding specs
    for item in [
            'reflect', 'edge', 'linear_ramp', 'maximum', 'mean', 'median',
            'minimum', 'symmetric', 'wrap'
    ]:
        result_img, result_mask = transforms_seg.Pad(10, padding_mode=item)(img, mask)
        assert result_mask.dtype == mask.dtype
        assert result_mask.shape[0:2] == (mask.shape[0] + 20, mask.shape[1] + 20)

        result_img, result_mask = transforms_seg.Pad((10, 20), padding_mode=item)(img, mask)
        assert result_mask.shape[0:2] == (mask.shape[0] + 40, mask.shape[1] + 20)
        assert result_mask.dtype == mask.dtype

        result_img, result_mask = transforms_seg.Pad((10, 20, 30, 40), padding_mode=item)(img, mask)
        assert result_mask.shape[0:2] == (mask.shape[0] + 60, mask.shape[1] + 40)
        assert result_mask.dtype == mask.dtype

    # Pad composed with ToTensor
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.Pad(10, fill=1), transforms_seg.ToTensor()])(img, mask)
    assert isinstance(result_mask, torch.Tensor)
def test_Normalize(fp):
    img = read_img(fp)
    mask = read_img(mask_file)
    channels = 1 if img.ndim == 2 else img.shape[2]
    mean = [img.mean()] if channels == 1 else np.array(img.mean(axis=(0, 1))).tolist()
    std = [img.std()] if channels == 1 else np.array(img.std(axis=(0, 1))).tolist()
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.ToTensor(),
        transforms_seg.Normalize(mean, std),
    ])(img, mask)
    assert isinstance(result_img, torch.Tensor)
    assert result_img.ndim == 3
    assert result_img.shape[1:3] == img.shape[0:2]
    assert isinstance(result_mask, torch.Tensor)
    assert torch.all(torch.unique(result_mask) == torch.tensor([0, 1, 2, 3]))
def test_RandomFlip(fp):
    img = read_img(fp)
    mask = read_img(mask_file)

    # p=0: the transform must be a no-op, so the mask is unchanged
    result_img, result_mask = transforms_seg.RandomFlip(p=0)(img, mask)
    assert result_mask.dtype == mask.dtype
    assert result_mask.shape[0:2] == mask.shape[0:2]
    if result_mask.ndim == 2:
        assert result_mask[0, 0] == mask[0, 0]
    else:
        assert (result_mask[0, 0, :] == mask[0, 0, :]).all()

    # composed with ToTensor
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomFlip(p=0.1), transforms_seg.ToTensor()])(img, mask)
    assert isinstance(result_mask, torch.Tensor)
    assert result_mask.shape[0:2] == mask.shape[0:2]
def test_Resize(fp):
    img = read_img(fp)
    mask = read_img(mask_file)
    assert mask.shape == (650, 500)

    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize(300),
        transforms_seg.ToTensor(),
    ])(img, mask)
    assert result_mask.shape == torch.Size([300, 300])
    assert isinstance(result_mask, torch.Tensor)
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))

    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize(833),
    ])(img, mask)
    assert result_mask.shape[0:2] == (833, 833)
    assert result_mask.dtype == mask.dtype

    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize((500, 300)),
    ])(img, mask)
    assert result_mask.shape[0:2] == (500, 300)
    assert result_mask.dtype == mask.dtype
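# Note (assumption): the tests above rely on a pytest fixture named `fp` that
# supplies a sample image path, plus a `mask_file` path defined elsewhere in the
# test module; neither is shown in this section. A minimal sketch of such a
# fixture is given below as comments; the file names are hypothetical.
#
#     import pytest
#
#     @pytest.fixture(params=['tests/data/sample_rgb.png',
#                             'tests/data/sample_gray.png'])
#     def fp(request):
#         return request.param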