def load_data(traindir, valdir, **kwargs):
    """Build the train and validation segmentation datasets.

    You can change this for your specific task.

    Args:
        traindir (str): train dataset dir
        valdir (str): validation dataset dir
        **kwargs: must contain ``'extensions'`` (file extensions accepted by
            :class:`SegmentationDataset`); may contain ``'crop_size'`` (int,
            default 512) controlling the random-crop size used for training.

    Returns:
        tuple: the train dataset and validation dataset
    """
    # Generalized: crop size was hard-coded to 512; default keeps old behavior.
    crop_size = kwargs.get('crop_size', 512)
    train_transform = T_seg.Compose([
        T_seg.RandomCrop(crop_size),
        T_seg.RandomHorizontalFlip(),
        T_seg.RandomVerticalFlip(),
        T_seg.ToTensor(),
        T_seg.Normalize(),
    ])
    val_transform = T_seg.Compose([
        T_seg.ToTensor(),
        T_seg.Normalize(),
    ])
    # NOTE(review): 'extentions' is the (misspelled) keyword expected by
    # SegmentationDataset — keep as-is unless that class is fixed too.
    dataset_train = SegmentationDataset(
        traindir,
        extentions=kwargs['extensions'],
        transforms=train_transform,
    )
    dataset_val = SegmentationDataset(
        valdir,
        extentions=kwargs['extensions'],
        transforms=val_transform,
    )
    return dataset_train, dataset_val
def test_ToGray(fp):
    """ToGray drops to 2-D by default, honors output_channels, and leaves
    the mask labels untouched."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.ToGray()])(img, mask)
    assert result_img.dtype == img.dtype
    assert result_img.ndim == 2
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.ToGray(output_channels=5)])(img, mask)
    assert result_img.shape == (img.shape[0], img.shape[1], 5)
    assert result_img.dtype == img.dtype
    assert result_mask.dtype == mask.dtype
    # fixed: redundant `== True` on the np.all(...) truth value
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
def test_CenterCrop(fp):
    """CenterCrop keeps the mask dtype, yields the requested spatial size,
    and raises ValueError for sizes larger than the input."""
    img = read_img(fp)
    mask = read_img(mask_file)
    for size, expected in ((300, (300, 300)), ((500, 300), (500, 300))):
        pipeline = transforms_seg.Compose([transforms_seg.CenterCrop(size)])
        result_img, result_mask = pipeline(img, mask)
        assert result_mask.shape[0:2] == expected
        assert result_mask.dtype == mask.dtype
    with pytest.raises(ValueError) as excinfo:
        transforms_seg.CenterCrop(1000)(img, mask)
    assert 'the output_size should' in str(excinfo.value)
def test_GaussianBlur(fp):
    """GaussianBlur preserves image shape/dtype and mask labels."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.GaussianBlur(kernel_size=5)])(img, mask)
    assert result_img.shape == img.shape
    assert result_img.dtype == img.dtype
    assert result_mask.dtype == mask.dtype
    # fixed: redundant `== True` on the np.all(...) truth value
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
def test_RandomContrast(fp):
    """RandomContrast preserves shape/dtype, bounds the contrast factor when
    max_factor is given, and leaves the mask labels untouched."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomContrast()])(img, mask)
    assert result_img.shape == img.shape
    assert result_img.dtype == img.dtype
    assert result_mask.dtype == mask.dtype
    # fixed: redundant `== True` on the np.all(...) truth values below
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomContrast(max_factor=1.2)])(img, mask)
    assert result_img.shape == img.shape
    assert result_img.dtype == img.dtype
    # spot-check one pixel: the applied factor must not exceed max_factor
    if result_img.ndim == 2:
        assert abs(float(result_img[0, 0]) / float(img[0, 0])) <= 1.2
    else:
        assert abs(float(result_img[0, 0, 0]) / float(img[0, 0, 0])) <= 1.2
    assert result_mask.dtype == mask.dtype
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
def test_RandomBrightness(fp):
    """RandomBrightness preserves shape/dtype, bounds the offset when
    max_value is given, and leaves the mask labels untouched."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomBrightness()])(img, mask)
    assert result_img.shape == img.shape
    assert result_img.dtype == img.dtype
    assert result_mask.dtype == mask.dtype
    # fixed: redundant `== True` on the np.all(...) truth values below
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomBrightness(max_value=10)])(img, mask)
    assert result_img.shape == img.shape
    assert result_img.dtype == img.dtype
    # spot-check one pixel: the brightness shift must not exceed max_value
    if result_img.ndim == 2:
        assert abs(float(result_img[0, 0]) - float(img[0, 0])) <= 10
    else:
        assert abs(float(result_img[0, 0, 0]) - float(img[0, 0, 0])) <= 10
    assert result_mask.dtype == mask.dtype
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
def test_RandomNoise(fp):
    """Every supported noise mode preserves image shape/dtype and mask
    labels."""
    img = read_img(fp)
    mask = read_img(mask_file)
    for item in ['gaussian', 'salt', 'pepper', 's&p']:
        result_img, result_mask = transforms_seg.Compose(
            [transforms_seg.RandomNoise(mode=item)])(img, mask)
        assert result_img.shape == img.shape
        assert result_img.dtype == img.dtype
        assert result_mask.dtype == mask.dtype
        # fixed: redundant `== True` on the np.all(...) truth value
        assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
def test_ToTensor(fp):
    """ToTensor converts both image and mask to torch.Tensor, the image as
    CHW, with mask labels untouched."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.ToTensor()])(img, mask)
    # fixed: isinstance over `type(x) == torch.Tensor` (PEP 8)
    assert isinstance(result_img, torch.Tensor)
    assert len(result_img.shape) == 3
    assert result_img.shape[1:3] == img.shape[0:2]
    assert isinstance(result_mask, torch.Tensor)
    # fixed: redundant `== True` on the torch.all(...) truth value
    assert torch.all(torch.unique(result_mask) == torch.tensor([0, 1, 2, 3]))
def test_Resize(fp):
    """Resize handles int (square), upscale, and (h, w) tuple targets while
    preserving mask dtype and labels."""
    img = read_img(fp)
    mask = read_img(mask_file)
    assert mask.shape == (650, 500)
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize(300),
        transforms_seg.ToTensor(),
    ])(img, mask)
    assert result_mask.shape == torch.Size([300, 300])
    # fixed: isinstance over `type(x) == torch.Tensor` (PEP 8)
    assert isinstance(result_mask, torch.Tensor)
    # fixed: redundant `== True` on the np.all(...) truth value
    assert np.all(np.unique(result_mask) == np.array([0, 1, 2, 3]))
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize(833),
    ])(img, mask)
    assert result_mask.shape[0:2] == (833, 833)
    assert result_mask.dtype == mask.dtype
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.Resize((500, 300)),
    ])(img, mask)
    assert result_mask.shape[0:2] == (500, 300)
    assert result_mask.dtype == mask.dtype
def test_Pad(fp):
    """Pad supports constant fill, all numpy padding modes, int / 2-tuple /
    4-tuple padding specs, and composes with ToTensor."""
    img = read_img(fp)
    mask = read_img(mask_file)
    # constant value: the mask is padded with 0 regardless of `fill`
    result_img, result_mask = transforms_seg.Pad(10, fill=1)(img, mask)
    if result_mask.ndim == 2:
        assert result_mask[0, 0] == 0
    else:
        assert result_mask[0, 0, 0] == 0
    # reflect value
    result_img, result_mask = transforms_seg.Pad(
        20, padding_mode='reflect')(img, mask)
    assert result_mask.shape[0:2] == (mask.shape[0] + 40, mask.shape[1] + 40)
    assert result_mask[0, 0] == mask[20, 20]
    assert result_mask.dtype == mask.dtype
    # all padding mode methods (fixed: removed commented-out debug loop)
    for item in [
            'reflect', 'edge', 'linear_ramp', 'maximum', 'mean', 'median',
            'minimum', 'symmetric', 'wrap'
    ]:
        result_img, result_mask = transforms_seg.Pad(
            10, padding_mode=item)(img, mask)
        assert result_mask.dtype == mask.dtype
        assert result_mask.shape[0:2] == (mask.shape[0] + 20,
                                          mask.shape[1] + 20)
        result_img, result_mask = transforms_seg.Pad(
            (10, 20), padding_mode=item)(img, mask)
        assert result_mask.shape[0:2] == (mask.shape[0] + 40,
                                          mask.shape[1] + 20)
        assert result_mask.dtype == mask.dtype
        result_img, result_mask = transforms_seg.Pad(
            (10, 20, 30, 40), padding_mode=item)(img, mask)
        assert result_mask.shape[0:2] == (mask.shape[0] + 60,
                                          mask.shape[1] + 40)
        assert result_mask.dtype == mask.dtype
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.Pad(10, fill=1), transforms_seg.ToTensor()])(img, mask)
    # fixed: isinstance over `type(x) == torch.Tensor` (PEP 8)
    assert isinstance(result_mask, torch.Tensor)
def test_Normalize(fp):
    """Normalize (with per-channel mean/std computed from the image) keeps
    tensor shape and leaves mask labels untouched."""
    img = read_img(fp)
    mask = read_img(mask_file)
    channels = 1 if img.ndim == 2 else img.shape[2]
    mean = [img.mean()] if channels == 1 else np.array(
        img.mean(axis=(0, 1))).tolist()
    std = [img.std()] if channels == 1 else np.array(
        img.std(axis=(0, 1))).tolist()
    result_img, result_mask = transforms_seg.Compose([
        transforms_seg.ToTensor(),
        transforms_seg.Normalize(mean, std)
    ])(img, mask)
    # fixed: isinstance over `type(x) == torch.Tensor` (PEP 8)
    assert isinstance(result_img, torch.Tensor)
    assert len(result_img.shape) == 3
    assert result_img.shape[1:3] == img.shape[0:2]
    assert isinstance(result_mask, torch.Tensor)
    # fixed: redundant `== True` on the torch.all(...) truth value
    assert torch.all(torch.unique(result_mask) == torch.tensor([0, 1, 2, 3]))
def test_RandomFlip(fp):
    """RandomFlip with p=0 is a no-op; composing with ToTensor yields a
    tensor of unchanged spatial size."""
    img = read_img(fp)
    mask = read_img(mask_file)
    result_img, result_mask = transforms_seg.RandomFlip(p=0)(img, mask)
    assert result_mask.dtype == mask.dtype
    assert result_mask.shape[0:2] == mask.shape[0:2]
    if result_mask.ndim == 2:
        height, width = mask.shape
        assert result_mask[0, 0] == mask[0, 0]
    else:
        height, width, depth = mask.shape
        # fixed: redundant `== True` on the .any() truth value
        assert (result_mask[0, 0, :] == mask[0, 0, :]).any()
    # tensor
    result_img, result_mask = transforms_seg.Compose(
        [transforms_seg.RandomFlip(p=0.1), transforms_seg.ToTensor()])(img,
                                                                       mask)
    # fixed: isinstance over `type(x) == torch.Tensor` (PEP 8)
    assert isinstance(result_mask, torch.Tensor)
    assert result_mask.shape[0:2] == mask.shape[0:2]