def apply_and_show_transforms(transforms):
    """Preview each candidate transform on one shared random image sample.

    For every transform in ``transforms`` a dataset is built whose pipeline is
    the default preprocessing (channel-first, normalize, to-tensor) followed by
    that transform, and the same randomly drawn images are displayed so the
    augmentations can be compared side by side.

    NOTE(review): relies on module-level ``np``, ``n``, ``n_imgs``, ``images``,
    ``labels``, ``compose`` and ``show_sample_images`` — confirm they are
    defined before this is called.
    """
    # Draw the sample indices once, so every transform shows the same images.
    sample_ids = np.random.randint(0, n, (n_imgs, 1)).tolist()
    base_pipeline = [utils.to_channel_first, utils.normalize, utils.to_tensor]
    for extra in transforms:
        pipeline = base_pipeline + [extra]
        transform_fn = partial(compose, transforms=pipeline)
        preview_set = utils.DatasetWithTransform(
            images, labels, transform=transform_fn
        )
        show_sample_images(preview_set, n_imgs, sample_ids)
# Stratified 85/15 train/validation split of the images.
(train_imgs, val_imgs,
 train_labels, val_labels) = sklearn.model_selection.train_test_split(
    images, labels, shuffle=True, test_size=0.15, stratify=labels)

# Sanity checks: labels stay paired with their images and nothing is dropped.
assert len(train_imgs) == len(train_labels)
assert len(val_imgs) == len(val_labels)
assert len(train_imgs) + len(val_imgs) == n_imgs

# Default preprocessing pipeline, frozen into a single callable.
trafos = partial(
    utils.compose,
    transforms=[utils.to_channel_first, utils.normalize, utils.to_tensor],
)
train_data = utils.DatasetWithTransform(train_imgs, train_labels, transform=trafos)
val_data = utils.DatasetWithTransform(val_imgs, val_labels, transform=trafos)
print("N Training: ", len(train_imgs))
print("N Val: ", len(val_imgs))

# Linear classifier over the flattened pixels of one image.
n_pixels = images[0].size
n_classes = 10
model = LogisticRegressor(n_pixels, n_classes)
model.to(device)

train_batch_size = 4
train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=train_batch_size, shuffle=True)
# Continuation of the training-transform list (the `[` opens before this
# chunk): default preprocessing plus the two random augmentations.
utils.to_channel_first, utils.normalize, random_flip, random_color_jitter, utils.to_tensor
]
# Freeze the pipeline into a single callable.
trafos = functools.partial(utils.compose, transforms=trafos)
# ResNet-18 classifier; LogSoftmax so the output pairs with an NLL-style loss.
model = nn.Sequential(resnet18(num_classes=10), nn.LogSoftmax(dim=1))
model = model.to(device)
# get training and validation data
images, labels = utils.load_cifar('./cifar10/train')
(train_images, train_labels, val_images, val_labels) = utils.make_cifar_train_val_split(images, labels)
# Augmented pipeline for training; plain default transform for validation.
train_dataset = utils.DatasetWithTransform(train_images, train_labels, transform=trafos)
val_dataset = utils.DatasetWithTransform(
    val_images, val_labels, transform=utils.get_default_cifar_transform())
train_loader = DataLoader(
    train_dataset, batch_size=4, shuffle=True,
)
val_loader = DataLoader(val_dataset, batch_size=25)
optimizer = Adam(model.parameters(), lr=1.e-3)
# %tensorboard --logdir runs
n_epochs = 5
# NOTE(review): this call is cut off here — its remaining arguments continue
# past this chunk.
utils.run_cifar_training(model,
# (note that alternatively we could also have accepted a list of transforms # in DatasetWithTransform) def compose(image, target, transforms): for trafo in transforms: image, target = trafo(image, target) return image, target # create the dataset with the transformations trafos = [utils.to_channel_first, utils.normalize, utils.to_tensor] trafo = partial( compose, transforms=trafos) # freezes compose with trafos as a function call dataset = utils.DatasetWithTransform(images, labels, transform=trafo) # function to show an image target pair returned from the dataset def show_image(ax, image, target, trafo_name): # need to go back to numpy array and WHC axis order image = image.numpy().transpose((1, 2, 0)) # find the label name label = categories[target.item()] ax.imshow(image) ax.set_title("{label} : {trafo_name}".format(label=label, trafo_name=trafo_name)) def show_sample_images(dataset, n_samples, ids): n = len(dataset)
# Fix: `os.path.join` is used below but `os` was never imported, which raises
# NameError at runtime. Imports are also regrouped stdlib / third-party / local.
import os
from functools import partial

import matplotlib.pyplot as plt
import torch

import utils
from logistic_regressor_conv_filters import gaussian_kernel

# 3x3 discrete Laplacian (edge detector) and a 3x3 Gaussian (smoothing) kernel.
laplacian_filter = torch.Tensor([[0, 1, 0],
                                 [1, -4, 1],
                                 [0, 1, 0]])
gaussian_filter = gaussian_kernel(dim=3)

# CIFAR-10 test split with the default preprocessing pipeline; deterministic
# order (shuffle=False) so runs are comparable.
imgs, labels = utils.load_cifar(os.path.join('./cifar10', 'test'))
trafos = [utils.to_channel_first, utils.normalize, utils.to_tensor]
trafos = partial(utils.compose, transforms=trafos)
data = utils.DatasetWithTransform(imgs, labels, transform=trafos)
loader = torch.utils.data.DataLoader(data, batch_size=4, shuffle=False)

# Fixed (untrained) convolutions: assigning a 2d kernel to `weight.data[:, :]`
# broadcasts the same kernel into every (out_channel, in_channel) filter slot.
conv_gaussian = torch.nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, bias=False)
conv_gaussian.weight.data[:, :] = gaussian_filter
conv_laplacian = torch.nn.Conv2d(in_channels=6, out_channels=9, kernel_size=3, bias=False)
conv_laplacian.weight.data[:, :] = laplacian_filter
conv_laplacian_alone = torch.nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, bias=False)
conv_laplacian_alone.weight.data[:, :] = laplacian_filter
conv_gaussian2 = torch.nn.Conv2d(in_channels=6, out_channels=9, kernel_size=3, bias=False)
conv_gaussian2.weight.data[:, :] = gaussian_filter