        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(samp_size), cmap='gray')
    return fig


def SaveFig(fname, samples, fig_size, samp_size):
    fig = PlotFig(samples, fig_size, samp_size)
    plt.savefig(fname, bbox_inches='tight')
    plt.close(fig)


if __name__ == "__main__":
    # Dataset
    MNISTtools.downloadMNIST(path='MNIST_data', unzip=True)
    x_train, y_train = MNISTtools.loadMNIST(dataset="training", path="MNIST_data")
    x_train = x_train.astype(np.float32) / 255.

    # Create NN Model
    nn = NeuralNetwork.NN(784, 128, 784, "sigmoid")

    # Training the Model
    out_folder = 'out/'
    import os
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    loss_rec = []
    batch_size = 16
    for i in range(10001):
        # Sample Data Batch
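# --- Sketch (not part of the original script): one plausible way to draw the data
# --- batch that the truncated loop above is about to sample. This assumes x_train is
# --- an (N, 784) float array; sample_batch is a hypothetical helper name, and the
# --- training call on NeuralNetwork.NN is left out because its API is not shown here.
import numpy as np

def sample_batch(x, batch_size=16):
    # Draw a random minibatch of rows without replacement.
    idx = np.random.choice(x.shape[0], size=batch_size, replace=False)
    return x[idx]

# Usage inside the loop would look like: x_batch = sample_batch(x_train, batch_size)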
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import MNISTtools
import numpy as np
from matplotlib import pyplot as plt


# In[10]:

xtrain, ltrain = MNISTtools.load(dataset="training", path="../mnist")


# In[11]:

xtrain.shape


# In[12]:

xtrain.size


# In[13]:

MNISTtools.show(xtrain[:, 42])
ltrain[42]


# In[14]:

def normalize_MNIST_images(x):
    return ((x - 127.5) / 127.5).astype(np.float64)
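# --- Sketch (not part of the original notebook): a quick sanity check that the
# --- normalization above maps the raw [0, 255] pixel range onto [-1, 1].
xn = normalize_MNIST_images(xtrain)
print(xn.dtype, xn.min(), xn.max())   # expect float64, roughly -1.0 and 1.0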
import numpy as np
import copy
from matplotlib import pyplot
import numpy.linalg as la
import MNISTtools

xtrain, ltrain = MNISTtools.load()

print("Shape of xtrain:", xtrain.shape)             # Q1
print("Shape of ltrain:", ltrain.shape)             # Q1
print("Size of xtrain:", xtrain.size)               # Q1
print("Size of ltrain:", ltrain.size)               # Q1
print("Dimension of xtrain:", xtrain.ndim)          # Q1
print("Dimension of ltrain:", ltrain.ndim)          # Q1

print("Image of index 42")
MNISTtools.show(xtrain[:, 42])                      # Q2
print("Label of image of index 42:", ltrain[42])    # Q2

print("Maximum of xtrain:", xtrain.max())           # Q3
print("Minimum of xtrain:", xtrain.min())           # Q3
print("Type of xtrain:", type(xtrain))              # Q3


def normalize_MNIST_images(x):                      # Q4
    x = x.astype(np.float32)
    x = x * (2 / 255) - 1
    return x


xtrain = normalize_MNIST_images(xtrain)


def label2onehot(lbl):                              # Q5
    d = np.zeros((lbl.max() + 1, lbl.size))
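# --- Sketch (not part of the original script): one way label2onehot could be completed,
# --- assuming lbl is a 1-D integer array of class labels. The matrix d gets one column
# --- per sample, with a single 1 placed in the row given by that sample's label.
def label2onehot_sketch(lbl):
    d = np.zeros((lbl.max() + 1, lbl.size))
    d[lbl, np.arange(lbl.size)] = 1    # fancy indexing: one entry set per column
    return d

# Example: label2onehot_sketch(np.array([1, 0, 2])) has ones at (1,0), (0,1) and (2,2).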
# Load modules
import numpy as np
import torch
import matplotlib.pyplot as plt
import MNISTtools
import torch.nn as nn
import torch.nn.functional as F

"""Let us first load and normalize the MNIST training and testing data."""

def normalize_MNIST_images(x):
    return 2 * x.astype(np.float32) / 255. - 1

xtrain, ltrain = MNISTtools.load(dataset="training")
xtrain = normalize_MNIST_images(xtrain)
xtest, ltest = MNISTtools.load(dataset='testing')
xtest = normalize_MNIST_images(xtest)

"""#### Question 1

Torch expects the input of a convolutional layer to be stored in the following format

$$ \texttt{Batch size} \times \texttt{Number of input channels} \times \texttt{Image height} \times \texttt{Image width} $$

The number of input channels in our case is 1, because MNIST is composed of grayscale images; it would have been 3 if the images were in RGB color. In deeper layers, the number of input channels is the number of input feature maps.

Reorganise the tensors _xtrain_ and _xtest_ accordingly. Hint:
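# --- Sketch (not part of the original notebook): one way to reorganise the data,
# --- assuming MNISTtools.load stores each image as a 784-entry column (so xtrain has
# --- shape (784, N), consistent with the xtrain[:, 42] indexing used earlier). The
# --- names xtrain_t and xtest_t are new here.
xtrain_t = torch.from_numpy(xtrain.T.reshape(-1, 1, 28, 28))
xtest_t = torch.from_numpy(xtest.T.reshape(-1, 1, 28, 28))
print(xtrain_t.size())   # expected: torch.Size([60000, 1, 28, 28]) for MNIST training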
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.utils as vutils
import MNISTtools
import numpy as np
import models_mnist

# Dataset
MNISTtools.downloadMNIST(path='MNIST_data', unzip=True)
MNISTtools.downloadMNIST(path='FMNIST_data', unzip=True, fashion=True)
x1_train, y1_train = MNISTtools.loadMNIST(dataset="training", path="MNIST_data")
x1_test, y1_test = MNISTtools.loadMNIST(dataset="testing", path="MNIST_data")
x2_train, y2_train = MNISTtools.loadMNIST(dataset="training", path="FMNIST_data")
x2_test, y2_test = MNISTtools.loadMNIST(dataset="testing", path="FMNIST_data")

x1_train = x1_train.astype(np.float32) / 255.
x2_train = x2_train.astype(np.float32) / 255.
x1_test = x1_test.astype(np.float32) / 255.
x2_test = x2_test.astype(np.float32) / 255.

# Prepare Synchronized Dataset
x1_class = []
x2_class = []
for i in range(10):
    x1_c = x1_train[y1_train == i]
    x2_c = x2_train[y2_train == i]
    x1_class.append(x1_c[:5000])
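# --- Sketch (not part of the original script): one plausible continuation of the
# --- class-wise pairing, assuming the goal is label-aligned (MNIST, Fashion-MNIST)
# --- pairs. Equal per-class counts are kept so samples line up, and a single shared
# --- permutation shuffles both arrays without breaking the pairing. The names
# --- x1_sync, x2_sync and perm are new here.
x1_class, x2_class = [], []
for i in range(10):
    x1_c = x1_train[y1_train == i][:5000]
    x2_c = x2_train[y2_train == i][:5000]
    n = min(len(x1_c), len(x2_c))              # equal counts per class keep pairs aligned
    x1_class.append(x1_c[:n])
    x2_class.append(x2_c[:n])
x1_sync = np.concatenate(x1_class, axis=0)
x2_sync = np.concatenate(x2_class, axis=0)
perm = np.random.permutation(len(x1_sync))     # one permutation applied to both arrays
x1_sync, x2_sync = x1_sync[perm], x2_sync[perm]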
import numpy as np
import MNISTtools


def OneHot(y):
    # Map integer labels to one-hot rows of a 10x10 identity matrix.
    return np.eye(10, dtype=np.float32)[y]


def Noise(x):
    # Corrupt the input with additive Gaussian noise (mean 0, std 0.3).
    mean = 0
    stddev = 0.3
    noise = np.random.normal(mean, stddev, x.shape)
    return x + noise


if __name__ == "__main__":
    # Dataset
    MNISTtools.downloadMNIST(path='MNIST_data', unzip=True)
    y_train, _ = MNISTtools.loadMNIST(dataset="training", path="MNIST_data")
    y_test, _ = MNISTtools.loadMNIST(dataset="testing", path="MNIST_data")

    # Data Processing: clean images are the targets (y), noisy images the inputs (x).
    y_train = y_train.astype(np.float32) / 255.
    y_test = y_test.astype(np.float32) / 255.

    x_train = []
    x_test = []
    for y in y_train:
        x_train.append(Noise(y))
    for y in y_test:
        x_test.append(Noise(y))
    x_train = np.array(x_train)
    x_test = np.array(x_test)
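# --- Note (not part of the original script): np.random.normal already draws a noise
# --- array of shape x.shape, so Noise() also works on the full dataset at once and the
# --- per-image loops above could be replaced by single vectorized calls. The clipping
# --- back to [0, 1] is an optional extra step, not in the original.
x_train = np.clip(Noise(y_train), 0., 1.)
x_test = np.clip(Noise(y_test), 0., 1.)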