def dataloader_show(dataloader, pic_number):
    data_iter = iter(dataloader)
    images, labels = next(data_iter)
    fig, axes = plt.subplots(figsize=(10, 4), ncols=pic_number)
    for ii in range(pic_number):
        ax = axes[ii]
        helper.imshow(images[ii], ax=ax, normalize=True)
def show_example(dataset):
    """Show a randomly chosen image from the dataset before normalization."""
    m = len(dataset)
    index = random.randint(0, m - 1)  # randint is inclusive on both ends; m would be out of range
    img, _ = dataset[index]
    imshow(img)
def load_image(image_name, show=False, gray=True, title=""):
    if gray:
        plt.gray()
    image_path = './images_in/' + image_name
    if image_name.endswith(".npy"):
        img = np.load(image_path)
    elif image_name.endswith(".png"):
        img = mpng.imread(image_path)
    else:
        raise ValueError(
            "File name must end with either a .png or .npy extension")
    if show:
        h.imshow(img, t=title)
    return img
def sanity_check(image_path):
    probs, labels = predict(image_path, model, args.top_k)
    ps = probs.cpu().detach().numpy()[0].tolist()
    npar = labels.cpu().numpy()[0].tolist()
    # Map predicted indices back to class labels, then to human-readable names
    inv_mapping = {v: k for k, v in model.class_to_idx.items()}
    names = [cat_to_name[str(inv_mapping[i])] for i in npar]
    # Show the image on top and the top-k probabilities as a bar chart below
    h.imshow(h.process_image(image_path), ax=plt.subplot(2, 1, 1))
    plt.title(names[0])
    plt.subplot(2, 1, 2)
    sb.barplot(y=names, x=ps, color=sb.color_palette()[0])
    plt.show()
def H_inv(data, verbose=True, in_dB=True):
    # Given transfer function H(z)
    zeroes = [
        h.exp_img(0.9, pi / 2),
        h.exp_img(0.9, -pi / 2),
        h.exp_img(0.95, pi / 8),
        h.exp_img(0.95, -pi / 8),
    ]
    poles = [0, -0.99, -0.99, 0.9]  # 2x -0.99??
    num = np.poly(zeroes)
    denum = np.poly(poles)

    # Inverse transfer function: swap poles/zeroes (and num/denum).
    # The inverse is stable iff all zeros of H(z) lie strictly inside the
    # unit circle, i.e. H(z) is minimum-phase.
    zeroes_inv = poles
    poles_inv = zeroes
    num_inv = denum
    denum_inv = num

    if verbose:
        # Verify pole stability of the inverse filter
        pole_stable = all(np.abs(pole) < 1 for pole in poles_inv)
        print("Filter Stable" if pole_stable else "Filter Unstable")

        # Print zplane for the TF and the inverse TF
        # zplane(num, denum, t="H(z) zplane")
        zplane(num_inv, denum_inv, t="H(z)^-1 zplane")
        # h.plot_filter(num, denum, t="H(z) (original) transfer function", in_dB=in_dB)
        # h.plot_filter(num_inv, denum_inv, t="H(z)^-1 (inverse) transfer function", in_dB=in_dB)

    data_filtered = signal.lfilter(num_inv, denum_inv, data)
    h.imshow(data_filtered, t="After H(z)^-1 filter")
    return data_filtered
def rotate90(data, testing=False):
    # Rotation matrix [[cos(90), -sin(90)], [sin(90), cos(90)]]; in image
    # coordinates (y axis pointing down) this appears as a clockwise turn.
    rot_mat = [
        [0, -1],
        [1, 0],
    ]

    # The complete image has no third dimension, unlike the rotation test image
    if testing:
        x_size, y_size, z_size = data.shape
    else:
        x_size, y_size = data.shape
    x_half = int((x_size - 1) / 2)
    y_half = int((y_size - 1) / 2)

    if testing:
        data_rotated = np.zeros((y_size, x_size, z_size))
    else:
        data_rotated = np.zeros((y_size, x_size))

    for y in range(0, y_size):
        for x in range(0, x_size):
            # Compute coordinates relative to the image centre
            x_centered = x - x_half
            y_centered = y - y_half
            # Rotate about the origin
            new_centered_pos = np.matmul(rot_mat,
                                         np.array([x_centered, y_centered]))
            # Translate back into array indices
            new_x_ind = new_centered_pos[0] + x_half + 1
            new_y_ind = new_centered_pos[1] + y_half
            data_rotated[new_y_ind][new_x_ind] = data[y][x]

    h.imshow(data_rotated, t="After 90 degree rotation")
    return data_rotated
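def rotate90_numpy(data):
    # Sketch of a library-based alternative (an addition, not the original
    # author's method): np.rot90 performs the same quarter-turn without the
    # per-pixel loop. k=-1 rotates clockwise under the usual y-down image
    # convention; verify against rotate90 above before swapping it in.
    return np.rot90(data, k=-1)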
def compress_image(data, compress=True, compression_value=0.5,
                   passing_matrix=None, verbose=False):
    if compress:
        # Find the covariance matrix, then its eigenvalues and eigenvectors;
        # the transposed eigenvector basis is the passing (change-of-basis) matrix
        cov_matrix = np.cov(data, rowvar=True)
        eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
        passing_matrix = np.transpose(eigenvectors)
    else:
        # Since the original basis is orthogonal, inverse = transpose
        passing_matrix = np.transpose(passing_matrix)
        zero_appending_matrix = np.zeros(
            (int(passing_matrix.shape[0] - data.shape[0]), data.shape[1]))
        data = np.append(data, zero_appending_matrix, axis=0)
        # Recover the zero ratio that was used when compressing the image
        compression_value = float(zero_appending_matrix.shape[0] /
                                  passing_matrix.shape[0])

    data_compressed = np.matmul(passing_matrix, data)

    # Only keep the rows of the matrix that were not zeroed out
    if compress:
        data_compressed = data_compressed[
            0:int((1 - compression_value) * data_compressed.shape[0])]

    if verbose:
        name = "Compressed image with %.1f compression ratio" % compression_value
        h.imshow(data_compressed, t=name)

    return data_compressed, passing_matrix
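def compress_roundtrip_demo(img):
    # Usage sketch (an addition for illustration, not part of the original
    # pipeline): compress with the PCA basis, then decompress by reusing the
    # returned passing matrix. `img` is assumed to be a 2-D grayscale array.
    compressed, passing = compress_image(img, compress=True,
                                         compression_value=0.5)
    restored, _ = compress_image(compressed, compress=False,
                                 passing_matrix=passing)
    return restored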
train_transforms = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

test_transforms = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor()
])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=32)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)

# Change this to the trainloader or testloader
data_iter = iter(testloader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10, 4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax, normalize=False)
                                               shuffle=True)

# Download the test dataset
testdata = datasets.FashionMNIST(
    "~/.pytorch/F_MNIST_data/",
    download=True,
    train=False,
    transform=transformer,
)
testdownloader = tch.utils.data.DataLoader(testdata, batch_size=64, shuffle=True)

# View one of the images
img, label = next(iter(traindownloader))
helper.imshow(img[10, :])
print2(label[10])
plt.show()


# Define the new classifier class
class MyNeuroNetwork(nn.Module):
    _inputs = 784
    _neuron1 = 128
    _neuron2 = 64
    _neuron3 = 32
    _output = 10

    def __init__(self):
        super().__init__()
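        # Sketch (an assumption; the original snippet is truncated here):
        # one plausible wiring of the layer sizes declared above into
        # fully connected layers.
        self.fc1 = nn.Linear(self._inputs, self._neuron1)
        self.fc2 = nn.Linear(self._neuron1, self._neuron2)
        self.fc3 = nn.Linear(self._neuron2, self._neuron3)
        self.fc4 = nn.Linear(self._neuron3, self._output)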
import helper

parser = argparse.ArgumentParser(description='Image Classifier')
parser.add_argument('--inp_image', type=str,
                    default='flowers/valid/1/image_06755.jpg',
                    help='Path to the input image')
parser.add_argument('--checkpoint', type=str, default='trained1.pth',
                    help='Checkpoint file to load')
parser.add_argument('--gpu', type=str, default='cpu',
                    help='Device to run inference on')
parser.add_argument('--json_class', type=str, default='cat_to_name.json',
                    help='JSON mapping of class keys to names')
parser.add_argument('--top_k', type=int, default=5,
                    help='Number of top classes and probabilities to show')
args = parser.parse_args()

class_to_name = helper.load_class(args.json_class)
model = helper.load(args.checkpoint)
print(model)
vals = torch.load(args.checkpoint)

image = helper.process_image(args.inp_image)
helper.imshow(image)

probs, classes = helper.predict(args.inp_image, model, args.top_k, args.gpu)
print(probs)
print(classes)
helper.display_image(args.inp_image, class_to_name, classes, probs)
# TODO: Using the image datasets and the transforms, define the dataloaders
trainloader = DataLoader(image_datasets_train, batch_size=64, shuffle=True)
validloader = DataLoader(image_datasets_valid, batch_size=64, shuffle=True)
testloader = DataLoader(image_datasets_test, batch_size=32, shuffle=False)

# In[4]:

data_iter = iter(testloader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10, 4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax)

# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name.
# You can find this in the file `cat_to_name.json`. It's a JSON object which
# you can read in with the [`json` module](https://docs.python.org/2/library/json.html).
# This will give you a dictionary mapping the integer-encoded categories to
# the actual names of the flowers.

# In[5]:

with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)

# # Building and training the classifier
#
def denoise(data, trans_bi=False, by_hand=False, verbose=True, show_plot=True):
    fd_pass = 500
    fd_stop = 750
    fe = 1600
    w = 1 / fe
    wd_pass = fd_pass
    wd_stop = fd_stop
    g_pass = 0.5
    g_stop = 40

    if trans_bi:
        if not by_hand:
            # Frequency pre-warping ("gauchissement")
            wa_pass = h.gauchissement(fd_pass, fe)
            if verbose:
                print(wa_pass)

            # Apply the bilinear transform H(s) -> H(z)
            z = sp.Symbol('z')
            s = 2 * fe * (z - 1) / (z + 1)
            H = 1 / ((s / wa_pass)**2 + np.sqrt(2) * (s / wa_pass) + 1)
            H = sp.simplify(H)
            if verbose:
                print(H)

            # Separate num and denum into fractions
            num, denum = sp.fraction(H)
            # Put them in polynomial form
            num = sp.poly(num)
            denum = sp.poly(denum)
            # Extract all coefficients and write them as np.arrays
            k = 1 / 2.3914
            num = np.float64(np.array(num.all_coeffs())) * k
            denum = np.float64(np.array(denum.all_coeffs())) * k
            if verbose:
                print("Num and Denum: " + str(num) + ", " + str(denum))

            # Extract zeros and poles by finding the roots of num and denum
            zeros = np.roots(num)
            poles = np.roots(denum)
            if verbose:
                print("Zeros and poles: " + str(zeros) + ", " + str(poles))
                zplane(num, denum,
                       t="zPlane 2nd order Butterworth bilinear filter")
                h.plot_filter(num, denum,
                              t="2nd order Butterworth bilinear filter",
                              in_dB=True, in_freq=True, fe=fe)
        else:
            # Same filter, derived by hand
            zeros = [-1, -1]
            poles = [complex(-0.2314, 0.3951), complex(-0.2314, -0.3951)]
            k = 1 / 2.39
            num = k * np.poly(zeros)
            denum = np.poly(poles)
            if verbose:
                print("Num and Denum: " + str(num) + ", " + str(denum))
                zplane(num, denum,
                       t="Butterworth order 2 (bilinear trans.) zplane")
                h.plot_filter(num, denum,
                              t="Butterworth order 2 (bilinear trans.)",
                              in_dB=True, in_freq=True, fe=fe)

        data_denoised = signal.lfilter(num, denum, data)
        if show_plot:
            h.imshow(data_denoised,
                     t="After Butterworth order 2 bilinear trans. filter")
    else:
        order = np.zeros(4)
        wn = np.zeros(4)
        # Butterworth
        order[0], wn[0] = signal.buttord(wd_pass, wd_stop, g_pass, g_stop, False, fe)
        # Chebyshev type 1
        order[1], wn[1] = signal.cheb1ord(wd_pass, wd_stop, g_pass, g_stop, False, fe)
        # Chebyshev type 2
        order[2], wn[2] = signal.cheb2ord(wd_pass, wd_stop, g_pass, g_stop, False, fe)
        # Elliptic
        order[3], wn[3] = signal.ellipord(wd_pass, wd_stop, g_pass, g_stop, False, fe)

        lowest_order_index = np.argmin(order)
        if verbose:
            print(order)
            print(lowest_order_index)
            print(wn)

        # Design the lowest-order filter; the orders are stored in a float
        # array, so cast back to int for the design functions
        if lowest_order_index == 0:
            filter_name = "Butterworth filter order {order}".format(order=order[0])
            num, denum = signal.butter(int(order[0]), wn[0], 'lowpass',
                                       False, 'ba', fe)
        elif lowest_order_index == 1:
            filter_name = "Cheby1 filter order {order}".format(order=order[1])
            num, denum = signal.cheby1(int(order[1]), g_pass, wn[1], 'lowpass',
                                       False, 'ba', fe)
        elif lowest_order_index == 2:
            filter_name = "Cheby2 filter order {order}".format(order=order[2])
            num, denum = signal.cheby2(int(order[2]), g_stop, wn[2], 'lowpass',
                                       False, 'ba', fe)
        else:
            filter_name = "Ellip filter order {order}".format(order=order[3])
            num, denum = signal.ellip(int(order[3]), g_pass, g_stop, wn[3],
                                      'lowpass', False, 'ba', fe)

        if verbose:
            print(filter_name)

        filter_response_str = "Filter response " + filter_name
        zplane_str = "zPlane " + filter_name
        h.plot_filter(num, denum, t=filter_response_str,
                      in_dB=True, in_freq=True, fe=fe)
        zplane(num, denum, t=zplane_str)

        data_denoised = signal.lfilter(num, denum, data)
        if show_plot:
            h.imshow(data_denoised, t="After python function noise filter")

    return data_denoised
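def prewarp(fd, fe):
    # Illustrative sketch of the frequency pre-warping step (an assumption
    # about what h.gauchissement computes, not taken from the original): the
    # bilinear transform compresses the frequency axis, so the analog design
    # frequency is pre-warped as wa = 2*fe*tan(pi*fd/fe) before the transform.
    return 2 * fe * np.tan(np.pi * fd / fe)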
import torch
from torchvision import datasets, transforms
import helper

transform = transforms.Compose([
    transforms.Resize((255, 255)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
dataset = datasets.ImageFolder('Cat_dog_data/train', transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)

images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=True)

# Using transforms, we can augment the training data to extract more diverse
# information. But we should not augment test data beyond resizing, cropping,
# and the like, because the test set should represent the images we would
# encounter in the real world.
data_dir = 'Cat_dog_data'
train_transforms = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])
test_transforms = transforms.Compose([
    transforms.Resize(255),
    img_out_decompressed, passing_matrix = functions.compress_image(
        img_out_compressed,
        compress=False,
        passing_matrix=passing_matrix,
        verbose=verbose)
    plt.show()  # Necessary to see all plots and images
else:
    verbose = True

plt.show()  # Not sure why, but an empty figure always shows up at the beginning

# Aberration
targ_img = img_aberration if testing else img_complete
h.imshow(targ_img, t="Original")
img_out_filtered = functions.H_inv(targ_img, verbose=verbose, in_dB=True)
plt.show()

# Rotation
targ_img = img_rotate if testing else img_out_filtered
h.imshow(targ_img, t="Original")
img_out_rotated = functions.rotate90(targ_img, testing)
plt.show()

# Denoise (1): bilinear transform
targ_img = img_noise if testing else img_out_rotated
img_out_denoised_transbi = functions.denoise(targ_img,
                                          batch_size=64,
                                          shuffle=True)

# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/',
                                download=True,
                                train=False,
                                transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)

# %% [markdown]
# Here we can see one of the images.

# %%
image, label = next(iter(trainloader))
helper.imshow(image[0, :])

# %% [markdown]
# ## Building the network
#
# Here you should define your network. As with MNIST, each image is 28x28, for
# a total of 784 pixels, and there are 10 classes. You should include at least
# one hidden layer. We suggest using ReLU activations for the layers and
# returning the logits or log-softmax from the forward pass. It's up to you
# how many layers you add and the size of those layers.

# %%
# TODO: Define your network architecture here
class Classifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
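        # Sketch (an assumption; the original cell is truncated after fc1):
        # one possible completion with the suggested hidden layers, ReLU
        # activations, and a log-softmax output. Assumes
        # `import torch.nn.functional as F` at the top of the notebook.
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(x.shape[0], -1)  # flatten 28x28 images to 784 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return F.log_softmax(self.fc3(x), dim=1)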
data_dir = 'MarioMapImg'

# train_transforms = transforms.Compose([
#     transforms.RandomRotation(30),
#     transforms.RandomHorizontalFlip(),
#     transforms.ToTensor()])
# test_transforms = transforms.Compose([
#     transforms.Resize((224, 3056)),
#     transforms.ToTensor()])
# train_data = datasets.ImageFolder(data_dir + '/level1',
#                                   transform=train_transforms)
# test_data = datasets.ImageFolder(data_dir + '/level2',
#                                  transform=test_transforms)
#
# # Data loading
# trainloader = torch.utils.data.DataLoader(train_data, batch_size=32)
# testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
# data_iter = iter(testloader)
# images, labels = next(data_iter)

transform = transforms.Compose([
    transforms.Resize((224, 3056)),
    transforms.ToTensor()])
dataset = datasets.ImageFolder(data_dir, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
import helper

# Define a transform to normalize the data (FashionMNIST images are
# single-channel, so Normalize takes one mean and one std)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/',
                                 download=True,
                                 train=True,
                                 transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

""" See one image in the data set
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
"""

# Create the model
model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(),
                      nn.Linear(128, 64), nn.ReLU(),
                      nn.Linear(64, 10), nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()

images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
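# Sketch of the usual next step (an addition; the original snippet is
# truncated here): a forward pass through the LogSoftmax model and the
# matching NLLLoss computation on the flattened batch.
logps = model(images)
loss = criterion(logps, labels)
print(loss.item())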