def increasing_blur_data(nnet, Xset, Tset, var_range=(0.001, 0.05),
                         num_steps=5, trials_per_step=5,
                         directory='data/', name='data'):
    """Sweep Gaussian blur over var_range and dump per-step accuracies to CSV."""
    change = []
    progress = FloatProgress(min=0, max=(num_steps * trials_per_step))
    display(progress)
    for var_step in np.linspace(var_range[0], var_range[1], num_steps):
        filename = directory + name + '-' + str(var_step) + '.csv'
        accuracy = []
        for trial in range(trials_per_step):
            Xcopy = add_image_blur(Xset, var_step)
            try:
                percent = ml.percent_correct(nnet.use(Xcopy)[0], Tset)
            except Exception:
                # Fall back to batched evaluation when the full set does not
                # fit through nnet.use in one pass.
                percent = ml.percent_correct(ml.batched_use(nnet, Xcopy), Tset)
            accuracy.append(percent)
            progress.value += 1
        change.append(accuracy)
        # Checkpoint all results gathered so far after each blur step.
        pd.DataFrame(change).to_csv(filename, index=False)

    # Baseline accuracy on the unperturbed set.
    try:
        natural_per = ml.percent_correct(nnet.use(Xset)[0], Tset)
    except Exception:
        natural_per = ml.percent_correct(ml.batched_use(nnet, Xset), Tset)

    filename = directory + name + '.metadata'
    with open(filename, 'w') as meta_file:
        print(f'natural: ml.percent_correct(nnet.use(Xset)[0], Tset) = {natural_per}',
              file=meta_file)
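
# A read-back helper for the CSVs written above (a minimal sketch; it assumes
# the in-loop checkpointing of increasing_blur_data, where row i of the final
# checkpoint holds the trials_per_step accuracies for blur step i):
def load_blur_data_example(final_var_step, directory='data/', name='data'):
    """Return (means, stds) of accuracy per blur step from the last checkpoint."""
    df = pd.read_csv(directory + name + '-' + str(final_var_step) + '.csv')
    return df.mean(axis=1).to_numpy(), df.std(axis=1).to_numpy()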
def run_increasing_noise(nnet, Xset, Tset, var_range=(0.001, 0.05),
                         num_steps=5, trials_per_step=5):
    """Sweep Gaussian noise over var_range; return (x, mean accuracy, std) arrays."""
    change = []
    progress = FloatProgress(min=0, max=(num_steps * trials_per_step))
    display(progress)
    for var_step in np.linspace(var_range[0], var_range[1], num_steps):
        accuracy = []
        for trial in range(trials_per_step):
            Xcopy = add_image_noise(Xset, var_step)
            try:
                percent = ml.percent_correct(nnet.use(Xcopy)[0], Tset)
            except Exception:
                percent = ml.percent_correct(ml.batched_use(nnet, Xcopy, 1000), Tset)
            accuracy.append(percent)
            progress.value += 1
        change.append(accuracy)

    change = np.array(change)
    x = np.linspace(var_range[0], var_range[1], num_steps)
    y = np.mean(change, axis=1)
    yerr = np.std(change, axis=1)
    return (x, y, yerr)
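
# Example usage (a minimal sketch; `nnet`, `Xtest`, and `Ttest` are assumed to
# be a trained network and a held-out set loaded elsewhere, as in the scripts
# further down this file):
def plot_noise_sweep_example(nnet, Xtest, Ttest, name='noise-sweep.pdf'):
    """Run the noise sweep above and plot mean accuracy with error bars."""
    x, y, yerr = run_increasing_noise(nnet, Xtest, Ttest,
                                      var_range=(0.001, 0.05),
                                      num_steps=5, trials_per_step=5)
    plt.figure(figsize=(6, 4))
    plt.errorbar(x, y, yerr=yerr, marker='.', lw=1, capsize=5, capthick=1.5)
    plt.xlabel('Variance of added Gaussian noise')
    plt.ylabel('Accuracy (%)')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(name, bbox_inches='tight')
    plt.show()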
def change_in_pixels_data(nnet, Xset, Tset, end_pixel_val=10, trials_per_pixel=5,
                          directory='data/', name='data'):
    """For each pixel perturbation, vary the number of changed pixels and dump
    per-count accuracies to one CSV per perturbation type."""
    perturbs = ['stuck', 'dead', 'hot']
    progress = FloatProgress(min=0,
                             max=(end_pixel_val * trials_per_pixel * len(perturbs)))
    display(progress)
    for i, perturb in enumerate(perturbs):
        filename = directory + name + '-' + perturb + '.csv'
        change = []
        for pixels in range(end_pixel_val):
            accuracy = []
            for trial in range(trials_per_pixel):
                # 'pertrub' (sic) matches the keyword in change_pixel's signature.
                Xcopy = change_pixel(Xset, pixels_to_change=pixels + 1,
                                     pertrub=perturb)
                try:
                    percent = ml.percent_correct(nnet.use(Xcopy)[0], Tset)
                except Exception:
                    percent = ml.percent_correct(ml.batched_use(nnet, Xcopy), Tset)
                accuracy.append(percent)
                progress.value += 1
            change.append(accuracy)
        pd.DataFrame(change).to_csv(filename, index=False)

    # Baseline accuracy on the unperturbed set.
    try:
        natural_per = ml.percent_correct(nnet.use(Xset)[0], Tset)
    except Exception:
        natural_per = ml.percent_correct(ml.batched_use(nnet, Xset), Tset)

    filename = directory + name + '.metadata'
    with open(filename, 'w') as meta_file:
        print(f'natural: ml.percent_correct(nnet.use(Xset)[0], Tset) = {natural_per}',
              file=meta_file)
def test_increasing_noise(nnet, Xset, Tset, var_range=(0.001, 0.05), num_steps=5,
                          trials_per_step=5, name='img.pdf',
                          model_name='Augmented Model'):
    """Run a noise sweep and plot it against the model's accuracy on clean data."""
    noise_results = run_increasing_noise(nnet, Xset, Tset, var_range,
                                         num_steps, trials_per_step)
    try:
        natural_per = ml.percent_correct(nnet.use(Xset)[0], Tset)
    except Exception:
        natural_per = ml.percent_correct(ml.batched_use(nnet, Xset, 100), Tset)
    # plot_increasing_noise expects a list of (label, results) pairs,
    # as in generate_increasing_noise_plot below.
    plot_increasing_noise(natural_per, [(model_name, noise_results)],
                          var_range, num_steps, name)
def change_in_pixels_plot(nnet, Xset, Tset, end_pixel_val=10, trials_per_pixel=5,
                          name='img.pdf'):
    """Plot accuracy vs. number of changed pixels for each perturbation type."""
    perturbs = ['stuck', 'dead', 'hot']
    progress = FloatProgress(min=0,
                             max=(end_pixel_val * trials_per_pixel * len(perturbs)))
    display(progress)
    plt.figure(figsize=(6, 4))
    for i, perturb in enumerate(perturbs):
        change = []
        for pixels in range(end_pixel_val):
            accuracy = []
            for trial in range(trials_per_pixel):
                Xcopy = change_pixel(Xset, pixels_to_change=pixels + 1,
                                     pertrub=perturb)
                try:
                    percent = ml.percent_correct(nnet.use(Xcopy)[0], Tset)
                except Exception:
                    percent = ml.percent_correct(ml.batched_use(nnet, Xcopy), Tset)
                accuracy.append(percent)
                progress.value += 1
            change.append(accuracy)
        change = np.array(change)
        x = np.arange(1, change.shape[0] + 1)
        y = np.mean(change, axis=1)
        yerr = np.std(change, axis=1)
        plt.errorbar(x, y, yerr=yerr, marker='.', lw=1, capsize=5, capthick=1.5,
                     markeredgecolor='k', label=perturb, color=COLORS[i])

    try:
        natural_per = ml.percent_correct(nnet.use(Xset)[0], Tset)
    except Exception:
        natural_per = ml.percent_correct(ml.batched_use(nnet, Xset), Tset)
    plt.hlines(natural_per, 1, change.shape[0], label='natural',
               linestyle='dashed', alpha=0.3)
    plt.xticks(np.arange(1, end_pixel_val + 1))
    plt.xlabel('Number of Pixels Changed')
    plt.ylabel(r'Accuracy ( \% )')
    plt.legend(loc='best', fontsize='medium')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(name, bbox_inches='tight')
    plt.show()
def test_increasing_blur(nnet, Xset, Tset, var_range=(0.2, 0.7), num_steps=5,
                         trials_per_step=5, name='img.pdf'):
    """Plot accuracy as Gaussian blur increases over var_range."""
    change = []
    progress = FloatProgress(min=0, max=(num_steps * trials_per_step))
    display(progress)
    for var_step in np.linspace(var_range[0], var_range[1], num_steps):
        accuracy = []
        for trial in range(trials_per_step):
            Xcopy = add_image_blur(Xset, var_step)
            try:
                percent = ml.percent_correct(nnet.use(Xcopy)[0], Tset)
            except Exception:
                percent = ml.percent_correct(ml.batched_use(nnet, Xcopy), Tset)
            accuracy.append(percent)
            progress.value += 1
        change.append(accuracy)

    change = np.array(change)
    x = np.linspace(var_range[0], var_range[1], num_steps)
    y = np.mean(change, axis=1)
    yerr = np.std(change, axis=1)

    plt.figure(figsize=(6, 4))
    plt.errorbar(x, y, yerr=yerr, marker='.', lw=1, capsize=5, capthick=1.5,
                 markeredgecolor='k', color=COLORS[0])
    try:
        natural_per = ml.percent_correct(nnet.use(Xset)[0], Tset)
    except Exception:
        natural_per = ml.percent_correct(ml.batched_use(nnet, Xset), Tset)
    plt.hlines(natural_per, var_range[0], var_range[1], label='natural',
               linestyle='dashed', alpha=0.3)
    plt.xticks(np.linspace(var_range[0], var_range[1], num_steps))
    plt.xlabel('Standard deviation for Gaussian kernel')
    plt.ylabel(r'Accuracy ( \% )')
    plt.legend(loc='best', fontsize='medium')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(name, bbox_inches='tight')
    plt.show()
def classified_diff(nnet, Xset, Xcopy, Tset):
    """Return indices of samples classified correctly on Xset but differently on
    Xcopy, plus the percentage of predictions that changed."""
    try:
        Xset_classes, _ = nnet.use(Xset)
        Xcopy_classes, _ = nnet.use(Xcopy)
    except Exception:
        Xset_classes = ml.batched_use(nnet, Xset)
        Xcopy_classes = ml.batched_use(nnet, Xcopy)
    diff_index = [i for i in range(len(Xset_classes))
                  if Xset_classes[i] == Tset[i]
                  and Xset_classes[i] != Xcopy_classes[i]]
    return diff_index, 100 - ml.percent_correct(Xset_classes, Xcopy_classes)
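
# Example: which correctly-classified samples flip under a perturbation?
# (a minimal sketch; `nnet`, `Xtest`, and `Ttest` are assumed as elsewhere,
# and the imshow call assumes single-channel images such as MNIST)
def show_flipped_examples(nnet, Xtest, Ttest, variance=0.05, n_show=3):
    """Perturb the test set with noise and display a few samples that flipped."""
    Xnoisy = add_image_noise(Xtest, variance)
    flipped, pct_changed = classified_diff(nnet, Xtest, Xnoisy, Ttest)
    print(f'{pct_changed:.2f}% of predictions changed; '
          f'{len(flipped)} of those were correct before the perturbation')
    for i in flipped[:n_show]:
        plt.imshow(Xtest[i].squeeze(), cmap='gray')
        plt.title(f'sample {i}')
        plt.show()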
def generate_increasing_noise_plot():
    full_start = time.time()

    print('Loading data', flush=True)
    # Xtrain, Ttrain = dm.load_cifar_10('../notebooks/cifar-10-batches-py/data_batch_*')
    # Xtest, Ttest = dm.load_cifar_10('../notebooks/cifar-10-batches-py/test_batch')
    Xtrain, Ttrain, Xtest, Ttest, _, _ = dm.load_mnist('../notebooks/mnist.pkl.gz')
    lessnoise_Xtrain = dm.apply_manipulations(
        Xtrain, per_func=lambda x: per.add_image_noise(x, variance=0.025))
    lessnoise_Xtest = dm.apply_manipulations(
        Xtest, per_func=lambda x: per.add_image_noise(x, variance=0.025))
    noise_Xtrain = dm.apply_manipulations(
        Xtrain, per_func=lambda x: per.add_image_noise(x, variance=0.05))
    noise_Xtest = dm.apply_manipulations(
        Xtest, per_func=lambda x: per.add_image_noise(x, variance=0.05))
    morenoise_Xtrain = dm.apply_manipulations(
        Xtrain, per_func=lambda x: per.add_image_noise(x, variance=0.1))
    morenoise_Xtest = dm.apply_manipulations(
        Xtest, per_func=lambda x: per.add_image_noise(x, variance=0.1))
    print('Done loading data', flush=True)

    # model = '../notebooks/pretrained_cifar_clean.pkl'
    model = '../notebooks/pretrained_mnist_clean.pkl'
    with open(model, 'rb') as f:
        nnet = torch.load(f)
    nnet.cuda()

    print('Testing loaded network', flush=True)
    clean_pct = ml.percent_correct(ml.batched_use(nnet, Xtest, 1000), Ttest)
    print('Done testing loaded network', flush=True)

    print('Training transfer learning iterations in loop', flush=True)
    var_range = (0.001, 0.1)
    res_list = [('Clean', per.run_increasing_noise(nnet, Xtest, Ttest, var_range,
                                                   trials_per_step=25))]
    for ds, name in zip([lessnoise_Xtrain, noise_Xtrain, morenoise_Xtrain],
                        ['{:.3f}'.format(0.025), '{:.3f}'.format(0.05),
                         '{:.3f}'.format(0.1)]):
        # Reload the pretrained network each iteration so transfer learning
        # starts from the clean weights instead of mutating the shared `nnet`.
        with open(model, 'rb') as f:
            new_model = torch.load(f)
        new_model.cuda()
        # new_model.transfer_learn_setup([256, 512], freeze=True)
        # new_model.train(ds, Ttrain, n_epochs=10, batch_size=200, optim='Adam',
        #                 learning_rate=0.0005, verbose=True)
        new_model.transfer_learn_setup([256], freeze=True)
        new_model.train(ds, Ttrain, n_epochs=20, batch_size=200, optim='Adam',
                        learning_rate=0.0005, verbose=True)
        res_list.append((name, per.run_increasing_noise(new_model, Xtest, Ttest,
                                                        var_range,
                                                        trials_per_step=25)))
        print(res_list[-1])

    print('Generating plot', flush=True)
    per.plot_increasing_noise(clean_pct, res_list, var_range, 5, 'delme.pdf')

    full_end = time.time()
    print('Start time: {}'.format(time.ctime(full_start)), flush=True)
    print('End time: {}'.format(time.ctime(full_end)), flush=True)
    print('Total duration: {} seconds'.format(full_end - full_start), flush=True)
def main():
    full_start = time.time()
    res_file = './training-out.csv'
    fieldnames = ['epochs', 'batch_size', 'learning_rate', 'conv_layers',
                  'conv_kernel_stride', 'max_pool_kernel_stride', 'fc_layers',
                  'training_time', 'final_error', 'train_pct', 'test_pct']
    prep_results_file(res_file, fieldnames)

    # Can change which data is loaded here if you want to work with a clean dataset
    print('Loading data', flush=True)
    # Xtrain, Ttrain = dm.load_cifar_10('../notebooks/new-cifar/1var-noise-train')
    # Xtest, Ttest = dm.load_cifar_10('../notebooks/new-cifar/1var-noise-test')
    Xtrain, Ttrain = dm.load_cifar_10('../notebooks/cifar-10-batches-py/data_batch_*')
    Xtest, Ttest = dm.load_cifar_10('../notebooks/cifar-10-batches-py/test_batch')
    print('Done loading data', flush=True)

    # Hyperparameter grid. Kernel and pooling configurations are keyed by the
    # number of convolutional layers they apply to.
    l_epochs = [10, 20]
    l_batch_size = [125]
    l_rho = [0.0005, 0.001]
    l_conn_layers = [[], [256]]
    l_conv_layers = [[64, 64, 128, 128, 256, 256, 512, 512],
                     [128, 128, 128, 128, 256, 256, 512, 512]]
    l_conv_kernels = {
        '8': [[(5, 1, 2), (5, 1, 2), (3, 1, 1), (3, 1, 1),
               (3, 1, 1), (3, 1, 1), (3, 1, 1), (3, 1, 1)],
              [(3, 1, 1), (3, 1, 1), (3, 1, 1), (3, 1, 1),
               (3, 1, 1), (3, 1, 1), (3, 1, 1), (3, 1, 1)]],
        '6': [[(5, 1, 2), (5, 1, 2), (3, 1, 1), (3, 1, 1), (3, 1, 1), (3, 1, 1)]],
        '3': [[(6, 2), (3, 2), (2, 1)]],
        '2': [[(4, 2), (2, 2)]],
        '1': [[(4, 2)]]
    }
    l_pool_kernels = {
        '8': [[(), (2, 2), (), (2, 2), (), (2, 2), (), (2, 2)]],
        '6': [[(), (2, 2), (), (2, 2), (), (2, 2)]],
        '3': [[(2, 2), (2, 1), ()]],
        '2': [[(2, 1), (2, 1)]],
        '1': [[(2, 1)]]
    }

    n_trials = 0
    for v in l_conv_layers:
        n_trials += (len(l_conv_kernels[str(len(v))])
                     * len(l_pool_kernels[str(len(v))]))
    n_trials *= (len(l_epochs) * len(l_batch_size) * len(l_rho)
                 * len(l_conn_layers))

    trial = 1
    for epochs, batch_size, rho, conv, conn in itertools.product(
            l_epochs, l_batch_size, l_rho, l_conv_layers, l_conn_layers):
        for conv_kernels, pool_kernels in itertools.product(
                l_conv_kernels[str(len(conv))], l_pool_kernels[str(len(conv))]):
            print('\n###### Trying network structure {} out of {} ######'.format(
                trial, n_trials), flush=True)
            results = {
                'epochs': epochs,
                'batch_size': batch_size,
                'learning_rate': rho,
                'conv_layers': conv,
                'conv_kernel_stride': conv_kernels,
                'max_pool_kernel_stride': pool_kernels,
                'fc_layers': conn
            }
            nnet = nnc.NeuralNetwork_Convolutional(
                n_channels_in_image=Xtrain.shape[1],
                image_size=Xtrain.shape[2],
                n_units_in_conv_layers=results['conv_layers'],
                kernels_size_and_stride=results['conv_kernel_stride'],
                max_pooling_kernels_and_stride=results['max_pool_kernel_stride'],
                n_units_in_fc_hidden_layers=results['fc_layers'],
                classes=np.unique(Ttrain),
                use_gpu=True,
                random_seed=12)
            nnet.train(Xtrain, Ttrain,
                       n_epochs=results['epochs'],
                       batch_size=results['batch_size'],
                       optim='Adam',
                       learning_rate=results['learning_rate'],
                       verbose=True)
            train_percent = ml.percent_correct(ml.batched_use(nnet, Xtrain), Ttrain)
            test_percent = ml.percent_correct(ml.batched_use(nnet, Xtest), Ttest)
            results['training_time'] = nnet.training_time
            results['final_error'] = nnet.error_trace[-1].item()
            results['train_pct'] = train_percent
            results['test_pct'] = test_percent
            save_results(res_file, results, fieldnames)
            trial += 1

    full_end = time.time()
    print('Start time: {}'.format(time.ctime(full_start)), flush=True)
    print('End time: {}'.format(time.ctime(full_end)), flush=True)
    print('Total duration: {} seconds'.format(full_end - full_start), flush=True)
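
# prep_results_file and save_results are referenced above but not defined here;
# a minimal sketch of compatible implementations using csv.DictWriter (assumed,
# not necessarily the originals):
import csv

def prep_results_file(res_file, fieldnames):
    """Create the results CSV and write its header row."""
    with open(res_file, 'w', newline='') as out:
        csv.DictWriter(out, fieldnames=fieldnames).writeheader()

def save_results(res_file, results, fieldnames):
    """Append one trial's results as a row in the CSV."""
    with open(res_file, 'a', newline='') as out:
        csv.DictWriter(out, fieldnames=fieldnames).writerow(results)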
def augmented_training(Xtrain, Ttrain, Xtest, Ttest, type='pixel', model='MNIST',
                       technique='constant'):
    """Train models on perturbed data and plot their accuracy on perturbed test sets."""
    if type == 'pixel':
        perturbs = ['stuck', 'dead', 'hot']
        training_vals = np.arange(1, 11)
        test_types = perturbs
        xlabel = 'Num Training Pixel(s)'
        test_pixel_change = 2
    elif type == 'noise':
        perturbs = ['noise']
        training_vals = np.linspace(0.001, 0.05, 5)
        test_types = [0.02550, 0.03775, 0.05000]
        xlabel = 'Training Variance of Noise'
    else:
        perturbs = ['blur']
        training_vals = np.linspace(0.04, 1.00, 5)
        test_types = [0.50, 0.750, 1.00]
        xlabel = 'Standard deviation for Gaussian kernel'

    trials = 25
    progress = FloatProgress(min=0, max=(len(perturbs) * len(training_vals)
                                         * trials * len(test_types)))
    display(progress)

    for i, perturb in enumerate(perturbs):
        natural_acc = []
        # augmented_acc[t, 0, :] holds the mean accuracy and augmented_acc[t, 1, :]
        # the std, for test perturbation t across the training values:
        #   test_perturb_1: [[mean_p1, mean_p2, ...], [std_p1, std_p2, ...]], ...
        augmented_acc = np.zeros((len(test_types), 2, len(training_vals)))
        for p, val in enumerate(training_vals):
            if type == 'pixel':
                # Note: trains with val + 1 changed pixels while the x-axis
                # ticks show val.
                Mtrain = per.change_pixel(Xtrain, pixels_to_change=val + 1,
                                          pertrub=perturb)
            elif type == 'noise':
                Mtrain = per.add_image_noise(Xtrain, val)
            else:
                Mtrain = per.add_image_blur(Xtrain, val)

            if model == 'MNIST':
                if technique == 'incremental':
                    nnet = per.train_incremental_mnist(Xtrain, Ttrain, Mtrain)
                elif technique == 'transfer':
                    nnet = nnc.NeuralNetwork_Convolutional.load_network(
                        '../notebooks/pretrained_mnist_clean.pkl')
                    nnet.transfer_learn_setup([256], freeze=False)
                    nnet.train(Mtrain, Ttrain, n_epochs=20, batch_size=200,
                               optim='Adam', learning_rate=0.0005, verbose=True)
                else:
                    nnet = per.train_mnist(Mtrain, Ttrain)
            else:
                if technique == 'incremental':
                    nnet = per.train_incremental_cifar(Xtrain, Ttrain, Mtrain)
                elif technique == 'transfer':
                    nnet = nnc.NeuralNetwork_Convolutional.load_network(
                        '../notebooks/pretrained_cifar_clean.pkl')
                    nnet.transfer_learn_setup([256, 512], freeze=False)
                    nnet.train(Mtrain, Ttrain, n_epochs=5, batch_size=200,
                               optim='Adam', learning_rate=0.0005, verbose=True)
                else:
                    nnet = per.train_cifar(Mtrain, Ttrain)
            print('Finished training a model...', flush=True)

            natural_acc.append(ml.percent_correct(ml.batched_use(nnet, Xtest), Ttest))
            for t, test_perturb in enumerate(test_types):
                tmp = []
                for trial in range(trials):
                    if type == 'pixel':
                        M = per.change_pixel(Xtest,
                                             pixels_to_change=test_pixel_change,
                                             pertrub=test_perturb)
                    elif type == 'noise':
                        M = per.add_image_noise(Xtest, test_perturb)
                    else:
                        M = per.add_image_blur(Xtest, test_perturb)
                    tmp.append(ml.percent_correct(ml.batched_use(nnet, M), Ttest))
                    progress.value += 1
                augmented_acc[t, 0, p] = np.mean(tmp)
                augmented_acc[t, 1, p] = np.std(tmp)
        print('finished testing: ', perturb, flush=True)

        plt.figure(figsize=(6, 4))
        for t, test_perturb in enumerate(test_types):
            if type == 'pixel':
                label = test_perturb
            elif type == 'noise':
                label = f'{test_perturb:.5f}'
            else:
                label = f'{test_perturb:.3f}'
            plt.errorbar(training_vals, augmented_acc[t, 0],
                         yerr=augmented_acc[t, 1], marker='.', lw=1, capsize=5,
                         capthick=1.5, label=label, markeredgecolor='k',
                         color=COLORS[t])
        plt.plot(training_vals, natural_acc, marker='.', lw=1, label='natural',
                 markeredgecolor='k', color=COLORS[3])
        plt.xticks(training_vals)
        plt.xlabel(xlabel)
        plt.ylabel(r'Accuracy ( \% )')
        plt.legend(loc='best', fontsize='medium')
        plt.grid(True)
        plt.tight_layout()
        plt.savefig('../notebooks/media/' + technique + '/' + model.lower()
                    + '-' + type + '-' + perturb + '.pdf', bbox_inches='tight')
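
# Example driver (a minimal sketch; the data loader and path follow the usage
# in generate_increasing_noise_plot above):
def run_mnist_noise_augmentation_example():
    """Load MNIST and run a noise-augmented transfer-learning experiment."""
    Xtrain, Ttrain, Xtest, Ttest, _, _ = dm.load_mnist('../notebooks/mnist.pkl.gz')
    augmented_training(Xtrain, Ttrain, Xtest, Ttest,
                       type='noise', model='MNIST', technique='transfer')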