def test_add_objects(self):
    network = Network(dt=1.0, learning=False)

    inpt = Input(100)
    network.add_layer(inpt, name='X')
    lif = LIFNodes(50)
    network.add_layer(lif, name='Y')

    assert inpt == network.layers['X']
    assert lif == network.layers['Y']

    conn = Connection(inpt, lif)
    network.add_connection(conn, source='X', target='Y')

    assert conn == network.connections[('X', 'Y')]

    monitor = Monitor(lif, state_vars=['s', 'v'])
    network.add_monitor(monitor, 'Y')

    assert monitor == network.monitors['Y']

    network.save('net.pt')
    _network = load_network('net.pt', learning=True)
    assert _network.learning
    assert 'X' in _network.layers
    assert 'Y' in _network.layers
    assert ('X', 'Y') in _network.connections
    assert 'Y' in _network.monitors
    del _network

    os.remove('net.pt')
def test_empty(self):
    for dt in [0.1, 1.0, 5.0]:
        network = Network(dt=dt)
        assert network.dt == dt

        network.run(inpts={}, time=1000)

        network.save('net.pt')
        _network = load_network('net.pt')
        assert _network.dt == dt
        assert _network.learning
        del _network

        _network = load_network('net.pt', learning=True)
        assert _network.dt == dt
        assert _network.learning
        del _network

        _network = load_network('net.pt', learning=False)
        assert _network.dt == dt
        assert not _network.learning
        del _network

        os.remove('net.pt')
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=250, time=50, lr=1e-2, lr_decay=0.99, dt=1, theta_plus=0.05, theta_decay=1e-7, progress_interval=10, update_interval=250, train=True, plot=False, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, time, lr, lr_decay, theta_plus, theta_decay, progress_interval, update_interval ] test_params = [ seed, n_neurons, n_train, n_test, inhib, time, lr, lr_decay, theta_plus, theta_decay, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) if train: n_examples = n_train else: n_examples = n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 10 # Build network. if train: network = Network(dt=dt) input_layer = Input(n=784, traces=True, trace_tc=5e-2) network.add_layer(input_layer, name='X') output_layer = DiehlAndCookNodes(n=n_neurons, traces=True, rest=0, reset=0, thresh=1, refrac=0, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus, theta_decay=theta_decay) network.add_layer(output_layer, name='Y') w = 0.3 * torch.rand(784, n_neurons) input_connection = Connection(source=network.layers['X'], target=network.layers['Y'], w=w, update_rule=PostPre, nu=[0, lr], wmin=0, wmax=1, norm=78.4) network.add_connection(input_connection, source='X', target='Y') w = -inhib * (torch.ones(n_neurons, n_neurons) - torch.diag(torch.ones(n_neurons))) recurrent_connection = Connection(source=network.layers['Y'], target=network.layers['Y'], w=w, wmin=-inhib, wmax=0) network.add_connection(recurrent_connection, source='Y', target='Y') else: path = os.path.join('..', '..', 'params', data, model) network = load_network(os.path.join(path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load Fashion-MNIST data. dataset = FashionMNIST(path=os.path.join('..', '..', 'data', 'FashionMNIST'), download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images = images.view(-1, 784) images = images / 255 # if train: # for i in range(n_neurons): # network.connections['X', 'Y'].w[:, i] = images[i] + images[i].mean() * torch.randn(784) # Record spikes during the simulation. spike_record = torch.zeros(update_interval, time, n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes)) rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes)) ngram_scores = {} else: path = os.path.join('..', '..', 'params', data, model) path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load( open(path, 'rb')) # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} if train: best_accuracy = 0 spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. 
if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None weights_im = None assigns_im = None perf_ax = None start = t() for i in range(n_examples): if i % progress_interval == 0 and train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, predictions = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. path = os.path.join('..', '..', 'params', data, model) if not os.path.isdir(path): os.makedirs(path) network.save(os.path.join(path, model_name + '.pt')) path = os.path.join( path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, current_labels, n_classes, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores) print() # Get next input sample. image = images[i % n_examples] sample = rank_order(datum=image, time=time, dt=dt) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = rank_order(datum=image, time=time, dt=dt) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: _input = images[i % n_examples].view(28, 28) reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28) _spikes = {layer: spikes[layer].get('s') for layer in spikes} input_exc_weights = network.connections['X', 'Y'].w square_weights = get_square_weights( input_exc_weights.view(784, n_neurons), n_sqrt, 28) square_assignments = get_square_assignments(assignments, n_sqrt) # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im, wmax=0.25) # assigns_im = plot_assignments(square_assignments, im=assigns_im) # perf_ax = plot_performance(curves, ax=perf_ax) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. 
curves, predictions = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. if train: path = os.path.join('..', '..', 'params', data, model) if not os.path.isdir(path): os.makedirs(path) network.save(os.path.join(path, model_name + '.pt')) path = os.path.join( path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. path = os.path.join('..', '..', 'curves', data, model) if not os.path.isdir(path): os.makedirs(path) if train: to_write = ['train'] + params else: to_write = ['test'] + params to_write = [str(x) for x in to_write] f = '_'.join(to_write) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(path, f), 'wb')) # Save results to disk. path = os.path.join('..', '..', 'results', data, model) if not os.path.isdir(path): os.makedirs(path) results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] if train: to_write = params + results else: to_write = test_params + results to_write = [str(x) for x in to_write] if train: name = 'train.csv' else: name = 'test.csv' if not os.path.isfile(os.path.join(path, name)): with open(os.path.join(path, name), 'w') as f: if train: f.write( 'random_seed,n_neurons,n_train,inhib,time,lr,lr_decay,theta_plus,theta_decay,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,n_neurons,n_train,n_test,inhib,time,lr,lr_decay,theta_plus,theta_decay,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(path, name), 'a') as f: f.write(','.join(to_write) + '\n')
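# Illustrative sketch (not part of the original experiment): the evaluation above relies on
# BindsNET's `assign_labels` / `update_curves` helpers. The function below is a rough,
# self-contained approximation of the underlying "all activity" scheme (Diehl & Cook, 2015):
# each neuron is assigned the class it fires most for on labeled data, and a sample is
# classified by summing spike counts over neurons grouped by assignment. The name
# `assign_and_classify` is hypothetical and the details may differ from the library's code.
import torch

def assign_and_classify(spike_record, labels, n_classes):
    # spike_record: (n_examples, time, n_neurons) spike tensor; labels: (n_examples,) class labels.
    counts = spike_record.sum(1)  # Per-example, per-neuron spike counts.

    # Average firing count of each neuron, conditioned on each class.
    rates = torch.zeros(counts.size(1), n_classes)
    for c in range(n_classes):
        idx = labels == c
        if idx.any():
            rates[:, c] = counts[idx].mean(0)

    # Assign each neuron to the class for which it fires most.
    assignments = rates.argmax(1)

    # Vote: sum spike counts over neurons sharing an assignment.
    votes = torch.zeros(counts.size(0), n_classes)
    for c in range(n_classes):
        votes[:, c] = counts[:, assignments == c].sum(1)

    return assignments, votes.argmax(1)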
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=250, lr=1e-2, lr_decay=1, time=100, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, progress_interval=10, update_interval=100, plot=False, train=True, gpu=False, no_inhib=False, no_theta=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, lr, lr_decay, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] test_params = [ seed, n_neurons, n_train, n_test, inhib, lr, lr_decay, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 10 # Build network. if train: network = Network() input_layer = Input(n=784, traces=True, trace_tc=5e-2) network.add_layer(input_layer, name='X') output_layer = DiehlAndCookNodes(n=n_neurons, traces=True, rest=0, reset=0, thresh=5, refrac=0, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus, theta_decay=theta_decay) network.add_layer(output_layer, name='Y') w = 0.3 * torch.rand(784, n_neurons) input_connection = Connection(source=network.layers['X'], target=network.layers['Y'], w=w, update_rule=WeightDependentPostPre, nu=[0, lr], wmin=0, wmax=1, norm=78.4) network.add_connection(input_connection, source='X', target='Y') w = -inhib * (torch.ones(n_neurons, n_neurons) - torch.diag(torch.ones(n_neurons))) recurrent_connection = Connection(source=network.layers['Y'], target=network.layers['Y'], w=w, wmin=-inhib, wmax=0, update_rule=WeightDependentPostPre, nu=[0, -100 * lr], norm=inhib / 2 * n_neurons) network.add_connection(recurrent_connection, source='Y', target='Y') mask = network.connections['Y', 'Y'].w == 0 masks = {('Y', 'Y'): mask} else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.connections['Y', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 if no_inhib: del network.connections['Y', 'Y'] if no_theta: network.layers['Y'].theta = 0 # Load MNIST data. dataset = MNIST(path=data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images = images.view(-1, 784) images *= intensity labels = labels.long() monitors = {} for layer in set(network.layers): if 'v' in network.layers[layer].__dict__: monitors[layer] = Monitor(network.layers[layer], state_vars=['s', 'v'], time=time) else: monitors[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(monitors[layer], name=layer) # Train the network. 
if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None voltage_ims = None voltage_axes = None weights_im = None weights2_im = None unclamps = {} per_class = int(n_neurons / n_classes) for label in range(n_classes): unclamp = torch.ones(n_neurons).byte() unclamp[label * per_class:(label + 1) * per_class] = 0 unclamps[label] = unclamp predictions = torch.zeros(n_examples) corrects = torch.zeros(n_examples) start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0 and train: network.save(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay # Get next input sample. image = images[i % len(images)] label = labels[i % len(images)].item() sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} # Run the network on the input. if train: network.run(inpts=inpts, time=time, unclamp={'Y': unclamps[label]}, masks=masks) else: network.run(inpts=inpts, time=time) if not train: retries = 0 while monitors['Y'].get('s').sum() == 0 and retries < 3: retries += 1 image *= 1.5 sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} if train: network.run(inpts=inpts, time=time, unclamp={'Y': unclamps[label]}, masks=masks) else: network.run(inpts=inpts, time=time) output = monitors['Y'].get('s') summed_neurons = output.sum(dim=1).view(n_classes, per_class) summed_classes = summed_neurons.sum(dim=1) prediction = torch.argmax(summed_classes).item() correct = prediction == label predictions[i] = prediction corrects[i] = int(correct) # Optionally plot various simulation information. if plot: # _input = image.view(28, 28) # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28) # v = {'Y': monitors['Y'].get('v')} s = {layer: monitors[layer].get('s') for layer in monitors} input_exc_weights = network.connections['X', 'Y'].w square_weights = get_square_weights( input_exc_weights.view(784, n_neurons), n_sqrt, 28) recurrent_weights = network.connections['Y', 'Y'].w # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims) # voltage_ims, voltage_axes = plot_voltages(v, ims=voltage_ims, axes=voltage_axes) spike_ims, spike_axes = plot_spikes(s, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) weights2_im = plot_weights(recurrent_weights, im=weights2_im, wmin=-inhib, wmax=0) plt.pause(1e-8) network.reset_() # Reset state variables. 
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') if train: network.save(os.path.join(params_path, model_name + '.pt')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') accuracy = torch.mean(corrects).item() * 100 print(f'\nAccuracy: {accuracy}\n') to_write = params + [accuracy] if train else test_params + [accuracy] to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,n_neurons,n_train,inhib,lr,lr_decay,time,timestep,theta_plus,' 'theta_decay,intensity,progress_interval,update_interval,accuracy\n' ) else: f.write( 'random_seed,n_neurons,n_train,n_test,inhib,lr,lr_decay,time,timestep,' 'theta_plus,theta_decay,intensity,progress_interval,update_interval,accuracy\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusion = confusion_matrix(labels, predictions) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusion, os.path.join(confusion_path, f))
def main(seed=0, n_examples=100, gpu=False, plot=False): np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) model_name = '0_12_4_150_4_0.01_0.99_60000_250.0_250_1.0_0.05_1e-07_0.5_0.2_10_250' network = load_network(os.path.join(params_path, f'{model_name}.pt')) for l in network.layers: network.layers[l].dt = network.dt for c in network.connections: network.connections[c].dt = network.dt network.layers['Y'].one_spike = True network.layers['Y'].lbound = None kernel_size = 12 side_length = 20 n_filters = 150 time = 250 intensity = 0.5 crop = 4 conv_size = network.connections['X', 'Y'].conv_size locations = network.connections['X', 'Y'].locations conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod n_classes = 10 # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load MNIST data. dataset = MNIST(path=data_path, download=True) images, labels = dataset.get_test() images *= intensity images = images[:, crop:-crop, crop:-crop] # Neuron assignments and spike proportions. path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load(open( path, 'rb')) spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') # Train the network. print('\nBegin black box adversarial attack.\n') spike_ims = None spike_axes = None weights_im = None inpt_ims = None inpt_axes = None max_iters = 25 delta = 0.1 epsilon = 0.1 for i in range(n_examples): # Get next input sample. original = images[i % len(images)].contiguous().view(-1) label = labels[i % len(images)] # Check if the image is correctly classified. sample = poisson(datum=original, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=10, n=2).item() if prediction != label: continue # Create adversarial example. adversarial = False while not adversarial: adv_example = 255 * torch.rand(original.size()) sample = poisson(datum=adv_example, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=n_classes, n=2).item() if prediction == label: adversarial = True j = 0 current = original.clone() while j < max_iters: # Orthogonal perturbation. # perturb = orthogonal_perturbation(delta=delta, image=adv_example, target=original) # temp = adv_example + perturb # # Forward perturbation. 
# temp = temp.clone() + forward_perturbation(epsilon * get_diff(temp, original), temp, adv_example) # print(temp) perturbation = torch.randn(original.size()) unnormed_source_direction = original - perturbation source_norm = torch.norm(unnormed_source_direction) source_direction = unnormed_source_direction / source_norm dot = torch.dot(perturbation, source_direction) perturbation -= dot * source_direction perturbation *= epsilon * source_norm / torch.norm(perturbation) D = 1 / np.sqrt(epsilon**2 + 1) direction = perturbation - unnormed_source_direction spherical_candidate = current + D * direction spherical_candidate = torch.clamp(spherical_candidate, 0, 255) new_source_direction = original - spherical_candidate new_source_direction_norm = torch.norm(new_source_direction) # length if spherical_candidate would be exactly on the sphere length = delta * source_norm # length including correction for deviation from sphere deviation = new_source_direction_norm - source_norm length += deviation # make sure the step size is positive length = max(0, length) # normalize the length length = length / new_source_direction_norm candidate = spherical_candidate + length * new_source_direction candidate = torch.clamp(candidate, 0, 255) sample = poisson(datum=candidate, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=10, n=2).item() # Optionally plot various simulation information. if plot: _input = original.view(side_length, side_length) reconstruction = candidate.view(side_length, side_length) _spikes = { 'X': spikes['X'].get('s').view(side_length**2, time), 'Y': spikes['Y'].get('s').view(n_neurons, time) } w = network.connections['X', 'Y'].w spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_locally_connected_weights(w, n_filters, kernel_size, conv_size, locations, side_length, im=weights_im) inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], ims=inpt_ims, axes=inpt_axes) plt.pause(1e-8) if prediction == label: print('Attack failed.') else: print('Attack succeeded.') adv_example = candidate j += 1 network.reset_() # Reset state variables. print('\nAdversarial attack complete.\n')
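# Illustrative sketch (assumed helper, not in the original script): the perturbation logic in the
# attack loop above follows the decision-based boundary attack of Brendel et al. (2018). The
# function below restates one proposal step in a self-contained form: a random move along the
# hypersphere around the original image that passes through the current adversarial example,
# followed by a small step toward the original. The caller keeps the candidate only if the
# network still misclassifies it. Argument names and defaults are illustrative.
import torch

def boundary_attack_step(original, adversarial, spherical_step=0.1, source_step=0.1):
    # `original` and `adversarial` are flattened images with pixel values in [0, 255].
    unnormed_source_direction = original - adversarial
    source_norm = torch.norm(unnormed_source_direction)
    source_direction = unnormed_source_direction / source_norm

    # Orthogonal (spherical) step: random noise with its component along the
    # source direction projected out, rescaled relative to the current distance.
    perturbation = torch.randn(original.size())
    perturbation -= torch.dot(perturbation, source_direction) * source_direction
    perturbation *= spherical_step * source_norm / torch.norm(perturbation)

    D = 1 / (spherical_step ** 2 + 1) ** 0.5
    spherical_candidate = torch.clamp(
        original + D * (perturbation - unnormed_source_direction), 0, 255)

    # Forward step toward the original image, correcting for deviation from the sphere.
    new_source_direction = original - spherical_candidate
    new_source_norm = torch.norm(new_source_direction)
    length = torch.clamp(
        source_step * source_norm + (new_source_norm - source_norm), min=0) / new_source_norm

    return torch.clamp(spherical_candidate + length * new_source_direction, 0, 255)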
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=100, lr=0.01, lr_decay=1, time=350, dt=1, theta_plus=0.05, theta_decay=1e-7, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, lr_decay, time, dt, theta_plus, theta_decay, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_classes = 10 # Build network. if train: network = Network(dt=dt) input_layer = RealInput(n=784, traces=True, trace_tc=5e-2) network.add_layer(input_layer, name='X') output_layer = DiehlAndCookNodes(n=n_classes, rest=0, reset=1, thresh=1, decay=1e-2, theta_plus=theta_plus, theta_decay=theta_decay, traces=True, trace_tc=5e-2) network.add_layer(output_layer, name='Y') w = torch.rand(784, n_classes) input_connection = Connection(source=input_layer, target=output_layer, w=w, update_rule=MSTDPET, nu=lr, wmin=0, wmax=1, norm=78.4, tc_e_trace=0.1) network.add_connection(input_connection, source='X', target='Y') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load MNIST data. environment = MNISTEnvironment(dataset=MNIST(path=data_path, download=True), train=train, time=time) # Create pipeline. pipeline = Pipeline(network=network, environment=environment, encoding=repeat, action_function=select_spiked, output='Y', reward_delay=None) spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=('s', ), time=time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) network.add_monitor( Monitor(network.connections['X', 'Y'].update_rule, state_vars=('e_trace', ), time=time), 'X_Y_e_trace') # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') spike_ims = None spike_axes = None weights_im = None elig_axes = None elig_ims = None start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i > 0 and train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay # Run the network on the input. for j in range(time): pipeline.step(a_plus=1, a_minus=0) if plot: _spikes = {layer: spikes[layer].get('s') for layer in spikes} w = network.connections['X', 'Y'].w square_weights = get_square_weights(w.view(784, n_classes), 4, 28) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) elig_ims, elig_axes = plot_voltages( { 'Y': network.monitors['X_Y_e_trace'].get('e_trace').view( -1, time)[1500:2000] }, plot_type='line', ims=elig_ims, axes=elig_axes) plt.pause(1e-8) pipeline.reset_() # Reset state variables. network.connections['X', 'Y'].update_rule.e_trace = torch.zeros( 784, n_classes) print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') if train: print('\nTraining complete.\n') else: print('\nTest complete.\n')
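# Illustrative sketch (assumptions flagged): the pipeline above trains with BindsNET's MSTDPET
# rule, i.e. reward-modulated STDP with an eligibility trace. The helper below shows one common
# formulation of such an update (after Florian, 2007) for a single timestep; it is a rough
# approximation for reading purposes, not the library's implementation, and the function name
# `reward_modulated_stdp_step` is hypothetical.
import math

import torch

def reward_modulated_stdp_step(w, pre_spikes, post_spikes, pre_trace, post_trace,
                               e_trace, reward, lr=1e-2, a_plus=1.0, a_minus=0.0,
                               tc_e_trace=0.1, dt=1.0):
    # STDP term: potentiate where a presynaptic trace coincides with a postsynaptic spike,
    # depress where a postsynaptic trace coincides with a presynaptic spike.
    stdp = (a_plus * torch.ger(pre_trace, post_spikes.float())
            - a_minus * torch.ger(pre_spikes.float(), post_trace))

    # Eligibility trace: decaying memory of recent STDP events.
    e_trace = e_trace * math.exp(-dt / tc_e_trace) + stdp

    # The reward gates how much of the eligibility trace is written into the weights.
    w = w + lr * reward * e_trace * dt
    return w, e_trace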
def main(seed=0, n_train=60000, n_test=10000, inhib=250, kernel_size=(16, ), stride=(2, ), time=50, n_filters=25, crop=0, lr=1e-2, lr_decay=0.99, dt=1, theta_plus=0.05, theta_decay=1e-7, norm=0.2, progress_interval=10, update_interval=250, train=True, relabel=False, plot=False, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0 or relabel, \ 'No. examples must be divisible by update_interval' params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, inhib, time, dt, theta_plus, theta_decay, norm, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, n_test, inhib, time, dt, theta_plus, theta_decay, norm, progress_interval, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) side_length = 28 - crop * 2 n_inpt = side_length**2 n_examples = n_train if train else n_test n_classes = 10 # Build network. if train: network = LocallyConnectedNetwork( n_inpt=n_inpt, input_shape=[side_length, side_length], kernel_size=kernel_size, stride=stride, n_filters=n_filters, inh=inhib, dt=dt, nu=[.1 * lr, lr], theta_plus=theta_plus, theta_decay=theta_decay, wmin=0, wmax=1.0, norm=norm) network.layers['Y'].thresh = 1 network.layers['Y'].reset = 0 network.layers['Y'].rest = 0 else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 conv_size = network.connections['X', 'Y'].conv_size locations = network.connections['X', 'Y'].locations conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load Fashion-MNIST data. dataset = FashionMNIST(path=data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() if crop != 0: images = images[:, crop:-crop, crop:-crop] # Record spikes during the simulation. if not train: update_interval = n_examples spike_record = torch.zeros(update_interval, time, n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, 10)) rates = torch.zeros_like(torch.Tensor(n_neurons, 10)) ngram_scores = {} else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load( open(path, 'rb')) if train: best_accuracy = 0 # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()} spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') # Train the network. 
if train: print('\nBegin training.\n') else: print('\nBegin test.\n') spike_ims = None spike_axes = None weights_im = None start = t() for i in range(n_examples): if i % progress_interval == 0 and train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat( [predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, current_labels, n_classes, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores) print() # Get next input sample. image = images[i % len(images)].contiguous().view(-1) sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: _spikes = { 'X': spikes['X'].get('s').view(side_length**2, time), 'Y': spikes['Y'].get('s').view(n_filters * conv_prod, time) } spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_locally_connected_weights( network.connections['X', 'Y'].w, n_filters, kernel_size, conv_size, locations, side_length, im=weights_im, wmin=0, wmax=1) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] if not train and relabel: # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, current_labels, n_classes, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores) # Update and print accuracy evaluations. 
curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) # Save results to disk. path = os.path.join('..', '..', 'results', data, model) if not os.path.isdir(path): os.makedirs(path) results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(path, name), 'w') as f: if train: f.write( 'random_seed,kernel_size,stride,n_filters,crop,n_train,inhib,time,lr,lr_decay,timestep,theta_plus,' 'theta_decay,norm,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,kernel_size,stride,n_filters,crop,n_train,n_test,inhib,time,lr,lr_decay,timestep,' 'theta_plus,theta_decay,norm,progress_interval,update_interval,mean_all_activity,' 'mean_proportion_weighting,mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusions = {} for scheme in predictions: confusions[scheme] = confusion_matrix(labels, predictions[scheme]) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusions, os.path.join(confusion_path, f))
def main(seed=0, p_remove=0): model = '0_16_2_250_4_0.01_0.99_60000_250.0_250_1.0_0.05_1e-07_0.5_0.2_10_250.pt' np.random.seed(seed) torch.manual_seed(seed) torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) crop = 4 time = 250 n_filters = 250 intensity = 0.5 n_examples = 10000 n_classes = 10 # Load network. network = load_network( os.path.join( ROOT_DIR, 'params', 'mnist', 'crop_locally_connected', model ) ) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu ) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 network.connections['X', 'Y'].norm = None # Remove `p_remove` percentage of neurons (set outgoing synapses to 0). mask = torch.bernoulli(p_remove * torch.ones(network.layers['Y'].shape)).byte() network.connections['X', 'Y'].w[:, mask] = 0 conv_size = network.connections['X', 'Y'].conv_size conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load MNIST data. dataset = MNIST(path=data_path, download=True, shuffle=True) images, labels = dataset.get_test() images *= intensity images = images[:, crop:-crop, crop:-crop] update_interval = 250 # Record spikes during the simulation. spike_record = torch.zeros(update_interval, time, n_neurons) # Neuron assignments and spike proportions. path = os.path.join( ROOT_DIR, 'params', 'mnist', 'crop_locally_connected', f'auxiliary_{model}' ) assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb')) # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = { scheme: torch.Tensor().long() for scheme in curves.keys() } spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') start = t() for i in range(n_examples): if i % 10 == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves( curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2 ) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) # Get next input sample. image = images[i % len(images)].contiguous().view(-1) sample = poisson(datum=image, time=time, dt=1) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = poisson(datum=image, time=time, dt=1) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. 
curves, preds = update_curves(
    curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
    proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)

for scheme in preds:
    predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1)

print('Average accuracies:\n')
for scheme in curves.keys():
    print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

# Save results to disk.
results = [
    np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']),
    np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram'])
]

to_write = [str(x) for x in [seed, p_remove] + results]
name = 'neuron_robust.csv'

if not os.path.isfile(os.path.join(results_path, name)):
    with open(os.path.join(results_path, name), 'w') as f:
        f.write(
            'random_seed,p_remove,mean_all_activity,mean_proportion_weighting,mean_ngram,'
            'max_all_activity,max_proportion_weighting,max_ngram\n'
        )

with open(os.path.join(results_path, name), 'a') as f:
    f.write(','.join(to_write) + '\n')
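# Illustrative sketch (hypothetical helper): the robustness experiment above removes a random
# fraction of output neurons by zeroing their incoming synapses with a Bernoulli mask. The
# function below isolates that step for a generic weight matrix of shape (n_input, n_output);
# it returns a masked copy rather than modifying the weights in place.
import torch

def ablate_neurons(w, p_remove):
    # `keep` is False for neurons selected for removal with probability `p_remove`.
    keep = torch.rand(w.size(1)) >= p_remove
    return w * keep.float().unsqueeze(0), keep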
import os

import torch

from experiments import ROOT_DIR
from bindsnet.datasets import MNIST
from bindsnet.encoding import poisson
from bindsnet.network import load_network

network = load_network(
    os.path.join(ROOT_DIR, 'params', 'mnist', 'diehl_and_cook_2015',
                 '2_400_60000_500.0_0.01_0.99_250_1_0.05_1e-07_0.5_10_250.pt'))

auxiliary = torch.load(
    os.path.join(
        ROOT_DIR, 'params', 'mnist', 'diehl_and_cook_2015',
        'auxiliary_2_400_60000_500.0_0.01_0.99_250_1_0.05_1e-07_0.5_10_250.pt')
)

images, labels = MNIST(path=os.path.join(ROOT_DIR, 'data', 'MNIST'),
                       download=True, shuffle=True).get_train()

spikes = poisson(datum=images[0].view(-1), time=250, dt=1)

network.run(inpts={'X': spikes}, time=250)
import os

import torch

from bindsnet.datasets import MNIST
from bindsnet.network import load_network
from experiments import ROOT_DIR
from experiments.robustness.mnist import BindsNETModel

intensity = 0.5
crop = 4

# Load network.
model_name = '0_12_4_150_4_0.01_0.99_60000_250.0_250_1.0_0.05_1e-07_0.5_0.2_10_250'
network = load_network(
    os.path.join(
        ROOT_DIR, 'params', 'mnist', 'crop_locally_connected', f'{model_name}.pt'
    )
)
network.layers['Y'].theta_plus = 0
network.layers['Y'].theta_decay = 0

# Neuron assignments and spike proportions.
path = os.path.join(
    ROOT_DIR, 'params', 'mnist', 'crop_locally_connected',
    '_'.join(['auxiliary', model_name]) + '.pt'
)
_, _, _, ngram_scores = torch.load(open(path, 'rb'))

# Load MNIST data.
dataset = MNIST(
    path=os.path.join(
def main(seed=0, n_train=60000, n_test=10000, inhib=250, kernel_size=(16, ), stride=(2, ), n_filters=25, n_output=100, time=100, crop=0, lr=1e-2, lr_decay=0.99, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, norm=0.2, progress_interval=10, update_interval=250, train=True, plot=False, gpu=False): assert n_train % update_interval == 0, 'No. examples must be divisible by update_interval' params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, inhib, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, n_test, inhib, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) side_length = 28 - crop * 2 n_inpt = side_length**2 n_examples = n_train if train else n_test n_classes = 10 # Build network. if train: network = load_network( os.path.join( ROOT_DIR, 'params', 'mnist', 'crop_locally_connected', '0_12_4_150_4_0.01_0.99_60000_250.0_250_1.0_0.05_1e-07_0.5_0.2_10_250.pt' )) for l in network.layers: network.layers[l].dt = 1 network.layers[l].lbound = None for m in network.monitors: network.monitors[m].record_length = 0 network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 network.layers['Y'].theta -= 0.5 * network.layers['Y'].theta.mean() network.layers['Y'].one_spike = False del network.connections['Y', 'Y'] output_layer = DiehlAndCookNodes(n=n_output, traces=True, rest=0, reset=0, thresh=1, refrac=0, decay=1e-2, trace_tc=5e-2) hidden_output_connection = Connection( network.layers['Y'], output_layer, nu=[0, lr], update_rule=WeightDependentPostPre, wmin=0, wmax=1, norm=norm * network.layers['Y'].n) w = -inhib * (torch.ones(n_output, n_output) - torch.diag(torch.ones(n_output))) output_recurrent_connection = Connection(output_layer, output_layer, w=w, update_rule=NoOp, wmin=-inhib, wmax=0) network.add_layer(output_layer, name='Z') network.add_connection(hidden_output_connection, source='Y', target='Z') network.add_connection(output_recurrent_connection, source='Z', target='Z') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['Y', 'Z'].update_rule = NoOp( connection=network.connections['Y', 'Z'], nu=0) # network.layers['Z'].theta = 0 # network.layers['Z'].theta_decay = 0 # network.layers['Z'].theta_plus = 0 # del network.connections['Z', 'Z'] conv_size = network.connections['X', 'Y'].conv_size locations = network.connections['X', 'Y'].locations conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load MNIST data. 
dataset = MNIST(path=data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images *= intensity images = images[:, crop:-crop, crop:-crop].contiguous().view(-1, side_length**2) spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') spike_ims = None spike_axes = None weights_im = None weights2_im = None unclamps = {} per_class = int(n_output / n_classes) for label in range(n_classes): unclamp = torch.ones(n_output).byte() unclamp[label * per_class:(label + 1) * per_class] = 0 unclamps[label] = unclamp predictions = torch.zeros(n_examples) corrects = torch.zeros(n_examples) start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if train: network.save(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay # Get next input sample. image = images[i % len(images)] label = labels[i % len(images)].item() sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} # Run the network on the input. if train: network.run(inpts=inpts, time=time, unclamp={'Z': unclamps[label]}) else: network.run(inpts=inpts, time=time) if not train: retries = 0 while spikes['Z'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 1.5 sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} if train: network.run(inpts=inpts, time=time, unclamp={'Z': unclamps[label]}) else: network.run(inpts=inpts, time=time) output = spikes['Z'].get('s') summed_neurons = output.sum(dim=1).view(per_class, n_classes) summed_classes = summed_neurons.sum(dim=1) prediction = torch.argmax(summed_classes).item() correct = prediction == label predictions[i] = prediction corrects[i] = int(correct) # print(spikes[].get('s').sum(), spikes['Z'].get('s').sum()) # Optionally plot various simulation information. if plot: _spikes = { 'X': spikes['X'].get('s').view(side_length**2, time), 'Y': spikes['Y'].get('s').view(n_neurons, time), 'Z': spikes['Z'].get('s').view(n_output, time) } spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_locally_connected_weights( network.connections['X', 'Y'].w, n_filters, kernel_size, conv_size, locations, side_length, im=weights_im) w = network.connections['Y', 'Z'].w weights2_im = plot_weights(w, im=weights2_im, wmax=1) plt.pause(1e-8) network.reset_() # Reset state variables. 
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') if train: network.save(os.path.join(params_path, model_name + '.pt')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') accuracy = torch.mean(corrects).item() * 100 print(f'\nAccuracy: {accuracy}\n') to_write = params + [accuracy] if train else test_params + [accuracy] to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,inhib,time,timestep,theta_plus,' 'theta_decay,intensity,norm,progress_interval,accuracy\n') else: f.write( 'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,n_test,inhib,time,timestep,' 'theta_plus,theta_decay,intensity,norm,progress_interval,update_interval,accuracy\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusion = confusion_matrix(labels, predictions) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusion, os.path.join(confusion_path, f))
def main(seed=0, n_train=60000, n_test=10000, time=50, lr=0.01, lr_decay=0.95, update_interval=500, max_prob=1.0, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [seed, n_train, time, lr, lr_decay, update_interval, max_prob] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, n_train, n_test, time, lr, lr_decay, update_interval, max_prob ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) criterion = torch.nn.CrossEntropyLoss( ) # Loss function on output firing rates. n_examples = n_train if train else n_test if train: # Network building. network = Network() # Groups of neurons. input_layer = RealInput(n=784, sum_input=True) output_layer = IFNodes(n=10, sum_input=True) bias = RealInput(n=1, sum_input=True) network.add_layer(input_layer, name='X') network.add_layer(output_layer, name='Y') network.add_layer(bias, name='Y_b') # Connections between groups of neurons. input_connection = Connection(source=input_layer, target=output_layer, norm=150, wmin=-1, wmax=1) bias_connection = Connection(source=bias, target=output_layer) network.add_connection(input_connection, source='X', target='Y') network.add_connection(bias_connection, source='Y_b', target='Y') # State variable monitoring. for l in network.layers: m = Monitor(network.layers[l], state_vars=['s'], time=time) network.add_monitor(m, name=l) else: network = load_network(os.path.join(params_path, model_name + '.pt')) # Load MNIST data. dataset = MNIST(path=data_path, download=True, shuffle=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images, labels = images.view(-1, 784) / 255, labels grads = {} accuracies = [] predictions = [] ground_truth = [] best = -np.inf spike_ims, spike_axes, weights_im = None, None, None losses = torch.zeros(update_interval) correct = torch.zeros(update_interval) # Run training. start = t() for i in range(n_examples): label = torch.Tensor([labels[i % len(labels)]]).long() image = images[i % len(labels)] # Run simulation for single datum. inpts = {'X': image.repeat(time, 1), 'Y_b': torch.ones(time, 1)} network.run(inpts=inpts, time=time) # Retrieve spikes and summed inputs from both layers. spikes = { l: network.monitors[l].get('s') for l in network.layers if '_b' not in l } summed_inputs = {l: network.layers[l].summed for l in network.layers} # Compute softmax of output spiking activity and get predicted label. output = summed_inputs['Y'].softmax(0).view(1, -1) predicted = output.argmax(1).item() correct[i % update_interval] = int(predicted == label[0].item()) predictions.append(predicted) ground_truth.append(label) # Compute cross-entropy loss between output and true label. losses[i % update_interval] = criterion(output, label) if train: # Compute gradient of the loss WRT average firing rates. grads['dl/df'] = summed_inputs['Y'].softmax(0) grads['dl/df'][label] -= 1 # Compute gradient of the summed voltages WRT connection weights. # This is an approximation; the summed voltages are not a # smooth function of the connection weights. grads['dl/dw'] = torch.ger(summed_inputs['X'], grads['dl/df']) grads['dl/db'] = grads['dl/df'] # Do stochastic gradient descent calculation. 
network.connections['X', 'Y'].w -= lr * grads['dl/dw'] network.connections['Y_b', 'Y'].w -= lr * grads['dl/db'] if i > 0 and i % update_interval == 0: accuracies.append(correct.mean() * 100) if train: if accuracies[-1] > best: print() print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) best = accuracies[-1] print() print(f'Progress: {i} / {n_examples} ({t() - start:.3f} seconds)') print(f'Average cross-entropy loss: {losses.mean():.3f}') print(f'Last accuracy: {accuracies[-1]:.3f}') print(f'Average accuracy: {np.mean(accuracies):.3f}') # Decay learning rate. lr *= lr_decay if train: print(f'Best accuracy: {best:.3f}') print(f'Current learning rate: {lr:.3f}') start = t() if plot: w = network.connections['X', 'Y'].w weights = [w[:, i].view(28, 28) for i in range(10)] w = torch.zeros(5 * 28, 2 * 28) for i in range(5): for j in range(2): w[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = weights[i + j * 5] spike_ims, spike_axes = plot_spikes(spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(w, im=weights_im, wmin=-1, wmax=1) plt.pause(1e-1) network.reset_() # Reset state variables. accuracies.append(correct.mean() * 100) if train: lr *= lr_decay for c in network.connections: network.connections[c].update_rule.weight_decay *= lr_decay if accuracies[-1] > best: print() print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) best = accuracies[-1] print() print(f'Progress: {n_examples} / {n_examples} ({t() - start:.3f} seconds)') print(f'Average cross-entropy loss: {losses.mean():.3f}') print(f'Last accuracy: {accuracies[-1]:.3f}') print(f'Average accuracy: {np.mean(accuracies):.3f}') if train: print(f'Best accuracy: {best:.3f}') if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print(f'Average accuracy: {np.mean(accuracies):.3f}') # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((accuracies, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) results = [np.mean(accuracies), np.max(accuracies)] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'seed,n_train,time,lr,lr_decay,update_interval,max_prob,mean_accuracy,max_accuracy\n' ) else: f.write( 'seed,n_train,n_test,time,lr,lr_decay,update_interval,max_prob,mean_accuracy,max_accuracy\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') # Compute confusion matrices and save them to disk. confusion = confusion_matrix(ground_truth, predictions) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusion, os.path.join(confusion_path, f))
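# Illustrative check (not part of the original experiment): the manual update above uses the
# standard gradient of softmax cross-entropy through a linear map, dL/do = softmax(o) - onehot(y),
# dL/dW = outer(x, dL/do), dL/db = dL/do, applied to summed inputs as an approximation (as the
# in-line comments note, the summed voltages are not truly differentiable in the weights). The
# snippet below verifies that identity against autograd for the shapes used here (784 inputs,
# 10 classes); all names are local to the check.
import torch
import torch.nn.functional as F

def check_manual_gradient():
    torch.manual_seed(0)
    x = torch.rand(784)
    y = torch.tensor([3])
    W = torch.rand(784, 10, requires_grad=True)
    b = torch.rand(1, 10, requires_grad=True)

    # Forward pass through a linear map, then cross-entropy loss and autograd backward.
    logits = (x @ W + b).view(1, -1)
    loss = F.cross_entropy(logits, y)
    loss.backward()

    # Manual gradient, matching the script: softmax minus one-hot, then outer product with input.
    dl_df = logits.detach().softmax(1).view(-1)
    dl_df[y.item()] -= 1

    assert torch.allclose(W.grad, torch.ger(x, dl_df), atol=1e-5)
    assert torch.allclose(b.grad.view(-1), dl_df, atol=1e-5)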
def main(seed=0, n_train=60000, n_test=10000, inhib=250, kernel_size=(16,), stride=(2,), n_filters=25, crop=4, lr=0.01, lr_decay=1, time=100, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, norm=0.2, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, inhib, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, n_test, inhib, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) side_length = 28 - crop * 2 n_examples = n_train if train else n_test network = load_network(os.path.join(params_path, model_name + '.pt')) network.layers['X'] = Input(n=400) network.layers['Y'] = DiehlAndCookNodes( n=network.layers['Y'].n, thresh=network.layers['Y'].thresh, rest=network.layers['Y'].rest, reset=network.layers['Y'].reset, theta_plus=network.layers['Y'].theta_plus, theta_decay=network.layers['Y'].theta_decay ) network.add_layer(network.layers['X'], name='X') network.add_layer(network.layers['Y'], name='Y') network.connections['X', 'Y'].source = network.layers['X'] network.connections['X', 'Y'].target = network.layers['Y'] network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu ) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 conv_size = network.connections['X', 'Y'].conv_size locations = network.connections['X', 'Y'].locations conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod n_classes = 10 # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load MNIST data. dataset = MNIST(path=data_path, download=True) images, labels = dataset.get_test() images *= intensity images = images[:, crop:-crop, crop:-crop] # Neuron assignments and spike proportions. path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb')) spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') # Train the network. print('\nBegin black box adversarial attack.\n') spike_ims = None spike_axes = None weights_im = None inpt_ims = None inpt_axes = None max_iters = 25 delta = 0.1 epsilon = 0.1 for i in range(n_examples): # Get next input sample. original = images[i % len(images)].contiguous().view(-1) label = labels[i % len(images)] # Check if the image is correctly classified. sample = poisson(datum=original, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=10, n=2).item() if prediction != label: continue # Create adversarial example. 
adversarial = False while not adversarial: adv_example = 255 * torch.rand(original.size()) sample = poisson(datum=adv_example, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=n_classes, n=2).item() if prediction == label: adversarial = True j = 0 current = original.clone() while j < max_iters: # Orthogonal perturbation. # perturb = orthogonal_perturbation(delta=delta, image=adv_example, target=original) # temp = adv_example + perturb # # Forward perturbation. # temp = temp.clone() + forward_perturbation(epsilon * get_diff(temp, original), temp, adv_example) # print(temp) perturbation = torch.randn(original.size()) unnormed_source_direction = original - perturbation source_norm = torch.norm(unnormed_source_direction) source_direction = unnormed_source_direction / source_norm dot = torch.dot(perturbation, source_direction) perturbation -= dot * source_direction perturbation *= epsilon * source_norm / torch.norm(perturbation) D = 1 / np.sqrt(epsilon ** 2 + 1) direction = perturbation - unnormed_source_direction spherical_candidate = current + D * direction spherical_candidate = torch.clamp(spherical_candidate, 0, 255) new_source_direction = original - spherical_candidate new_source_direction_norm = torch.norm(new_source_direction) # length if spherical_candidate would be exactly on the sphere length = delta * source_norm # length including correction for deviation from sphere deviation = new_source_direction_norm - source_norm length += deviation # make sure the step size is positive length = max(0, length) # normalize the length length = length / new_source_direction_norm candidate = spherical_candidate + length * new_source_direction candidate = torch.clamp(candidate, 0, 255) sample = poisson(datum=candidate, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) # Check for incorrect classification. s = spikes['Y'].get('s').view(1, n_neurons, time) prediction = ngram(spikes=s, ngram_scores=ngram_scores, n_labels=10, n=2).item() # Optionally plot various simulation information. if plot: _input = original.view(side_length, side_length) reconstruction = candidate.view(side_length, side_length) _spikes = { 'X': spikes['X'].get('s').view(side_length ** 2, time), 'Y': spikes['Y'].get('s').view(n_neurons, time) } w = network.connections['X', 'Y'].w spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_locally_connected_weights( w, n_filters, kernel_size, conv_size, locations, side_length, im=weights_im ) inpt_axes, inpt_ims = plot_input( _input, reconstruction, label=labels[i], ims=inpt_ims, axes=inpt_axes ) plt.pause(1e-8) if prediction == label: print('Attack failed.') else: print('Attack succeeded.') adv_example = candidate j += 1 network.reset_() # Reset state variables. print('\nAdversarial attack complete.\n')
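# A condensed sketch of the perturbation step inside the attack loop above, on
# plain tensors and without the spiking network in the loop: draw a random
# direction, remove its component along the direction back to the original
# image (the epsilon-sized "orthogonal" step), project onto the sphere, and
# finally step a fraction delta of the way back toward the original. Variable
# names follow the loop above; this is an illustration, not a drop-in helper
# from the script.
import numpy as np
import torch


def boundary_step(original, current, delta=0.1, epsilon=0.1):
    perturbation = torch.randn(original.size())

    # Direction from the random point back toward the original image.
    unnormed_source_direction = original - perturbation
    source_norm = torch.norm(unnormed_source_direction)
    source_direction = unnormed_source_direction / source_norm

    # Orthogonal step: remove the component along the source direction,
    # then rescale to an epsilon-sized step.
    perturbation -= torch.dot(perturbation, source_direction) * source_direction
    perturbation *= epsilon * source_norm / torch.norm(perturbation)

    # Project the candidate back onto the sphere around the original image.
    D = 1 / np.sqrt(epsilon ** 2 + 1)
    spherical_candidate = torch.clamp(current + D * (perturbation - unnormed_source_direction), 0, 255)

    # Move a fraction delta of the way toward the original image, correcting
    # for any deviation from the sphere.
    new_source_direction = original - spherical_candidate
    new_norm = torch.norm(new_source_direction)
    length = max(0, delta * source_norm + (new_norm - source_norm)) / new_norm
    return torch.clamp(spherical_candidate + length * new_source_direction, 0, 255)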
def main(seed=0, n_train=60000, n_test=10000, c_low=1, c_high=25, p_low=0.5, kernel_size=(16,), stride=(2,), n_filters=25, crop=4, lr=0.01, lr_decay=1, time=100, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, norm=0.2, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, c_low, c_high, p_low, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, n_test, c_low, c_high, p_low, time, dt, theta_plus, theta_decay, intensity, norm, progress_interval, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) side_length = 28 - crop * 2 n_inpt = side_length ** 2 input_shape = [side_length, side_length] n_examples = n_train if train else n_test n_classes = 10 if _pair(kernel_size) == input_shape: conv_size = [1, 1] else: conv_size = (int((input_shape[0] - _pair(kernel_size)[0]) / _pair(stride)[0]) + 1, int((input_shape[1] - _pair(kernel_size)[1]) / _pair(stride)[1]) + 1) # Build network. if train: network = Network() input_layer = Input(n=n_inpt, traces=True, trace_tc=5e-2) output_layer = DiehlAndCookNodes( n=n_filters * conv_size[0] * conv_size[1], traces=True, rest=-65.0, reset=-60.0, thresh=-52.0, refrac=5, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus, theta_decay=theta_decay ) input_output_conn = LocallyConnectedConnection( input_layer, output_layer, kernel_size=kernel_size, stride=stride, n_filters=n_filters, nu=[0, lr], update_rule=PostPre, wmin=0, wmax=1, norm=norm, input_shape=input_shape ) w = torch.zeros(n_filters, *conv_size, n_filters, *conv_size) for fltr1 in range(n_filters): for fltr2 in range(n_filters): if fltr1 != fltr2: for j in range(conv_size[0]): for k in range(conv_size[1]): x1, y1 = fltr1 // np.sqrt(n_filters), fltr1 % np.sqrt(n_filters) x2, y2 = fltr2 // np.sqrt(n_filters), fltr2 % np.sqrt(n_filters) w[fltr1, j, k, fltr2, j, k] = max(-c_high, -c_low * np.sqrt(euclidean([x1, y1], [x2, y2]))) w = w.view(n_filters * conv_size[0] * conv_size[1], n_filters * conv_size[0] * conv_size[1]) recurrent_conn = Connection(output_layer, output_layer, w=w) plt.matshow(w) plt.colorbar() network.add_layer(input_layer, name='X') network.add_layer(output_layer, name='Y') network.add_connection(input_output_conn, source='X', target='Y') network.add_connection(recurrent_conn, source='Y', target='Y') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu ) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 conv_size = network.connections['X', 'Y'].conv_size locations = network.connections['X', 'Y'].locations conv_prod = int(np.prod(conv_size)) n_neurons = n_filters * conv_prod # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') # Load MNIST data. 
dataset = MNIST(path=data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images *= intensity images = images[:, crop:-crop, crop:-crop] # Record spikes during the simulation. spike_record = torch.zeros(update_interval, time, n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, 10)) rates = torch.zeros_like(torch.Tensor(n_neurons, 10)) ngram_scores = {} else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb')) if train: best_accuracy = 0 # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = { scheme: torch.Tensor().long() for scheme in curves.keys() } spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name=f'{layer}_spikes') # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') spike_ims = None spike_axes = None weights_im = None # Calculate linear increase every update interval. if train: n_increase = int(p_low * n_examples) / update_interval increase = (c_high - c_low) / n_increase increases = 0 inhib = c_low start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay if increases < n_increase: inhib = inhib + increase print(f'\nIncreasing inhibition to {inhib}.\n') w = torch.zeros(n_filters, *conv_size, n_filters, *conv_size) for fltr1 in range(n_filters): for fltr2 in range(n_filters): if fltr1 != fltr2: for j in range(conv_size[0]): for k in range(conv_size[1]): x1, y1 = fltr1 // np.sqrt(n_filters), fltr1 % np.sqrt(n_filters) x2, y2 = fltr2 // np.sqrt(n_filters), fltr2 % np.sqrt(n_filters) w[fltr1, j, k, fltr2, j, k] = max(-c_high, -c_low * np.sqrt(euclidean([x1, y1], [x2, y2]))) w = w.view(n_filters * conv_size[0] * conv_size[1], n_filters * conv_size[0] * conv_size[1]) network.connections['Y', 'Y'].w = w if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves( curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2 ) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. 
assignments, proportions, rates = assign_labels(spike_record, current_labels, 10, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, 10, 2, ngram_scores) print() # Get next input sample. image = images[i % update_interval].contiguous().view(-1) sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = poisson(datum=image, time=time, dt=dt) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: _spikes = { 'X': spikes['X'].get('s').view(side_length ** 2, time), 'Y': spikes['Y'].get('s').view(n_filters * conv_prod, time) } spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_locally_connected_weights( network.connections[('X', 'Y')].w, n_filters, kernel_size, conv_size, locations, side_length, im=weights_im ) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves( curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2 ) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) # Save results to disk. 
results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,c_low,c_high,p_low,time,timestep,theta_plus,' 'theta_decay,intensity,norm,progress_interval,update_interval,mean_all_activity,' 'mean_proportion_weighting,mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,n_test,c_low,c_high,p_low,time,timestep,' 'theta_plus,theta_decay,intensity,norm,progress_interval,update_interval,mean_all_activity,' 'mean_proportion_weighting,mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat([labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusions = {} for scheme in predictions: confusions[scheme] = confusion_matrix(labels, predictions[scheme]) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusions, os.path.join(confusion_path, f))
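# The recurrent weights built above give distance-dependent lateral inhibition
# between filters of the locally connected layer: inhibition grows with the
# Euclidean distance between filter positions on a sqrt(n_filters) x
# sqrt(n_filters) grid and is clamped at -c_high. A standalone sketch of that
# construction; `euclidean` is assumed to be scipy.spatial.distance.euclidean,
# matching the usage above.
import numpy as np
import torch
from scipy.spatial.distance import euclidean


def lateral_inhibition_weights(n_filters, conv_size, c_low, c_high):
    side = np.sqrt(n_filters)  # filters are laid out on a square grid
    w = torch.zeros(n_filters, *conv_size, n_filters, *conv_size)
    for f1 in range(n_filters):
        for f2 in range(n_filters):
            if f1 == f2:
                continue
            x1, y1 = f1 // side, f1 % side
            x2, y2 = f2 // side, f2 % side
            # More distant filters inhibit each other more strongly,
            # up to a maximum magnitude of c_high.
            inhib = max(-c_high, -c_low * np.sqrt(euclidean([x1, y1], [x2, y2])))
            for j in range(conv_size[0]):
                for k in range(conv_size[1]):
                    w[f1, j, k, f2, j, k] = inhib
    # Flatten to an (n_neurons, n_neurons) recurrent weight matrix.
    n = n_filters * conv_size[0] * conv_size[1]
    return w.view(n, n)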
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=500, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1 / 40, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] test_params = [ seed, n_neurons, n_train, n_test, inhib, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 10 # Build network. if train: network = Network() input_layer = RealInput(n=40, traces=True, trace_tc=5e-2) network.add_layer(input_layer, name='X') output_layer = DiehlAndCookNodes(n=n_neurons, traces=True, rest=-65.0, reset=-65.0, thresh=-52.0, refrac=5, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus, theta_decay=theta_decay) network.add_layer(output_layer, name='Y') w = 0.3 * torch.rand(40, n_neurons) input_connection = Connection(source=network.layers['X'], target=network.layers['Y'], w=w, update_rule=PostPre, nu=(0, 1), wmin=0, wmax=1, norm=4) network.add_connection(input_connection, source='X', target='Y') w = -inhib * (torch.ones(n_neurons, n_neurons) - torch.diag(torch.ones(n_neurons))) recurrent_connection = Connection(source=network.layers['Y'], target=network.layers['Y'], w=w, wmin=-inhib, wmax=0) network.add_connection(recurrent_connection, source='Y', target='Y') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load Spoken MNIST data. dataset = SpokenMNIST(path=data_path, download=True, shuffle=False) if train: audio, labels = dataset.get_train() else: audio, labels = dataset.get_test() audio = [_ * intensity for _ in audio] # Record spikes during the simulation. avg_time = int(np.mean([_.size(0) for _ in audio])) spike_record = torch.zeros(update_interval, avg_time, n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, 10)) rates = torch.zeros_like(torch.Tensor(n_neurons, 10)) ngram_scores = {} else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load( open(path, 'rb')) # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()} if train: best_accuracy = 0 spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=avg_time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. 
if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None weights_im = None assigns_im = None perf_ax = None start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(audio) - update_interval:i % len(audio)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat( [predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, current_labels, 10, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, 10, 2, ngram_scores) print() # Get next input sample. sample = audio[i % len(audio)] sample = sample[:40, :] inpts = {'X': sample} time = min(avg_time, sample.size(0)) # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 sample *= 2 inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: # _input = image.view(28, 28) # reconstruction = inpts['X'].view(time, 40).sum(0).view(8, 5) _spikes = {layer: spikes[layer].get('s') for layer in spikes} input_exc_weights = network.connections[('X', 'Y')].w square_weights = get_square_weights( input_exc_weights.view(40, n_neurons), n_sqrt, (8, 5)) # square_assignments = get_square_assignments(assignments, n_sqrt) # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) # assigns_im = plot_assignments(square_assignments, im=assigns_im) # perf_ax = plot_performance(curves, ax=perf_ax) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(audio) - update_interval:i % len(audio)] # Update and print accuracy evaluations. 
curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. if train: network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) # Save results to disk. results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,n_neurons,n_train,inhib,timestep,theta_plus,theta_decay,intensity,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,n_neurons,n_train,n_test,inhib,timestep,theta_plus,theta_decay,intensity,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusions = {} for scheme in predictions: confusions[scheme] = confusion_matrix(labels, predictions[scheme]) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusions, os.path.join(confusion_path, f))
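# The scripts above repeatedly truncate or tile the label tensor so that it
# lines up with the number of presented examples before building confusion
# matrices. A small sketch of that logic factored into a helper; the
# doubling-then-topping-up loop mirrors the one used above.
import torch


def match_length(labels, n_examples):
    if labels.numel() > n_examples:
        return labels[:n_examples]
    # Repeat the labels (doubling, then topping up) until they cover n_examples.
    while labels.numel() < n_examples:
        if 2 * labels.numel() > n_examples:
            labels = torch.cat([labels, labels[:n_examples - labels.numel()]])
        else:
            labels = torch.cat([labels, labels])
    return labels


# Example: match_length(torch.tensor([0, 1, 2]), 7) -> tensor([0, 1, 2, 0, 1, 2, 0])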
import os

from sklearn.linear_model import LogisticRegression

from bindsnet.analysis.plotting import plot_spikes, plot_locally_connected_weights, plot_weights
from bindsnet.datasets import MNIST
from bindsnet.encoding import poisson
from bindsnet.network import load_network
from experiments import ROOT_DIR

plot = True

path = os.path.join(
    ROOT_DIR, 'params', 'mnist', 'crop_locally_connected',
    '0_12_4_150_4_0.01_0.99_60000_250.0_250_1.0_0.05_1e-07_0.5_0.2_10_250.pt'
)
network = load_network(file_name=path, learning=False)

for l in network.layers:
    network.layers[l].dt = 1
    network.layers[l].lbound = None

for m in network.monitors:
    network.monitors[m].record_length = 0

network.layers['Y'].theta_plus = 0
network.layers['Y'].theta_decay = 0

del network.connections['Y', 'Y']

n_classes = 10
time = 250
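# A sketch of how a single image could be pushed through the loaded network to
# obtain a spike-count feature vector for a downstream classifier:
# Poisson-encode the pixel intensities, run the network for `time` steps, and
# sum the output spikes over time, as in the training scripts above. The
# monitor name 'Y_spikes' and the helper below are assumptions for
# illustration; the Monitor import path matches the bindsnet version these
# scripts target.
from bindsnet.network.monitors import Monitor

monitor = Monitor(network.layers['Y'], state_vars=['s'], time=time)
network.add_monitor(monitor, name='Y_spikes')


def spike_counts(image):
    # Poisson-encode the flattened image and present it for `time` timesteps.
    sample = poisson(datum=image.contiguous().view(-1), time=time)
    network.run(inpts={'X': sample}, time=time)
    counts = monitor.get('s').view(time, -1).sum(0).float()  # spikes per neuron
    network.reset_()  # reset state variables before the next image
    return counts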
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=100, lr_decay=1, time=350, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False, lr=1e-2, norm=32**2 / 5): #assert n_train % update_interval == 0 and n_test % update_interval == 0, \ # 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, lr_decay, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] test_params = [ seed, n_neurons, n_train, n_test, inhib, lr_decay, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 5 # Build network. if train: network = DiehlAndCook2015v2(n_inpt=32**2, n_neurons=n_neurons, inh=inhib, dt=dt, norm=norm, theta_plus=theta_plus, theta_decay=theta_decay, nu_pre=0, nu_post=np.sqrt(350 / float(time)) * lr) else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load MNIST data. #dataset = MNIST(path=data_path, download=True) dataset = VPR('./data/Dataset_lighting4/left') if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() n_examples = images.shape[0] images = images.view(-1, 32**2) images *= intensity # Record spikes during the simulation. spike_record = torch.zeros(update_interval, time, n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, 5)) rates = torch.zeros_like(torch.Tensor(n_neurons, 5)) ngram_scores = {} else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load( open(path, 'rb')) # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()} if train: best_accuracy = 0 spikes = {} for layer in set(network.layers) - {'X'}: spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None weights_im = None assigns_im = None perf_ax = None start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. 
curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat( [predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, current_labels, 5, rates) # Compute ngram scores. ngram_scores = update_ngram_scores(spike_record, current_labels, 5, 2, ngram_scores) print() # Get next input sample. image = images[i % len(images)] sample = poisson(datum=image, time=time) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = poisson(datum=image, time=time) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: # _input = image.view(28, 28) # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28) _spikes = {layer: spikes[layer].get('s') for layer in spikes} input_exc_weights = network.connections[('X', 'Y')].w square_weights = get_square_weights( input_exc_weights.view(32**2, n_neurons), n_sqrt, 32) # square_assignments = get_square_assignments(assignments, n_sqrt) # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) # assigns_im = plot_assignments(square_assignments, im=assigns_im) # perf_ax = plot_performance(curves, ax=perf_ax) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. 
if train: network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) # Save results to disk. results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,n_neurons,n_train,inhib,lr_decay,time,timestep,theta_plus,theta_decay,intensity,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,n_neurons,n_train,n_test,inhib,lr_decay,time,timestep,theta_plus,theta_decay,intensity,' 'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusions = {} for scheme in predictions: confusions[scheme] = confusion_matrix(labels, predictions[scheme]) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusions, os.path.join(confusion_path, f))
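# Several scripts above re-present an input with doubled intensity when the
# output layer stays nearly silent. A compact sketch of that retry pattern,
# factored into a helper; it assumes the `poisson` encoder and the `spikes`
# monitor dict built earlier in each script, and the helper name is
# illustrative.
from bindsnet.encoding import poisson


def run_with_retries(network, spikes, image, time, min_spikes=5, max_retries=3):
    # Present the image; if the output layer barely fires, double the
    # intensity and present it again, as in the loops above.
    sample = poisson(datum=image, time=time)
    network.run(inpts={'X': sample}, time=time)

    retries = 0
    while spikes['Y'].get('s').sum() < min_spikes and retries < max_retries:
        retries += 1
        image = image * 2
        sample = poisson(datum=image, time=time)
        network.run(inpts={'X': sample}, time=time)

    return image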
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, c_low=1, c_high=25, p_low=0.5, time=250, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0,\ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, c_low, c_high, p_low, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, n_neurons, n_train, n_test, c_low, c_high, p_low, time, dt, theta_plus, theta_decay, intensity, progress_interval, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 10 # Build network. if train: network = Network(dt=dt) input_layer = Input(n=784, traces=True) exc_layer = DiehlAndCookNodes(n=n_neurons, traces=True) w = torch.rand(input_layer.n, exc_layer.n) input_exc_conn = Connection(input_layer, exc_layer, w=w, update_rule=PostPre, norm=78.4, nu=(1e-4, 1e-2), wmax=1.0) w = torch.zeros(exc_layer.n, exc_layer.n) for k1 in range(n_neurons): for k2 in range(n_neurons): if k1 != k2: x1, y1 = k1 // np.sqrt(n_neurons), k1 % np.sqrt(n_neurons) x2, y2 = k2 // np.sqrt(n_neurons), k2 % np.sqrt(n_neurons) w[k1, k2] = max( -c_high, -c_low * np.sqrt(euclidean([x1, y1], [x2, y2]))) recurrent_conn = Connection(exc_layer, exc_layer, w=w) network.add_layer(input_layer, name='X') network.add_layer(exc_layer, name='Y') network.add_connection(input_exc_conn, source='X', target='Y') network.add_connection(recurrent_conn, source='Y', target='Y') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load MNIST data. dataset = MNIST(data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images = images.view(-1, 784) images *= intensity # Record spikes during the simulation. spike_record = torch.zeros(update_interval, int(time / dt), n_neurons) # Neuron assignments and spike proportions. if train: assignments = -torch.ones_like(torch.Tensor(n_neurons)) proportions = torch.zeros_like(torch.Tensor(n_neurons, 10)) rates = torch.zeros_like(torch.Tensor(n_neurons, 10)) ngram_scores = {} else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') assignments, proportions, rates, ngram_scores = torch.load( open(path, 'rb')) # Sequence of accuracy estimates. curves = {'all': [], 'proportion': [], 'ngram': []} predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()} if train: best_accuracy = 0 spikes = {} for layer in set(network.layers) - {'X'}: spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=int(time / dt)) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None weights_im = None assigns_im = None perf_ax = None # Calculate linear increase every update interval. 
if train: n_increase = int(p_low * n_examples) / update_interval increase = (c_high - c_low) / n_increase increases = 0 inhib = c_low start = t() for i in range(n_examples): if train and i % update_interval == 0 and i > 0 and increases < n_increase: inhib = inhib + increase print(f'\nIncreasing inhibition to {inhib}.\n') w = torch.zeros(n_neurons, n_neurons) for k1 in range(n_neurons): for k2 in range(n_neurons): if k1 != k2: x1, y1 = k1 // np.sqrt(n_neurons), k1 % np.sqrt( n_neurons) x2, y2 = k2 // np.sqrt(n_neurons), k2 % np.sqrt( n_neurons) w[k1, k2] = max( -c_high, -inhib * np.sqrt(euclidean([x1, y1], [x2, y2]))) network.connections['Y', 'Y'].w = w increases += 1 if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i % update_interval == 0 and i > 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat( [predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Assign labels to excitatory layer neurons. assignments, proportions, rates = assign_labels( spike_record, labels[i - update_interval:i], 10, rates) # Compute ngram scores. ngram_scores = update_ngram_scores( spike_record, labels[i - update_interval:i], 10, 2, ngram_scores) print() # Get next input sample. image = images[i] sample = poisson(datum=image, time=int(time / dt)) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) retries = 0 while spikes['Y'].get('s').sum() < 5 and retries < 3: retries += 1 image *= 2 sample = poisson(datum=image, time=int(time / dt)) inpts = {'X': sample} network.run(inpts=inpts, time=time) # Add to spikes recording. spike_record[i % update_interval] = spikes['Y'].get('s').t() # Optionally plot various simulation information. if plot: inpt = inpts['X'].view(time, 784).sum(0).view(28, 28) _spikes = {layer: spikes[layer].get('s') for layer in spikes} input_exc_weights = network.connections['X', 'Y'].w square_weights = get_square_weights( input_exc_weights.view(784, n_neurons), n_sqrt, 28) square_assignments = get_square_assignments(assignments, n_sqrt) # inpt_axes, inpt_ims = plot_input(images[i].view(28, 28), inpt, label=labels[i], axes=inpt_axes, ims=inpt_ims) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) # assigns_im = plot_assignments(square_assignments, im=assigns_im) # perf_ax = plot_performance(curves, ax=perf_ax) plt.pause(1e-8) network.reset_() # Reset state variables. 
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments, proportions=proportions, ngram_scores=ngram_scores, n=2) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print(f'\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params to_write = [str(x) for x in to_write] f = '_'.join(to_write) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) results = [ np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']), np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram']) ] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: f.write( 'random_seed,n_neurons,n_train,excite,c_low,c_high,p_low,time,timestep,theta_plus,theta_decay,' 'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) else: f.write( 'random_seed,n_neurons,n_train,n_test,excite,c_low,c_high,p_low,time,timestep,theta_plus,theta_decay,' 'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,' 'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n' ) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. confusions = {} for scheme in predictions: confusions[scheme] = confusion_matrix(labels, predictions[scheme]) to_write = ['train'] + params if train else ['test'] + test_params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save(confusions, os.path.join(confusion_path, f)) print()
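# The growing-inhibition runs above spread the increase from c_low to c_high
# over the first p_low fraction of training, one step per update interval. A
# small numeric sketch of that schedule, independent of the network; the
# function name is illustrative.
def inhibition_schedule(c_low, c_high, p_low, n_examples, update_interval):
    # Number of increases and per-step increment, as computed above.
    n_increase = int(p_low * n_examples) / update_interval
    increase = (c_high - c_low) / n_increase

    inhib = c_low
    schedule = [inhib]
    for _ in range(int(n_increase)):
        inhib += increase
        schedule.append(inhib)
    return schedule


# Example with the defaults above: c_low=1, c_high=25, p_low=0.5, 60000
# training examples and update_interval=250 give 120 increases of 0.2 each,
# ending near 25.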
hidden_bias_connection = Connection(source=hidden_bias, target=hidden_layer) hidden_connection = Connection(source=hidden_layer, target=output_layer) output_bias_connection = Connection(source=output_bias, target=output_layer) network.add_connection(input_connection, source='X', target='Y') network.add_connection(hidden_bias_connection, source='Y_b', target='Y') network.add_connection(hidden_connection, source='Y', target='Z') network.add_connection(output_bias_connection, source='Z_b', target='Z') # State variable monitoring. for l in network.layers: m = Monitor(network.layers[l], state_vars=['s'], time=time) network.add_monitor(m, name=l) else: network = load_network(os.path.join(params_path, model_name + '.pt')) # Load MNIST data. dataset = MNIST(path=data_path, download=True, shuffle=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images, labels = images[:n_examples], labels[:n_examples] images, labels = iter(images.view(-1, 784) / 255), iter(labels) grads = {} accuracies = [] predictions = []
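# The bias layers ('Y_b', 'Z_b') wired above act as constant-on inputs whose
# outgoing weights play the role of per-neuron biases. A minimal sketch of the
# idea on a tiny network; layer names and sizes are illustrative, and the
# import paths assume the bindsnet version these scripts target.
import torch
from bindsnet.network import Network
from bindsnet.network.nodes import LIFNodes, RealInput
from bindsnet.network.topology import Connection

net = Network(dt=1.0)

bias = RealInput(n=1)     # a single always-on "bias" input
target = LIFNodes(n=10)   # the layer receiving the bias current

net.add_layer(bias, name='b')
net.add_layer(target, name='Z')
net.add_connection(Connection(source=bias, target=target), source='b', target='Z')

# Driving the bias input with 1.0 on every timestep makes the connection
# weights act as constant per-neuron input currents, i.e. biases.
time = 25
net.run(inpts={'b': torch.ones(time, 1)}, time=time)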
def main(seed=0, n_train=60000, n_test=10000, kernel_size=(16, ), stride=(4, ), n_filters=25, padding=0, inhib=100, time=25, lr=1e-3, lr_decay=0.99, dt=1, intensity=1, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_train, kernel_size, stride, n_filters, padding, inhib, time, lr, lr_decay, dt, intensity, update_interval ] model_name = '_'.join([str(x) for x in params]) if not train: test_params = [ seed, n_train, n_test, kernel_size, stride, n_filters, padding, inhib, time, lr, lr_decay, dt, intensity, update_interval ] np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test input_shape = [20, 20] if kernel_size == input_shape: conv_size = [1, 1] else: conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1, int((input_shape[1] - kernel_size[1]) / stride[1]) + 1) n_classes = 10 n_neurons = n_filters * np.prod(conv_size) total_kernel_size = int(np.prod(kernel_size)) total_conv_size = int(np.prod(conv_size)) # Build network. if train: network = Network() input_layer = Input(n=400, shape=(1, 1, 20, 20), traces=True) conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size), thresh=-64.0, traces=True, theta_plus=0.05 * (kernel_size[0] / 20), refrac=0) conv_layer2 = LIFNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size), refrac=0) conv_conn = Conv2dConnection(input_layer, conv_layer, kernel_size=kernel_size, stride=stride, update_rule=WeightDependentPostPre, norm=0.05 * total_kernel_size, nu=[0, lr], wmin=0, wmax=0.25) conv_conn2 = Conv2dConnection(input_layer, conv_layer2, w=conv_conn.w, kernel_size=kernel_size, stride=stride, update_rule=None, wmax=0.25) w = -inhib * torch.ones(n_filters, conv_size[0], conv_size[1], n_filters, conv_size[0], conv_size[1]) for f in range(n_filters): for f2 in range(n_filters): if f != f2: w[f, :, :f2, :, :] = 0 w = w.view(n_filters * conv_size[0] * conv_size[1], n_filters * conv_size[0] * conv_size[1]) recurrent_conn = Connection(conv_layer, conv_layer, w=w) network.add_layer(input_layer, name='X') network.add_layer(conv_layer, name='Y') network.add_layer(conv_layer2, name='Y_') network.add_connection(conv_conn, source='X', target='Y') network.add_connection(conv_conn2, source='X', target='Y_') network.add_connection(recurrent_conn, source='Y', target='Y') # Voltage recording for excitatory and inhibitory layers. voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time) network.add_monitor(voltage_monitor, name='output_voltage') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load MNIST data. dataset = MNIST(data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images *= intensity images = images[:, 4:-4, 4:-4].contiguous() # Record spikes during the simulation. spike_record = torch.zeros(update_interval, time, n_neurons) full_spike_record = torch.zeros(n_examples, n_neurons) # Neuron assignments and spike proportions. 
if train: logreg_model = LogisticRegression(warm_start=True, n_jobs=-1, solver='lbfgs', max_iter=1000, multi_class='multinomial') logreg_model.coef_ = np.zeros([n_classes, n_neurons]) logreg_model.intercept_ = np.zeros(n_classes) logreg_model.classes_ = np.arange(n_classes) else: path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') logreg_coef, logreg_intercept = torch.load(open(path, 'rb')) logreg_model = LogisticRegression(warm_start=True, n_jobs=-1, solver='lbfgs', max_iter=1000, multi_class='multinomial') logreg_model.coef_ = logreg_coef logreg_model.intercept_ = logreg_intercept logreg_model.classes_ = np.arange(n_classes) # Sequence of accuracy estimates. curves = {'logreg': []} predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()} if train: best_accuracy = 0 spikes = {} for layer in set(network.layers): spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_ims = None inpt_axes = None spike_ims = None spike_axes = None weights_im = None plot_update_interval = 100 start = t() for i in range(n_examples): if i % progress_interval == 0: print('Progress: %d / %d (%.4f seconds)' % (i, n_examples, t() - start)) start = t() if i % update_interval == 0 and i > 0: if train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay if i % len(labels) == 0: current_labels = labels[-update_interval:] current_record = full_spike_record[-update_interval:] else: current_labels = labels[i % len(labels) - update_interval:i % len(labels)] current_record = full_spike_record[i % len(labels) - update_interval:i % len(labels)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, full_spike_record=current_record, logreg=logreg_model) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat( [predictions[scheme], preds[scheme]], -1) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params f = '_'.join([str(x) for x in to_write]) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print( 'New best accuracy! Saving network parameters to disk.' ) # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join( params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((logreg_model.coef_, logreg_model.intercept_), open(path, 'wb')) best_accuracy = max([x[-1] for x in curves.values()]) # Refit logistic regression model. logreg_model = logreg_fit(full_spike_record[:i], labels[:i], logreg_model) print() # Get next input sample. image = images[i % len(images)] sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1).unsqueeze(1).unsqueeze(1) inpts = {'X': sample} # Run the network on the input. network.run(inpts=inpts, time=time) network.connections['X', 'Y_'].w = network.connections['X', 'Y'].w # Add to spikes recording. spike_record[i % update_interval] = spikes['Y_'].get('s').view( time, -1) full_spike_record[i] = spikes['Y_'].get('s').view(time, -1).sum(0) # Optionally plot various simulation information. 
if plot and i % plot_update_interval == 0: _input = inpts['X'].view(time, 400).sum(0).view(20, 20) w = network.connections['X', 'Y'].w _spikes = { 'X': spikes['X'].get('s').view(400, time), 'Y': spikes['Y'].get('s').view(n_filters * total_conv_size, time), 'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size, time) } inpt_axes, inpt_ims = plot_input(image.view(20, 20), _input, label=labels[i % len(labels)], ims=inpt_ims, axes=inpt_axes) spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_conv2d_weights( w, im=weights_im, wmax=network.connections['X', 'Y'].wmax) plt.pause(1e-2) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') i += 1 if i % len(labels) == 0: current_labels = labels[-update_interval:] current_record = full_spike_record[-update_interval:] else: current_labels = labels[i % len(labels) - update_interval:i % len(labels)] current_record = full_spike_record[i % len(labels) - update_interval:i % len(labels)] # Update and print accuracy evaluations. curves, preds = update_curves(curves, current_labels, n_classes, full_spike_record=current_record, logreg=logreg_model) print_results(curves) for scheme in preds: predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1) if train: if any([x[-1] > best_accuracy for x in curves.values()]): print('New best accuracy! Saving network parameters to disk.') # Save network to disk. network.save(os.path.join(params_path, model_name + '.pt')) path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt') torch.save((logreg_model.coef_, logreg_model.intercept_), open(path, 'wb')) if train: print('\nTraining complete.\n') else: print('\nTest complete.\n') print('Average accuracies:\n') for scheme in curves.keys(): print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme])))) # Save accuracy curves to disk. to_write = ['train'] + params if train else ['test'] + params to_write = [str(x) for x in to_write] f = '_'.join(to_write) + '.pt' torch.save((curves, update_interval, n_examples), open(os.path.join(curves_path, f), 'wb')) # Save results to disk. results = [np.mean(curves['logreg']), np.std(curves['logreg'])] to_write = params + results if train else test_params + results to_write = [str(x) for x in to_write] name = 'train.csv' if train else 'test.csv' if not os.path.isfile(os.path.join(results_path, name)): with open(os.path.join(results_path, name), 'w') as f: if train: columns = [ 'seed', 'n_train', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time', 'lr', 'lr_decay', 'dt', 'intensity', 'update_interval', 'mean_logreg', 'std_logreg' ] header = ','.join(columns) + '\n' f.write(header) else: columns = [ 'seed', 'n_train', 'n_test', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time', 'lr', 'lr_decay', 'dt', 'intensity', 'update_interval', 'mean_logreg', 'std_logreg' ] header = ','.join(columns) + '\n' f.write(header) with open(os.path.join(results_path, name), 'a') as f: f.write(','.join(to_write) + '\n') if labels.numel() > n_examples: labels = labels[:n_examples] else: while labels.numel() < n_examples: if 2 * labels.numel() > n_examples: labels = torch.cat( [labels, labels[:n_examples - labels.numel()]]) else: labels = torch.cat([labels, labels]) # Compute confusion matrices and save them to disk. 
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))
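# The conv-layer script above classifies spike-count vectors with a
# warm-started scikit-learn LogisticRegression that is refit on all examples
# seen so far at every update interval; `logreg_fit` is assumed to wrap
# something like the .fit call below. A minimal sketch on synthetic data.
import numpy as np
from sklearn.linear_model import LogisticRegression

n_classes, n_neurons = 10, 625

# warm_start=True means each refit continues from the previous coefficients.
logreg = LogisticRegression(warm_start=True, n_jobs=-1, solver='lbfgs',
                            max_iter=1000, multi_class='multinomial')

# Synthetic spike-count features and labels standing in for full_spike_record
# and labels (hypothetical data, for illustration only).
X = np.random.rand(500, n_neurons)
y = np.random.randint(0, n_classes, size=500)

logreg.fit(X, y)                   # refit on everything seen so far
predictions = logreg.predict(X)    # per-example class predictions
accuracy = 100 * (predictions == y).mean()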
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=100, lr=0.01, lr_decay=1, time=350, dt=1, theta_plus=0.05, theta_decay=1e-7, progress_interval=10, update_interval=250, plot=False, train=True, gpu=False): assert n_train % update_interval == 0 and n_test % update_interval == 0, \ 'No. examples must be divisible by update_interval' params = [ seed, n_neurons, n_train, inhib, lr_decay, time, dt, theta_plus, theta_decay, progress_interval, update_interval ] test_params = [ seed, n_neurons, n_train, n_test, inhib, lr_decay, time, dt, theta_plus, theta_decay, progress_interval, update_interval ] model_name = '_'.join([str(x) for x in params]) np.random.seed(seed) if gpu: torch.set_default_tensor_type('torch.cuda.FloatTensor') torch.cuda.manual_seed_all(seed) else: torch.manual_seed(seed) n_examples = n_train if train else n_test n_sqrt = int(np.ceil(np.sqrt(n_neurons))) n_classes = 10 # Build network. if train: network = Network(dt=dt) input_layer = RealInput(n=784, traces=True, trace_tc=5e-2) network.add_layer(input_layer, name='X') output_layer = DiehlAndCookNodes( n=n_neurons, traces=True, rest=0, reset=1, thresh=1, refrac=0, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus, theta_decay=theta_decay ) network.add_layer(output_layer, name='Y') readout = IFNodes(n=n_classes, reset=0, thresh=1) network.add_layer(readout, name='Z') w = torch.rand(784, n_neurons) input_connection = Connection( source=input_layer, target=output_layer, w=w, update_rule=MSTDP, nu=lr, wmin=0, wmax=1, norm=78.4 ) network.add_connection(input_connection, source='X', target='Y') w = -inhib * (torch.ones(n_neurons, n_neurons) - torch.diag(torch.ones(n_neurons))) recurrent_connection = Connection( source=output_layer, target=output_layer, w=w, wmin=-inhib, wmax=0 ) network.add_connection(recurrent_connection, source='Y', target='Y') readout_connection = Connection( source=network.layers['Y'], target=readout, w=torch.rand(n_neurons, n_classes), norm=10 ) network.add_connection(readout_connection, source='Y', target='Z') else: network = load_network(os.path.join(params_path, model_name + '.pt')) network.connections['X', 'Y'].update_rule = NoOp( connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu ) network.layers['Y'].theta_decay = 0 network.layers['Y'].theta_plus = 0 # Load MNIST data. dataset = MNIST(path=data_path, download=True) if train: images, labels = dataset.get_train() else: images, labels = dataset.get_test() images = images.view(-1, 784) labels = labels.long() spikes = {} for layer in set(network.layers) - {'X'}: spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time) network.add_monitor(spikes[layer], name='%s_spikes' % layer) # Train the network. if train: print('\nBegin training.\n') else: print('\nBegin test.\n') inpt_axes = None inpt_ims = None spike_ims = None spike_axes = None weights_im = None weights2_im = None assigns_im = None perf_ax = None predictions = torch.zeros(update_interval).long() start = t() for i in range(n_examples): if i % progress_interval == 0: print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)') start = t() if i > 0 and train: network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay # Get next input sample. image = images[i % len(images)] # Run the network on the input. 
for j in range(time): readout = network.layers['Z'].s if readout[labels[i % len(labels)]]: network.run(inpts={'X': image.unsqueeze(0)}, time=1, reward=1, a_minus=0, a_plus=1) else: network.run(inpts={'X': image.unsqueeze(0)}, time=1, reward=0) label = spikes['Z'].get('s').sum(1).argmax() predictions[i % update_interval] = label.long() if i > 0 and i % update_interval == 0: if i % len(labels) == 0: current_labels = labels[-update_interval:] else: current_labels = labels[i % len(images) - update_interval:i % len(images)] accuracy = 100 * (predictions == current_labels).float().mean().item() print(f'Accuracy over last {update_interval} examples: {accuracy}') # Optionally plot various simulation information. if plot: _spikes = {layer: spikes[layer].get('s') for layer in spikes} input_exc_weights = network.connections['X', 'Y'].w square_weights = get_square_weights(input_exc_weights.view(784, n_neurons), n_sqrt, 28) exc_readout_weights = network.connections['Y', 'Z'].w # _input = image.view(28, 28) # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28) # square_assignments = get_square_assignments(assignments, n_sqrt) spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes) weights_im = plot_weights(square_weights, im=weights_im) weights2_im = plot_weights(exc_readout_weights, im=weights2_im) # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims) # assigns_im = plot_assignments(square_assignments, im=assigns_im) # perf_ax = plot_performance(curves, ax=perf_ax) plt.pause(1e-8) network.reset_() # Reset state variables. print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)') if train: print('\nTraining complete.\n') else: print('\nTest complete.\n')
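# The reward-modulated (MSTDP) script above advances the simulation one
# timestep at a time and delivers a positive reward whenever the readout
# neuron for the true label has just spiked. A stripped-down sketch of that
# control flow with plotting and bookkeeping removed; the run() keyword
# arguments mirror the calls above, and the helper name is illustrative.
def present_with_reward(network, image, label, time):
    for _ in range(time):
        readout_spikes = network.layers['Z'].s  # spikes of the readout layer
        if readout_spikes[label]:
            # The correct readout neuron fired: reward the MSTDP connection.
            network.run(inpts={'X': image.unsqueeze(0)}, time=1, reward=1, a_minus=0, a_plus=1)
        else:
            network.run(inpts={'X': image.unsqueeze(0)}, time=1, reward=0)

    network.reset_()  # reset state variables before the next example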