def __init__(self, n_inpt, n_neurons=100, inh=17.5, dt=1.0, nu=(1e-4, 1e-2),
             wmin=0.0, wmax=1.0, norm=78.4, theta_plus=0.05, theta_decay=1e-7):
    self.n_neurons = n_neurons
    self.n_output = n_neurons
    self.network = DiehlAndCook2015v2(
        n_inpt=n_inpt,
        n_neurons=n_neurons,
        inh=inh,
        dt=dt,
        nu=nu,
        wmin=wmin,
        wmax=wmax,
        norm=norm,
        theta_plus=theta_plus,
        theta_decay=theta_decay,
    )
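# A minimal usage sketch of the constructor above. The enclosing class is not
# named in this excerpt, so `SNNPipeline` below is a hypothetical stand-in:
model = SNNPipeline(n_inpt=784)   # 28x28 MNIST images flattened to 784 inputs
assert model.n_output == 100      # defaults to 100 excitatory neurons
net = model.network               # the wrapped DiehlAndCook2015v2 instance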
def main(args):
    if args.update_steps is None:
        # update_steps is the number of batches to classify before updating
        # the plots and accuracy estimates (e.g. 250 // 16 = 15 for a batch
        # size of 16, with a floor of 1).
        args.update_steps = max(250 // args.batch_size, 1)

    # update_interval is the number of examples to classify before updating
    # the plots (update_steps * batch_size, e.g. 15 * 16 = 240).
    update_interval = args.update_steps * args.batch_size

    # Sets up GPU use; seeds the RNGs so results are reproducible.
    torch.backends.cudnn.benchmark = False
    if args.gpu and torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    else:
        torch.manual_seed(args.seed)

    # Determines number of workers to use
    if args.n_workers == -1:
        args.n_workers = args.gpu * 4 * torch.cuda.device_count()

    n_sqrt = int(np.ceil(np.sqrt(args.n_neurons)))

    # Select how per-sample weight updates are reduced over the batch.
    if args.reduction == "sum":
        reduction = torch.sum
    elif args.reduction == "mean":
        reduction = torch.mean
    elif args.reduction == "max":
        reduction = max_without_indices
    else:
        raise NotImplementedError

    # Build network.
    network = DiehlAndCook2015v2(
        n_inpt=784,  # input dimensions are 28x28 = 784
        n_neurons=args.n_neurons,
        inh=args.inh,
        dt=args.dt,
        norm=78.4,
        nu=(1e-4, 1e-2),
        reduction=reduction,
        theta_plus=args.theta_plus,
        inpt_shape=(1, 28, 28),
    )

    # Directs network to GPU
    if args.gpu:
        network.to("cuda")

    # Load MNIST data.
    dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=True,
        # Compose several transforms: convert to tensor, then scale by intensity.
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * args.intensity)]
        ),
    )

    test_dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=False,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * args.intensity)]
        ),
    )

    # Neuron assignments and spike proportions.
    n_classes = 10
    assignments = -torch.ones(args.n_neurons)  # all neurons start unassigned (-1)
    proportions = torch.zeros(args.n_neurons, n_classes)  # n_neurons x n_classes zeros
    rates = torch.zeros(args.n_neurons, n_classes)  # n_neurons x n_classes zeros

    # Set up monitors for spikes and voltages. A Monitor records the named
    # state variables of a layer during simulation; passing `time`
    # pre-allocates memory for the recording.
    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer], state_vars=["s"], time=args.time)
        network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    weights_im = None
    spike_ims, spike_axes = None, None

    # Record spikes for length of update interval.
    spike_record = torch.zeros(update_interval, args.time, args.n_neurons)

    # Delete any existing log directory tree so each run starts with fresh logs.
    if os.path.isdir(args.log_dir):
        shutil.rmtree(args.log_dir)

    # Summary writer: logs PyTorch models and metrics into a directory for
    # TensorBoard visualization, flushing pending events to disk every 60 seconds.
    writer = SummaryWriter(log_dir=args.log_dir, flush_secs=60)
    for epoch in range(args.n_epochs):  # default is 1
        print(f"\nEpoch: {epoch}\n")

        labels = []

        # Create a DataLoader to iterate over and batch the dataset.
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,  # how many samples per batch to load
            shuffle=True,  # reshuffle the data at every epoch
            num_workers=args.n_workers,
            # If True, copy tensors into CUDA pinned memory before returning them.
            pin_memory=args.gpu,
        )

        for step, batch in enumerate(dataloader):
            print("Step:", step)

            global_step = 60000 * epoch + args.batch_size * step

            if step % args.update_steps == 0 and step > 0:
                # Convert the list of labels into a tensor.
                label_tensor = torch.tensor(labels)

                # Get network predictions.
                all_activity_pred = all_activity(
                    spikes=spike_record,
                    assignments=assignments,
                    n_labels=n_classes,
                )
                proportion_pred = proportion_weighting(
                    spikes=spike_record,
                    assignments=assignments,
                    proportions=proportions,
                    n_labels=n_classes,
                )

                # Record the accuracies at each update step.
                writer.add_scalar(
                    tag="accuracy/all vote",
                    scalar_value=torch.mean(
                        (label_tensor.long() == all_activity_pred).float()
                    ),
                    global_step=global_step,
                )
                value = torch.mean(
                    (label_tensor.long() == all_activity_pred).float()
                ).item()
                accuracy.append(value)
                print("ACCURACY:", value)
                writer.add_scalar(
                    tag="accuracy/proportion weighting",
                    scalar_value=torch.mean(
                        (label_tensor.long() == proportion_pred).float()
                    ),
                    global_step=global_step,
                )
                writer.add_scalar(
                    tag="spikes/mean",
                    scalar_value=torch.mean(torch.sum(spike_record, dim=1)),
                    global_step=global_step,
                )

                square_weights = get_square_weights(
                    network.connections["X", "Y"].w.view(784, args.n_neurons),
                    n_sqrt,
                    28,
                )
                img_tensor = colorize(square_weights, cmap="hot_r")
                writer.add_image(
                    tag="weights",
                    img_tensor=img_tensor,
                    global_step=global_step,
                    dataformats="HWC",
                )

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spikes=spike_record,
                    labels=label_tensor,
                    n_labels=n_classes,
                    rates=rates,
                )

                labels = []

            # For each batch, append its labels to the running list.
            labels.extend(batch["label"].tolist())

            # Prep next input batch; .cuda() moves the tensors to the selected GPU.
            inpts = {"X": batch["encoded_image"]}
            if args.gpu:
                inpts = {k: v.cuda() for k, v in inpts.items()}

            # Simulate the network on the input batch for the given time.
            t0 = time()
            network.run(inputs=inpts, time=args.time, one_step=args.one_step)
            t1 = time() - t0

            # Add to spikes recording, writing this batch into its slot of the
            # ring buffer that spans the current update interval.
            s = spikes["Y"].get("s").permute((1, 0, 2))
            spike_record[
                (step * args.batch_size) % update_interval:
                (step * args.batch_size) % update_interval + s.size(0)
            ] = s

            writer.add_scalar(
                tag="time/simulation", scalar_value=t1, global_step=global_step
            )

            # (Optional debugging: dump beginning/ending weights to CSV.)
            # if step == 1:
            #     input_exc_weights = network.connections["X", "Y"].w
            #     data = asarray(input_exc_weights.detach().cpu().clone().numpy())
            #     savetxt('data.csv', data)
            #     print("Beginning weights saved")
            # if step == 3749:
            #     input_exc_weights = network.connections["X", "Y"].w
            #     data2 = asarray(input_exc_weights.detach().cpu().clone().numpy())
            #     savetxt('data2.csv', data2)
            #     print("Ending weights saved")

            # Plot simulation data.
            if args.plot:
                input_exc_weights = network.connections["X", "Y"].w
                square_weights = get_square_weights(
                    input_exc_weights.view(784, args.n_neurons), n_sqrt, 28
                )
                spikes_ = {layer: spikes[layer].get("s")[:, 0] for layer in spikes}
                spike_ims, spike_axes = plot_spikes(spikes_, ims=spike_ims, axes=spike_axes)
                weights_im = plot_weights(square_weights, im=weights_im)

                plt.pause(1e-8)

            # Reset state variables.
            network.reset_state_variables()

    print(end_accuracy())
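# The helpers `max_without_indices`, `accuracy`, and `end_accuracy` are
# referenced in main() above but defined outside this excerpt. Minimal
# sketches under that assumption (not necessarily the repository's own
# definitions):

# A batch reduction compatible with torch.sum/torch.mean: keep only the
# values from the (values, indices) pair that torch.max returns.
def max_without_indices(x, dim=0, keepdim=False):
    return torch.max(x, dim=dim, keepdim=keepdim)[0]

# Running list of per-update-interval accuracies, plus a summary helper.
accuracy = []

def end_accuracy():
    # Mean of all recorded accuracies (0.0 if nothing was recorded).
    return sum(accuracy) / len(accuracy) if accuracy else 0.0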
def main(seed=0, n_neurons=100, n_train=60000, n_test=10000, inhib=100, lr=1e-2,
         lr_decay=1, time=350, dt=1, theta_plus=0.05, theta_decay=1e7, intensity=1,
         progress_interval=10, update_interval=250, plot=False, train=True, gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
        'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, inhib, lr, lr_decay, time, dt,
        theta_plus, theta_decay, intensity, progress_interval, update_interval
    ]
    test_params = [
        seed, n_neurons, n_train, n_test, inhib, lr, lr_decay, time, dt,
        theta_plus, theta_decay, intensity, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    n_classes = 10

    # Build network.
    if train:
        network = DiehlAndCook2015v2(
            n_inpt=784, n_neurons=n_neurons, inh=inhib, dt=dt, norm=78.4,
            theta_plus=theta_plus, theta_decay=theta_decay, nu=[0, lr]
        )
    else:
        network = load(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu
        )
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load MNIST data.
    dataset = MNIST(path=data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images = images.view(-1, 784)
    images *= intensity

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, time, n_neurons)
    full_spike_record = torch.zeros(n_examples, n_neurons).long()

    # Neuron assignments and spike proportions.
    if train:
        assignments = -torch.ones_like(torch.Tensor(n_neurons))
        proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        ngram_scores = {}
    else:
        path = os.path.join(params_path, '_'.join(['auxiliary', model_name]) + '.pt')
        assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb'))

    # Sequence of accuracy estimates.
    curves = {'all': [], 'proportion': [], 'ngram': []}
    predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()}

    if train:
        best_accuracy = 0

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    weights_im = None
    assigns_im = None
    perf_ax = None

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0:
            if train:
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
            else:
                current_labels = labels[i % len(images) - update_interval:i % len(images)]

            # Update and print accuracy evaluations.
            curves, preds = update_curves(
                curves, current_labels, n_classes, spike_record=spike_record,
                assignments=assignments, proportions=proportions,
                ngram_scores=ngram_scores, n=2
            )
            print_results(curves)

            for scheme in preds:
                predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1)

            # Save accuracy curves to disk.
            to_write = ['train'] + params if train else ['test'] + params
            f = '_'.join([str(x) for x in to_write]) + '.pt'
            torch.save((curves, update_interval, n_examples),
                       open(os.path.join(curves_path, f), 'wb'))

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print('New best accuracy! Saving network parameters to disk.')

                    # Save network to disk.
                    network.save(os.path.join(params_path, model_name + '.pt'))
                    path = os.path.join(
                        params_path, '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((assignments, proportions, rates, ngram_scores),
                               open(path, 'wb'))

                    best_accuracy = max([x[-1] for x in curves.values()])

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spike_record, current_labels, n_classes, rates)

                # Compute ngram scores.
                ngram_scores = update_ngram_scores(
                    spike_record, current_labels, n_classes, 2, ngram_scores)

            print()

        # Get next input sample.
        image = images[i % len(images)]
        sample = poisson(datum=image, time=time, dt=dt)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        # If the output layer stayed silent, double the input intensity and
        # re-encode, retrying up to three times.
        retries = 0
        while spikes['Y'].get('s').sum() < 1 and retries < 3:
            retries += 1
            image *= 2
            sample = poisson(datum=image, time=time, dt=dt)
            inpts = {'X': sample}
            network.run(inpts=inpts, time=time)

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y'].get('s').t()
        full_spike_record[i] = spikes['Y'].get('s').t().sum(0).long()

        # Optionally plot various simulation information.
        if plot:
            # _input = image.view(28, 28)
            # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28)
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}
            input_exc_weights = network.connections[('X', 'Y')].w
            square_weights = get_square_weights(
                input_exc_weights.view(784, n_neurons), n_sqrt, 28)
            # square_assignments = get_square_assignments(assignments, n_sqrt)

            # inpt_axes, inpt_ims = plot_input(
            #     _input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims)
            spike_ims, spike_axes = plot_spikes(_spikes, ims=spike_ims, axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im)
            # assigns_im = plot_assignments(square_assignments, im=assigns_im)
            # perf_ax = plot_performance(curves, ax=perf_ax)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    # i now equals n_examples, so the evaluation below covers the final interval.
    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
    else:
        current_labels = labels[i % len(images) - update_interval:i % len(images)]

    # Update and print final accuracy evaluations.
    curves, preds = update_curves(
        curves, current_labels, n_classes, spike_record=spike_record,
        assignments=assignments, proportions=proportions,
        ngram_scores=ngram_scores, n=2
    )
    print_results(curves)

    for scheme in preds:
        predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]], -1)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            network.save(os.path.join(params_path, model_name + '.pt'))
            path = os.path.join(
                params_path, '_'.join(['auxiliary', model_name]) + '.pt')
            torch.save((assignments, proportions, rates, ngram_scores),
                       open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    to_write = ['train'] + params if train else ['test'] + params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save((curves, update_interval, n_examples),
               open(os.path.join(curves_path, f), 'wb'))

    # Save results to disk.
    results = [
        np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']),
        np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram'])
    ]

    to_write = params + results if train else test_params + results
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,n_neurons,n_train,inhib,lr,lr_decay,time,timestep,theta_plus,theta_decay,intensity,'
                    'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )
            else:
                f.write(
                    'random_seed,n_neurons,n_train,n_test,inhib,lr,lr_decay,time,timestep,theta_plus,theta_decay,'
                    'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    # Pad or trim the label tensor so it matches the number of examples seen.
    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat([labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))

    # Save full spike record to disk.
    torch.save(full_spike_record, os.path.join(spikes_path, f))
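# The trim-or-double loop above aligns `labels` with n_examples before the
# confusion matrices are built. The same logic as a standalone helper, for
# clarity (hypothetical name, illustration only):
def pad_to_length(labels: torch.Tensor, n: int) -> torch.Tensor:
    if labels.numel() > n:
        return labels[:n]
    while labels.numel() < n:
        if 2 * labels.numel() > n:
            # One partial copy is enough to reach exactly n entries.
            labels = torch.cat([labels, labels[:n - labels.numel()]])
        else:
            # Otherwise keep doubling.
            labels = torch.cat([labels, labels])
    return labels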
def main(args):
    update_interval = args.update_steps * args.batch_size

    # Sets up GPU use
    torch.backends.cudnn.benchmark = False
    if args.gpu and torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    else:
        torch.manual_seed(args.seed)

    # Determines number of workers to use
    if args.n_workers == -1:
        args.n_workers = args.gpu * 4 * torch.cuda.device_count()

    n_sqrt = int(np.ceil(np.sqrt(args.n_neurons)))

    if args.reduction == "sum":
        reduction = torch.sum
    elif args.reduction == "mean":
        reduction = torch.mean
    elif args.reduction == "max":
        reduction = max_without_indices
    else:
        raise NotImplementedError

    # Build network.
    network = DiehlAndCook2015v2(
        n_inpt=784,
        n_neurons=args.n_neurons,
        inh=args.inh,
        dt=args.dt,
        norm=78.4,
        nu=(0.0, 1e-2),
        reduction=reduction,
        theta_plus=args.theta_plus,
        inpt_shape=(1, 28, 28),
    )

    # Directs network to GPU.
    if args.gpu:
        network.to("cuda")

    # Load MNIST data.
    dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * args.intensity)]
        ),
    )

    # Hold out 1,000 of the 60,000 training examples for validation.
    dataset, valid_dataset = torch.utils.data.random_split(dataset, [59000, 1000])

    test_dataset = MNIST(
        PoissonEncoder(time=args.time, dt=args.dt),
        None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        train=False,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Lambda(lambda x: x * args.intensity)]
        ),
    )

    # Neuron assignments and spike proportions.
    n_classes = 10
    assignments = -torch.ones(args.n_neurons)
    proportions = torch.zeros(args.n_neurons, n_classes)
    rates = torch.zeros(args.n_neurons, n_classes)

    # Set up monitors for spikes and voltages
    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer], state_vars=["s"], time=args.time)
        network.add_monitor(spikes[layer], name="%s_spikes" % layer)

    weights_im = None
    spike_ims, spike_axes = None, None

    # Record spikes for length of update interval.
    spike_record = torch.zeros(update_interval, args.time, args.n_neurons)

    if os.path.isdir(args.log_dir):
        shutil.rmtree(args.log_dir)

    # Summary writer.
    writer = SummaryWriter(log_dir=args.log_dir, flush_secs=60)

    for epoch in range(args.n_epochs):
        print(f"\nEpoch: {epoch}\n")

        labels = []

        # Get training data loader.
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.n_workers,
            pin_memory=args.gpu,
        )

        for step, batch in enumerate(dataloader):
            print(f"Step: {step} / {len(dataloader)}")

            global_step = 60000 * epoch + args.batch_size * step

            if step % args.update_steps == 0 and step > 0:
                # Disable learning.
                network.train(False)

                # Get validation data loader.
                valid_dataloader = DataLoader(
                    dataset=valid_dataset,
                    batch_size=args.test_batch_size,
                    shuffle=True,
                    num_workers=args.n_workers,
                    pin_memory=args.gpu,
                )

                test_labels = []
                test_spike_record = torch.zeros(
                    len(valid_dataset), args.time, args.n_neurons
                )
                t0 = time()
                for test_step, test_batch in enumerate(valid_dataloader):
                    # Prep next input batch.
                    inpts = {"X": test_batch["encoded_image"]}
                    if args.gpu:
                        inpts = {k: v.cuda() for k, v in inpts.items()}

                    # Run the network on the input (inference mode).
                    network.run(inpts=inpts, time=args.time, one_step=args.one_step)

                    # Add to spikes recording.
                    s = spikes["Y"].get("s").permute((1, 0, 2))
                    test_spike_record[
                        (test_step * args.test_batch_size):
                        (test_step * args.test_batch_size) + s.size(0)
                    ] = s

                    # Plot simulation data.
                    if args.valid_plot:
                        input_exc_weights = network.connections["X", "Y"].w
                        square_weights = get_square_weights(
                            input_exc_weights.view(784, args.n_neurons), n_sqrt, 28
                        )
                        spikes_ = {
                            layer: spikes[layer].get("s")[:, 0] for layer in spikes
                        }
                        spike_ims, spike_axes = plot_spikes(
                            spikes_, ims=spike_ims, axes=spike_axes
                        )
                        weights_im = plot_weights(square_weights, im=weights_im)

                        plt.pause(1e-8)

                    # Reset state variables.
                    network.reset_()

                    test_labels.extend(test_batch["label"].tolist())

                t1 = time() - t0

                writer.add_scalar(tag="time/test", scalar_value=t1, global_step=global_step)

                # Convert the list of labels into a tensor.
                test_label_tensor = torch.tensor(test_labels)

                # Get network predictions.
                all_activity_pred = all_activity(
                    spikes=test_spike_record,
                    assignments=assignments,
                    n_labels=n_classes,
                )
                proportion_pred = proportion_weighting(
                    spikes=test_spike_record,
                    assignments=assignments,
                    proportions=proportions,
                    n_labels=n_classes,
                )

                writer.add_scalar(
                    tag="accuracy/valid/all vote",
                    scalar_value=100
                    * torch.mean((test_label_tensor.long() == all_activity_pred).float()),
                    global_step=global_step,
                )
                writer.add_scalar(
                    tag="accuracy/valid/proportion weighting",
                    scalar_value=100
                    * torch.mean((test_label_tensor.long() == proportion_pred).float()),
                    global_step=global_step,
                )

                square_weights = get_square_weights(
                    network.connections["X", "Y"].w.view(784, args.n_neurons),
                    n_sqrt,
                    28,
                )
                img_tensor = colorize(square_weights, cmap="hot_r")
                writer.add_image(
                    tag="weights",
                    img_tensor=img_tensor,
                    global_step=global_step,
                    dataformats="HWC",
                )

                # Convert the list of training labels into a tensor.
                label_tensor = torch.tensor(labels)

                # Get network predictions.
                all_activity_pred = all_activity(
                    spikes=spike_record, assignments=assignments, n_labels=n_classes
                )
                proportion_pred = proportion_weighting(
                    spikes=spike_record,
                    assignments=assignments,
                    proportions=proportions,
                    n_labels=n_classes,
                )

                writer.add_scalar(
                    tag="accuracy/train/all vote",
                    scalar_value=100
                    * torch.mean((label_tensor.long() == all_activity_pred).float()),
                    global_step=global_step,
                )
                writer.add_scalar(
                    tag="accuracy/train/proportion weighting",
                    scalar_value=100
                    * torch.mean((label_tensor.long() == proportion_pred).float()),
                    global_step=global_step,
                )

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spikes=spike_record,
                    labels=label_tensor,
                    n_labels=n_classes,
                    rates=rates,
                )

                # Re-enable learning.
                network.train(True)

                labels = []

            labels.extend(batch["label"].tolist())

            # Prep next input batch.
            inpts = {"X": batch["encoded_image"]}
            if args.gpu:
                inpts = {k: v.cuda() for k, v in inpts.items()}

            # Run the network on the input (training mode).
            t0 = time()
            network.run(inpts=inpts, time=args.time, one_step=args.one_step)
            t1 = time() - t0

            writer.add_scalar(
                tag="time/train/step", scalar_value=t1, global_step=global_step
            )

            # Add to spikes recording.
            s = spikes["Y"].get("s").permute((1, 0, 2))
            spike_record[
                (step * args.batch_size) % update_interval:
                (step * args.batch_size) % update_interval + s.size(0)
            ] = s

            # Plot simulation data.
            if args.plot:
                input_exc_weights = network.connections["X", "Y"].w
                square_weights = get_square_weights(
                    input_exc_weights.view(784, args.n_neurons), n_sqrt, 28
                )
                spikes_ = {layer: spikes[layer].get("s")[:, 0] for layer in spikes}
                spike_ims, spike_axes = plot_spikes(spikes_, ims=spike_ims, axes=spike_axes)
                weights_im = plot_weights(square_weights, im=weights_im)

                plt.pause(1e-8)

            # Reset state variables.
            network.reset_()
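# The 59,000/1,000 random_split earlier in this script is unseeded, so the
# held-out validation examples differ across runs. A minimal sketch of a
# reproducible alternative, using the standard `generator` argument of
# torch.utils.data.random_split (seeded from args.seed as elsewhere above):
generator = torch.Generator().manual_seed(args.seed)
dataset, valid_dataset = torch.utils.data.random_split(
    dataset, [59000, 1000], generator=generator
)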
    # encoded image input for the network
    encoded_img_input = {input_layer_name: encoded_img}

    # encoded image label
    encoded_img_label = sample["Label"]

    # add to the encoded input list along with the input layer name
    encoded_test_inputs.append(
        {"Label": encoded_img_label, "Inputs": encoded_img_input}
    )

### NETWORK CONFIGURATION ###

# initialize network
network = DiehlAndCook2015v2(
    n_inpt=input_neurons,
    n_neurons=output_neurons,
    dt=dt
)

### SIMULATION VARIABLES ###

# record the spike times of each neuron during the simulation
spike_record = torch.zeros(1, timesteps, output_neurons)

# record the mapping of each neuron to its corresponding label
assignments = -torch.ones_like(torch.Tensor(output_neurons))

# how frequently each neuron fires for each input class
rates = torch.zeros_like(torch.Tensor(output_neurons, n_classes))

# the likelihood of each neuron firing for each input class
proportions = torch.zeros_like(torch.Tensor(output_neurons, n_classes))
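# Once spikes have been recorded for a set of labeled examples, bindsnet's
# assign_labels (used the same way elsewhere in this section) populates the
# three structures above. A minimal sketch; `label_batch` is a hypothetical
# tensor holding the true labels of the examples covered by spike_record:
assignments, proportions, rates = assign_labels(
    spikes=spike_record,
    labels=label_batch,
    n_labels=n_classes,
    rates=rates,
)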
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    # Determines number of workers to use
    if n_workers == -1:
        n_workers = gpu * 4 * torch.cuda.device_count()

    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    start_intensity = intensity

    network = DiehlAndCook2015v2(
        n_inpt=784,
        n_neurons=n_neurons,
        inh=inh,
        dt=dt,
        norm=78.4,
        nu=(1e-4, 1e-2),
        theta_plus=theta_plus,
        inpt_shape=(1, 28, 28),
    )
    if gpu:
        network.to("cuda")

    # Load MNIST data.
    dataset = MNIST(
        PoissonEncoder(time=time, dt=dt),
        None,
        root=os.path.join(ROOT_DIR, "data", "MNIST"),
        download=True,
        transform=transforms.Compose(
    else:
        torch.manual_seed(seed)

    if train:
        n_examples = n_train
    else:
        n_examples = n_test

    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    start_intensity = intensity
    n_classes = 10

    # Build network.
    if train:
        network = DiehlAndCook2015v2(
            n_inpt=784,
            n_neurons=n_neurons,
            inh=inhib,
            dt=dt,
            norm=norm,
            theta_plus=theta_plus,
            theta_decay=theta_decay
        )
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        # Freeze learning at test time.
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu
        )
        network.layers['Y'].theta_plus = 0
        network.layers['Y'].theta_decay = 0

    # Load Fashion-MNIST data.
    dataset = FashionMNIST(path=data_path, download=True)

    if train:
        images, labels = dataset.get_train()