def train(sepal_length, sepal_length_unit_mean_sd, sepal_width, sepal_width_unit_mean_sd,
          petal_length, petal_length_unit_mean_sd, petal_width, petal_width_unit_mean_sd,
          unique_species, species):
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0,
              spinnaker_hostname="192.168.1.1")

    # Calculate input rates
    input_rates = []
    calculate_stim_rates(sepal_length, sepal_length_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(sepal_width, sepal_width_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(petal_length, petal_length_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(petal_width, petal_width_unit_mean_sd, input_rates, MAX_FREQUENCY)

    # Calculate class rates
    class_rates = []
    for u, _ in enumerate(unique_species):
        class_rates.append(list((species == u) * MAX_FREQUENCY))

    # Build basic network, stimulating inputs with the feature rates and
    # classes with the supervising class rates
    input_populations, class_populations = build_basic_network(
        input_rates, STIMULUS_TIME, class_rates, STIMULUS_TIME, False, 0.0, True, sim)

    # Create BCPNN model with weights disabled
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=False,
        plasticity_enabled=True,
        weight=0.0)

    # Create all-to-all connector to connect inputs to classes
    input_class_connector = sim.AllToAllConnector()

    # Loop through all pairs of input populations and classes
    plastic_connections = []
    for (i, c) in itertools.product(input_populations, class_populations):
        # Connect input to class with all-to-all plastic synapse
        connection = sim.Projection(i, c, input_class_connector, bcpnn_synapse,
                                    receptor_type="excitatory",
                                    label="%s-%s" % (i.label, c.label))
        plastic_connections.append(connection)

    # Run simulation
    sim.run(STIMULUS_TIME * len(sepal_length))

    # Read biases
    # **HACK** investigate where out by 1000 comes from!
    learnt_biases = [c.get_data().segments[0].filter(name="bias")[0][-1, :] * 0.001
                     for c in class_populations]

    # Read plastic weights
    learnt_weights = [p.get("weight", format="array") for p in plastic_connections]

    return learnt_biases, learnt_weights
def test(learnt_weights, learnt_biases):
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0,
              spinnaker_hostname="192.168.1.1")

    # Generate testing stimuli patterns
    testing_stimuli_rates = [
        [MAX_FREQUENCY, MIN_FREQUENCY, MAX_FREQUENCY, MIN_FREQUENCY],
        [MIN_FREQUENCY, MAX_FREQUENCY, MAX_FREQUENCY, MIN_FREQUENCY],
    ]

    # Generate uncertain class stimuli pattern
    uncertain_stimuli_rates = [
        [MAX_FREQUENCY * 0.5],
        [MAX_FREQUENCY * 0.5],
    ]

    # Build basic network
    input_populations, class_populations = build_basic_network(
        testing_stimuli_rates, TESTING_STIMULUS_TIME,
        uncertain_stimuli_rates, TESTING_TIME,
        True, learnt_biases, False, sim)

    # Create BCPNN model with plasticity disabled
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=True,
        plasticity_enabled=False)

    for ((i, c), w) in zip(itertools.product(input_populations, class_populations),
                           learnt_weights):
        # Convert learnt weight matrix into a connection list
        connections = convert_weights_to_list(w, 1.0, 7.0)

        # Create projections
        sim.Projection(i, c, sim.FromListConnector(connections), bcpnn_synapse,
                       receptor_type="excitatory",
                       label="%s-%s" % (i.label, c.label))

    # Run simulation
    sim.run(TESTING_TIME)

    # Read spikes from input and class populations
    input_data = [i.get_data() for i in input_populations]
    class_data = [c.get_data() for c in class_populations]

    # End simulation on SpiNNaker
    sim.end()

    # Return spikes
    return input_data, class_data
def test_discrete(connection_weight_filenames, hcu_biases, ampa_gain, nmda_gain,
                  tau_ca2, i_alpha, stim_minicolumns, testing_simtime, delay_model,
                  num_hcu, num_mcu_per_hcu, num_mcu_neurons, record_membrane,
                  **setup_kwargs):
    assert len(hcu_biases) == num_hcu, \
        "An array of biases must be provided for each HCU"
    assert len(connection_weight_filenames) == (num_hcu ** 2), \
        "A tuple of weight matrix filenames must be provided for each HCU->HCU product"

    # Scale parameters to obtain HCU size and synaptic strength
    num_excitatory, num_inhibitory, JE, JI = scale_parameters(num_mcu_per_hcu,
                                                              num_mcu_neurons)

    # Setup simulator and seed RNG
    sim.setup(timestep=dt, min_delay=dt, max_delay=7.0 * dt, **setup_kwargs)
    rng = NumpyRNG(seed=1)

    # Calculate mean firing rate
    e_cell_mean_firing_rate = (float(num_mcu_neurons) / float(num_excitatory)) * 20.0

    # Build HCUs configured for testing
    hcus = [HCU.testing_adaptive(name="%u" % i, sim=sim, rng=rng,
                                 num_excitatory=num_excitatory,
                                 num_inhibitory=num_inhibitory,
                                 JE=JE, JI=JI, bias=bias,
                                 tau_ca2=tau_ca2, i_alpha=i_alpha,
                                 e_cell_mean_firing_rate=e_cell_mean_firing_rate,
                                 simtime=testing_simtime,
                                 record_membrane=record_membrane,
                                 stim_spike_times=generate_discrete_hcu_stimuli(
                                     stim_minicolumns, num_excitatory, num_mcu_per_hcu))
            for i, bias in enumerate(hcu_biases)]

    # **HACK** not actually plastic - just used to force signed weights
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=tau_syn_ampa_gaba,
        tau_zj=tau_syn_ampa_gaba,
        tau_p=1000.0,
        f_max=20.0,
        w_max=JE,
        weights_enabled=True,
        plasticity_enabled=False)

    # Loop through all HCU products and their corresponding connection weights
    for connection_weight_filename, ((i_pre, hcu_pre), (i_post, hcu_post)) in \
            zip(connection_weight_filenames, itertools.product(enumerate(hcus), repeat=2)):
        # Use delay model to calculate delay
        hcu_delay = delay_model(i_pre, i_post)
        logger.info("Connecting HCU %u->%u with delay %ums" % (i_pre, i_post, hcu_delay))

        # Build connections
        HCUConnection.testing(
            sim=sim, pre_hcu=hcu_pre, post_hcu=hcu_post,
            ampa_gain=ampa_gain, nmda_gain=nmda_gain,
            ampa_synapse=bcpnn_synapse, nmda_synapse=bcpnn_synapse,
            connection_weight_filename=connection_weight_filename,
            delay=hcu_delay)

    # Run simulation
    sim.run(testing_simtime)

    # Read results from HCUs
    results = [hcu.read_results() for hcu in hcus]

    return results, sim.end
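# The delay_model argument above is only assumed to be a callable that takes the
# pre- and post-HCU indices and returns an axonal delay in ms: it is invoked as
# delay_model(i_pre, i_post) and passed straight through to HCUConnection.
# A minimal sketch of such a model follows; the name and the default delay are
# hypothetical and the real delay models are defined elsewhere in this codebase.
def constant_hcu_delay_model(i_pre, i_post, delay=1.0):
    # Ignore the HCU indices and use the same delay for every HCU->HCU connection
    return delay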
def test(sepal_length, sepal_length_unit_mean_sd, sepal_width, sepal_width_unit_mean_sd,
         petal_length, petal_length_unit_mean_sd, petal_width, petal_width_unit_mean_sd,
         num_species, learnt_biases, learnt_weights):
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0,
              spinnaker_hostname="192.168.1.1")

    # Calculate input rates
    input_rates = []
    calculate_stim_rates(sepal_length, sepal_length_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(sepal_width, sepal_width_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(petal_length, petal_length_unit_mean_sd, input_rates, MAX_FREQUENCY)
    calculate_stim_rates(petal_width, petal_width_unit_mean_sd, input_rates, MAX_FREQUENCY)

    # Generate uncertain class pattern
    uncertain_class_rates = [[MAX_FREQUENCY * (1.0 / num_species)]
                             for s in range(num_species)]

    # Build basic network
    testing_time = STIMULUS_TIME * len(sepal_length)
    input_populations, class_populations = build_basic_network(
        input_rates, STIMULUS_TIME, uncertain_class_rates, testing_time,
        True, learnt_biases, False, sim)

    # Create BCPNN model with plasticity disabled
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=True,
        plasticity_enabled=False)

    for ((i, c), w) in zip(itertools.product(input_populations, class_populations),
                           learnt_weights):
        # Convert learnt weight matrix into a connection list
        connections = convert_weights_to_list(w, 1.0, 7.0 * (30.0 / float(CLASS_POP_SIZE)))

        # Create projections
        sim.Projection(i, c, sim.FromListConnector(connections), bcpnn_synapse,
                       receptor_type="excitatory",
                       label="%s-%s" % (i.label, c.label))

    # Run simulation
    sim.run(testing_time)

    # Read spikes from input and class populations
    input_data = [i.get_data() for i in input_populations]
    class_data = [c.get_data() for c in class_populations]

    # End simulation on SpiNNaker
    sim.end()

    # Return spikes
    return input_data, class_data
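# A sketch of how the iris train()/test() pair above is intended to chain together.
# The wrapper name is hypothetical; the four feature columns, their per-unit
# (mean, sd) parameters, the unique_species array and the integer species labels
# are assumed to be prepared by the calling script and are not shown here.
def run_iris_experiment(sepal_length, sepal_length_unit_mean_sd,
                        sepal_width, sepal_width_unit_mean_sd,
                        petal_length, petal_length_unit_mean_sd,
                        petal_width, petal_width_unit_mean_sd,
                        unique_species, species):
    # Train the plastic input->class projections and read back biases and weights
    learnt_biases, learnt_weights = train(
        sepal_length, sepal_length_unit_mean_sd, sepal_width, sepal_width_unit_mean_sd,
        petal_length, petal_length_unit_mean_sd, petal_width, petal_width_unit_mean_sd,
        unique_species, species)

    # Re-run the network with static weights, learnt biases and uncertain class input
    return test(
        sepal_length, sepal_length_unit_mean_sd, sepal_width, sepal_width_unit_mean_sd,
        petal_length, petal_length_unit_mean_sd, petal_width, petal_width_unit_mean_sd,
        len(unique_species), learnt_biases, learnt_weights)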
def train_discrete(ampa_tau_zi, ampa_tau_zj, nmda_tau_zi, nmda_tau_zj, tau_p,
                   stim_minicolumns, training_simtime, delay_model,
                   num_hcu, num_mcu_per_hcu, num_mcu_neurons, **setup_kwargs):
    # Scale parameters to obtain HCU size and synaptic strength
    num_excitatory, num_inhibitory, JE, JI = scale_parameters(num_mcu_per_hcu,
                                                              num_mcu_neurons)

    # Setup simulator and seed RNG
    sim.setup(timestep=dt, min_delay=dt, max_delay=7.0 * dt, **setup_kwargs)
    rng = NumpyRNG(seed=1)

    # Calculate mean firing rate
    e_cell_mean_firing_rate = 4.0  # (float(num_mcu_neurons) / float(num_excitatory)) * 20.0

    # Build HCUs configured for training
    hcus = [HCU.training(name="%u" % h, sim=sim, rng=rng, simtime=training_simtime,
                         num_excitatory=num_excitatory, num_inhibitory=num_inhibitory,
                         JE=JE, JI=JI,
                         intrinsic_tau_z=ampa_tau_zj, intrinsic_tau_p=tau_p,
                         e_cell_mean_firing_rate=e_cell_mean_firing_rate,
                         stim_spike_times=generate_discrete_hcu_stimuli(
                             stim_minicolumns, num_excitatory, num_mcu_per_hcu))
            for h in range(num_hcu)]

    # Loop through all HCU products
    connections = []
    for (i_pre, hcu_pre), (i_post, hcu_post) in itertools.product(enumerate(hcus), repeat=2):
        # Use delay model to calculate delay
        hcu_delay = delay_model(i_pre, i_post)

        # Build BCPNN models
        ampa_synapse = bcpnn.BCPNNSynapse(
            tau_zi=ampa_tau_zi,
            tau_zj=ampa_tau_zj,
            tau_p=tau_p,
            f_max=20.0,
            w_max=JE,
            weights_enabled=False,
            plasticity_enabled=True,
            weight=0.0,
            delay=hcu_delay)

        nmda_synapse = bcpnn.BCPNNSynapse(
            tau_zi=nmda_tau_zi,
            tau_zj=nmda_tau_zj,
            tau_p=tau_p,
            f_max=20.0,
            w_max=JE,
            weights_enabled=False,
            plasticity_enabled=True,
            weight=0.0,
            delay=hcu_delay)

        logger.info("Connecting HCU %u->%u with delay %ums" % (i_pre, i_post, hcu_delay))

        connections.append(HCUConnection.training(
            sim=sim, pre_hcu=hcu_pre, post_hcu=hcu_post,
            ampa_synapse=ampa_synapse, nmda_synapse=nmda_synapse, rng=rng))

    # Run simulation
    sim.run(training_simtime)

    # Read results from HCUs
    hcu_results = [hcu.read_results() for hcu in hcus]

    # Read results from inter-HCU connections
    connection_results = [c.read_results() for c in connections]

    return hcu_results, connection_results, sim.end
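# For reference, the stim_minicolumns argument consumed by generate_discrete_hcu_stimuli
# above appears (from the sibling train_discrete variant below, which builds it inline)
# to be a list of (minicolumn_index, start_time_ms, rate_hz, duration_ms) tuples, e.g.:
#
#   stim_minicolumns = [(0, 0.0, 20.0, 100.0),
#                       (1, 200.0, 20.0, 100.0)]
#
# The exact field interpretation is an assumption based on that construction, not
# documented here.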
def train_discrete(ampa_tau_zi, ampa_tau_zj, nmda_tau_zi, nmda_tau_zj, tau_p,
                   minicolumn_indices, training_stim_time, training_interval_time,
                   delay_model, num_hcu, num_mcu_neurons, timer, **setup_kwargs):
    # Setup simulator and seed RNG
    sim.setup(timestep=dt, min_delay=dt, max_delay=7.0 * dt, **setup_kwargs)
    rng = sim.NativeRNG(host_rng=NumpyRNG(seed=1))

    # Determine length of each epoch
    epoch_duration = training_stim_time + training_interval_time

    # Stimulate minicolumns in sequence
    stim_minicolumns = [(m, float(i * epoch_duration), 20.0, training_stim_time)
                        for i, m in enumerate(minicolumn_indices)]

    # Calculate length of training required
    training_duration = float(len(stim_minicolumns)) * epoch_duration

    # Calculate mean firing rate
    e_cell_mean_firing_rate = (float(num_mcu_neurons) / float(NE)) * 20.0

    # Build HCUs configured for training
    hcus = [HCU.training(name="%u" % h, sim=sim, rng=rng, simtime=training_duration,
                         intrinsic_tau_z=ampa_tau_zj, intrinsic_tau_p=tau_p,
                         e_cell_mean_firing_rate=e_cell_mean_firing_rate,
                         stim_spike_times=generate_discrete_hcu_stimuli(
                             stim_minicolumns, num_mcu_neurons))
            for h in range(num_hcu)]

    # Loop through all HCU products
    connections = []
    for (i_pre, hcu_pre), (i_post, hcu_post) in itertools.product(enumerate(hcus), repeat=2):
        # Use delay model to calculate delay
        hcu_delay = delay_model(i_pre, i_post)

        # Build BCPNN models
        ampa_synapse = bcpnn.BCPNNSynapse(
            tau_zi=ampa_tau_zi,
            tau_zj=ampa_tau_zj,
            tau_p=tau_p,
            f_max=20.0,
            w_max=JE,
            weights_enabled=False,
            plasticity_enabled=True,
            weight=0.0,
            delay=hcu_delay)

        nmda_synapse = bcpnn.BCPNNSynapse(
            tau_zi=nmda_tau_zi,
            tau_zj=nmda_tau_zj,
            tau_p=tau_p,
            f_max=20.0,
            w_max=JE,
            weights_enabled=False,
            plasticity_enabled=True,
            weight=0.0,
            delay=hcu_delay)

        logger.info("Connecting HCU %u->%u with delay %ums" % (i_pre, i_post, hcu_delay))

        connections.append(HCUConnection.training(
            sim=sim, pre_hcu=hcu_pre, post_hcu=hcu_post,
            ampa_synapse=ampa_synapse, nmda_synapse=nmda_synapse, rng=rng))

    logger.info("Build time %gs", timer.diff())

    # Run simulation
    sim.run(training_duration)

    logger.info("Load and run time %gs", timer.diff())

    # Read results from HCUs
    hcu_results = [hcu.read_results() for hcu in hcus]

    # Read results from inter-HCU connections
    connection_results = [c.read_results() for c in connections]

    return hcu_results, connection_results, sim.end
def train():
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0,
              spinnaker_hostname="192.168.1.1")

    # Generate orthogonal input stimuli
    orthogonal_stimuli_rates = []
    num_inputs = len(INPUT_NAMES)
    for i in range(num_inputs):
        input_stimuli = []
        for s in range(TRAINING_TIME // TRAINING_STIMULUS_TIME):
            input_stimuli.append(MIN_FREQUENCY if (s % num_inputs) == i else MAX_FREQUENCY)
        orthogonal_stimuli_rates.append(input_stimuli)

    # Build basic network with orthogonal stimulation of both populations
    input_populations, class_populations = build_basic_network(
        orthogonal_stimuli_rates, TRAINING_STIMULUS_TIME,
        orthogonal_stimuli_rates, TRAINING_STIMULUS_TIME,
        False, 0.0, True, sim)

    # Create BCPNN model with weights disabled
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=False,
        plasticity_enabled=True,
        weight=0.0)

    # Create all-to-all connector to connect inputs to classes
    input_class_connector = sim.AllToAllConnector()

    # Loop through all pairs of input populations and classes
    plastic_connections = []
    for (i, c) in itertools.product(input_populations, class_populations):
        # Connect input to class with all-to-all plastic synapse
        connection = sim.Projection(i, c, input_class_connector, bcpnn_synapse,
                                    receptor_type="excitatory",
                                    label="%s-%s" % (i.label, c.label))
        plastic_connections.append(connection)

    # Run simulation
    sim.run(TRAINING_TIME)

    # Plot bias evolution
    num_classes = len(CLASS_NAMES)
    #bias_figure, bias_axes = pylab.subplots()

    # **HACK** Extract learnt biases from gsyn channel
    learnt_biases = []
    plotting_times = range(TRAINING_TIME)
    for i, c in enumerate(class_populations):
        # Read bias from class
        bias = c.get_data().segments[0].filter(name="bias")[0]

        '''
        # Loop through plotting times to get mean biases
        mean_pj = []
        for t in plotting_times:
            # Slice out the rows for all neurons at this time
            time_rows = gsyn[t::TRAINING_TIME]
            time_bias = zip(*time_rows)[2]
            mean_pj.append(numpy.average(numpy.exp(numpy.divide(time_bias, BCPNN_PHI))))

        bias_axes.plot(plotting_times, mean_pj, label=c.label)
        '''

        # Add final bias column to list
        # **HACK** investigate where out by 1000 comes from!
        learnt_biases.append(bias[-1, :] * 0.001)

    '''
    bias_axes.set_title("Mean final bias")
    bias_axes.set_ylim((0.0, 1.0))
    bias_axes.set_ylabel("Pj")
    bias_axes.set_xlabel("Time/ms")
    bias_axes.legend()
    '''

    # Plot weights
    weight_figure, weight_axes = pylab.subplots(num_inputs, num_classes)

    # Loop through plastic connections
    learnt_weights = []
    for i, c in enumerate(plastic_connections):
        # Extract weights and calculate mean
        weights = c.get("weight", format="array")
        mean_weight = numpy.average(weights)

        # Add weights to list
        learnt_weights.append(weights)

        # Plot mean weight in each panel - the connections were created with the
        # class index varying fastest, so the row is the input index and the
        # column is the class index
        axis = weight_axes[i // num_classes][i % num_classes]
        axis.matshow([[mean_weight]], cmap=pylab.cm.gray)
        #axis.set_title("%s: %fuS" % (c.label, mean_weight))
        axis.set_title("%u->%u: %f" % (i // num_classes, i % num_classes, mean_weight))
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)

    # Show figures
    pylab.show()

    # End simulation on SpiNNaker
    sim.end()

    # Return learnt weights
    return learnt_weights, learnt_biases
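# A sketch of how this simpler orthogonal-stimulus example chains together.
# The entry-point wrapper is hypothetical; train() above returns the weights and
# biases in the order that the matching two-input test() expects.
def run_simple_experiment():
    # Learn weights and biases from the orthogonal training stimuli
    learnt_weights, learnt_biases = train()

    # Re-run the network with static learnt parameters and return the recorded spikes
    return test(learnt_weights, learnt_biases)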