Example #1
    def run(self, spiketimes):
        assert spiketimes.shape[0] == self.n_spike_source, 'spiketimes length should be equal to input neurons'
        start = time.perf_counter()
        sim.reset()
        end = time.perf_counter()
        print("reset uses %f s." % (end - start))
        for i in range(self.n_spike_source):
            spiketime = np.array(spiketimes[i], dtype=float)
            if spiketimes[i].any():
                self.spike_source[i].spike_times = spiketime

        sim.initialize(self.hidden_neurons, V_m=0)
        sim.initialize(self.output_neurons, V_m=0.)
        sim.run(self.sim_time)

        spiketrains = self.output_neurons.get_data(clear=True).segments[0].spiketrains

        # vtrace = self.hidden_neurons.get_data(clear=True).segments[0].filter(name='V_m')[0]
        # plt.figure()
        # plt.plot(vtrace.times, vtrace)
        # plt.show()

        hidden_spiketrains = self.hidden_neurons.get_data(clear=True).segments[0].spiketrains
        spike_cnts = 0
        for spiketrain in hidden_spiketrains:
            spike_cnts += len(spiketrain)
        self.hidden_spike_cnts.append(spike_cnts)
        print('hidden spikes:', spike_cnts)

        spiketimes_out = []
        for spiketrain in spiketrains:
            spiketimes_out.append(list(np.array(spiketrain)))


        return np.array(spiketimes_out)
Example #2
def test_column_inhibition():
    """
    Checks if only a single cell fires in the column
    """
    
    LOG.info('Testing inter-column inhibition...')
    
    # reset the simulator
    sim.reset()
    
    LOG.info('Test complete.')
Example #3
    def forward(self, input, timesteps=None):
        # if timesteps not specified, set to default simulation time
        if timesteps is None:
            timesteps = self.simulation_time

        # first reset the network
        if self.backend == 'nest' or self.backend == 'pynn':
            nest_sim.reset()
        elif self.backend == 'spinnaker':
            spin_sim.reset()

        # optionally append a constant bias term to the input observation
        if self.add_bias_as_observation:
            bias = torch.ones(1, dtype=torch.float)
            input = torch.cat((input, bias), dim=0)

        # set offsets to correct current
        # save the original offsets/biases
        bias = []
        for i in range(0, len(self.layers[0])):
            if self.backend == 'pynn' or self.backend == 'spinnaker':
                offset = self.layers[0][i:i+1].get('i_offset')[0]
                bias.append(offset)
                # add the inputs multiplied by their respective weights to the constant input of the
                # first hidden layer
                for j in range(0, len(input)):
                    offset += input[j].detach().item()*self.weights[0][j][i].detach().item()
                self.layers[0][i:i+1].set(i_offset=offset)
            elif self.backend == 'nest':
                offset = self.layers[0][i:i+1].get('I_e')
                bias.append(offset)
                # add the inputs multiplied by their respective weights to the constant input of the
                # first hidden layer
                for j in range(0, len(input)):
                    offset += input[j].detach().item()*self.weights[0][j][i].detach().item()
                self.layers[0][i:i + 1].set(I_e=offset)

        # simulate
        if self.backend == 'nest' or self.backend == 'pynn':
            nest_sim.run(timesteps)
        elif self.backend == 'spinnaker':
            spin_sim.run(timesteps)

        # read out the most recent membrane potential sample of the output layer
        potentials = self.layers[-1].get_data().segments[-1].analogsignals[0][-1]
        # restore original bias in the offset
        for i in range(0, len(self.layers[0])):
            if self.backend == 'pynn' or self.backend == 'spinnaker':
                self.layers[0][i:i + 1].set(i_offset=bias[i])
            elif self.backend == 'nest':
                self.layers[0][i:i + 1].set(I_e=bias[i])
        # return a torch tensor to stay compatible with the other agents
        return torch.tensor(potentials, dtype=torch.float)
Example #4
def test_nest_dense_increased_weight_fire():
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate = 1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights = 1)  # baseline run with unit weights
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count1 = spiketrains[0].size
    pynn.reset()
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate = 1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights = 2)
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count2 = spiketrains[0].size
    assert count2 >= count1 * 2 
Example #5
def test_column_input():
    """
    Tests whether all neurons receive the same feedforward input from the
    common proximal dendrite.
    """
    
    LOG.info('Testing column input...')
    
    # reset the simulator
    sim.reset()
    
    column = Column.Column()
    sim.run(1000)
    spikes = column.FetchSpikes()
    print('Spikes before: {}'.format(spikes))
    
    # now stream some input into the column
    column.SetFeedforwardDendrite(1000.0)
    sim.run(1000)
    spikes = column.FetchSpikes().segments[0]
    print('Spikes after: {}'.format(spikes))
    
    LOG.info('Test complete.')
Example #6
def test_squaring():
    """
    Tests that squaring a value using the neural substrate works.
    """
    
    LOG.info('Running squaring test...')
    
    # reset the simulator
    sim.reset()
    
    # create input Encoder
    input = ScalarEncoder.ScalarEncoder()
    input.encode(5)
    
    # create decoder
    output = ScalarEncoder.ScalarEncoder()
    output.encode(7)
    
    # creating ensemble
    ensemble = Ensemble.Ensemble(input, output, ensemble_size=200)
    ensemble.Train()
    
    LOG.info('Test complete.')
Example #7
def setup():
    pynn.reset()
    pynn.setup()
Example #8
record_spikes(inputLayer)
record_spikes(outputLayer)

sim.run(TRAINING_TIME)

save_results(inputLayer, 'results/wineInput')
save_results(outputLayer, 'results/wineOutput')

# Save the weights after training
synapseWeights = synapses.get(["weight"], format="list")

# Can print it to console to check for inactive neurons
# print(synapseWeights)

sim.reset()

# === Training is done, weights are saved and network is reset ===
# === Test the network ===
initialize_network(TIME_STEP, MIN_DELAY, MAX_DELAY)

testSpikeSequence = create_test_spike_sequence(wineTestingData,
                                               TESTING_START_TIME)
testInputLayer = create_layer_of_neurons(INPUT_LAYER_NEURONS,
                                         'test input layer')
testOutputLayer = create_layer_of_neurons(OUTPUT_LAYER_NEURONS,
                                          'test output layer')

build_testing_connections(testSpikeSequence, wineTestingData, testInputLayer)

# Connect the test layers using the weights learned during training with the STDP synapse
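
The excerpt ends before those connections are created. The lines below are a minimal sketch of that step, assuming PyNN's sim.FromListConnector and sim.StaticSynapse and that synapseWeights holds the (pre_index, post_index, weight) tuples returned above by synapses.get(["weight"], format="list"); the variable name and receptor type are illustrative.

# Sketch (assumed API usage, not from the original script): rebuild the
# trained connectivity as static synapses between the test layers.
testSynapses = sim.Projection(testInputLayer, testOutputLayer,
                              sim.FromListConnector(synapseWeights,
                                                    column_names=["weight"]),
                              synapse_type=sim.StaticSynapse(),
                              receptor_type='excitatory')

From there the test phase would mirror the training phase: record spikes on the test layers, run the simulation for the testing interval, and save the results.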
Example #9
    connector = sim.OneToOneConnector()

    connection = sim.Projection(inp, outp, connector, synapse)

    def report_time(t):
        print("Time: {}".format(t))
        return t + simparams['dt']

    par = 'i_offset'
    for p in [0.01]:
        outp.set(**{par: p})
        cellparams[par] = p
        outp.initialize(v=cellparams['v_rest'])
        sim.run(simparams['duration'], callbacks=[report_time])
        sim.reset(annotations={par: p})

    inp_data = inp.get_data()
    outp_data = outp.get_data()

    sim.end()

    plt.figure()
    plot_spiketrains(inp_data.segments)
    plt.show()

    plt.figure()
    plot_spiketrains(outp_data.segments)
    plt.show()

    plt.figure()
Example #10
def runSimGivenStim(stim_type, num_source, num_target, duration, use_stdp,
                    record_source_spikes, source_rates_params, synapse_to_use,
                    ff_conn, lat_conn):
    """
    For running the simulation and returning the required results.
    Arguments:  stim_type, 'bright' or 'dark'
                num_source, number of cells in the source layers
                num_target, number of cells in the target layer
                duration, float
                use_stdp, bool, whether plastic (STDP) synapses are used
                record_source_spikes, bool, whether to also record source-population spikes
                source_rates_params, params for 8 Gamma distributions
                synapse_to_use, either STDPMechanism or StaticSynapse
                ff_conn, either AllToAllConnector or FixedProbabilityConnector
                lat_conn, same options as ff_conn, possibly with a different connection probability
    Returns:    target_spikes,
                ff_on_weights,
                ff_off_weights,
                lat_weights,
                ff_on_weights_over_time,
                ff_off_weights_over_time,
                lat_weights_over_time
    """
    on_rates, off_rates = getOnOffSourceRates(
        num_source,
        stim_type,
        on_bright_params=source_rates_params[0],
        on_dark_params=source_rates_params[1],
        off_bright_params=source_rates_params[2],
        off_dark_params=source_rates_params[3])
    source_on_pop = pynn.Population(num_source,
                                    pynn.SpikeSourcePoisson(rate=on_rates),
                                    label='source_on_pop')
    source_off_pop = pynn.Population(num_source,
                                     pynn.SpikeSourcePoisson(rate=off_rates),
                                     label='source_off_pop')
    target_pop = pynn.Population(num_target,
                                 pynn.IF_cond_exp, {
                                     'i_offset': 0.11,
                                     'tau_refrac': 3.0,
                                     'v_thresh': -51.0
                                 },
                                 label='target_pop')
    ff_on_proj = pynn.Projection(source_on_pop,
                                 target_pop,
                                 connector=ff_conn,
                                 synapse_type=synapse_to_use,
                                 receptor_type='excitatory')
    ff_off_proj = pynn.Projection(source_off_pop,
                                  target_pop,
                                  connector=ff_conn,
                                  synapse_type=synapse_to_use,
                                  receptor_type='excitatory')
    lat_proj = pynn.Projection(target_pop,
                               target_pop,
                               connector=lat_conn,
                               synapse_type=synapse_to_use,
                               receptor_type='inhibitory')
    target_pop.record(['spikes'])
    if record_source_spikes:
        source_on_pop.record('spikes')
        source_off_pop.record('spikes')
    ff_on_weight_recorder = WeightRecorder(sampling_interval=1.0,
                                           projection=ff_on_proj)
    ff_off_weight_recorder = WeightRecorder(sampling_interval=1.0,
                                            projection=ff_off_proj)
    lat_weight_recorder = WeightRecorder(sampling_interval=1.0,
                                         projection=lat_proj)
    pynn.run(duration,
             callbacks=[
                 ff_on_weight_recorder, ff_off_weight_recorder,
                 lat_weight_recorder
             ])
    pynn.end()
    target_spikes = target_pop.get_data('spikes').segments[0].spiketrains
    if record_source_spikes:
        source_on_spikes = source_on_pop.get_data(
            'spikes').segments[0].spiketrains
        source_off_spikes = source_off_pop.get_data(
            'spikes').segments[0].spiketrains
    ff_on_weights = ff_on_proj.get('weight', format='array')
    ff_off_weights = ff_off_proj.get('weight', format='array')
    lat_weights = lat_proj.get('weight', format='array')
    ff_on_weights_over_time = ff_on_weight_recorder.get_weights()
    ff_off_weights_over_time = ff_off_weight_recorder.get_weights()
    lat_weights_over_time = lat_weight_recorder.get_weights()
    pynn.reset()
    return target_spikes, ff_on_weights, ff_off_weights, lat_weights, ff_on_weights_over_time, ff_off_weights_over_time, lat_weights_over_time
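
For orientation, a hedged usage sketch of runSimGivenStim follows; the setup call, argument values, and placeholder Gamma parameters are illustrative assumptions (not taken from the original script), and the script's own helpers such as WeightRecorder and getOnOffSourceRates are assumed to be defined.

# Illustrative call with assumed values.
pynn.setup(timestep=0.1)
gamma_params = [(10.0, 1.0), (2.0, 1.0), (2.0, 1.0), (10.0, 1.0)]  # placeholder Gamma (shape, scale) pairs
results = runSimGivenStim(stim_type='bright',
                          num_source=100,
                          num_target=10,
                          duration=1000.0,
                          use_stdp=False,
                          record_source_spikes=False,
                          source_rates_params=gamma_params,
                          synapse_to_use=pynn.StaticSynapse(weight=0.02),
                          ff_conn=pynn.AllToAllConnector(),
                          lat_conn=pynn.FixedProbabilityConnector(p_connect=0.25))
target_spikes = results[0]  # spike trains of the target population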