Example #1
	nengo.Connection(targetVelY,currPosY,transform=tau,synapse=tau/10)
	nengo.Connection(currPosY,currPosY,synapse=tau)

	synP = synC #0.01
	prb_ip1 = nengo.Probe(ip1,synapse=0.01)
	prb_ip2 = nengo.Probe(ip2,synapse=0.01)
	prb_posX = nengo.Probe(currPosX,synapse=synP)
	prb_posY = nengo.Probe(currPosY,synapse=synP)
	prb_velX = nengo.Probe(targetVelX,synapse=synP)
	prb_velY = nengo.Probe(targetVelY,synapse=synP)
	prb_posXn = nengo.Probe(currPosX.neurons)
	prb_posYn = nengo.Probe(currPosY.neurons)
	prb_velXn = nengo.Probe(targetVelX.neurons)
	prb_velYn = nengo.Probe(targetVelY.neurons)

with nengo.Simulator(eye_ctrl) as sim:
	sim.run(5)

t = sim.trange()

plt.figure()
plt.plot(t,sim.data[prb_posX],label='posX')
plt.plot(t,sim.data[prb_posY],label='posY')
plt.plot(t,sim.data[prb_ip1],label='inputX')
plt.plot(t,sim.data[prb_ip2],label='inputY')
plt.xlabel("time(s)")
plt.ylabel("rad")
plt.title("Eye Position")

plt.figure()
plt.plot(t,sim.data[prb_posX],label='posX')
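The two connections at the top of this fragment (velocity scaled by tau into position, plus a recurrent connection on position) are the standard NEF integrator pattern. Below is a minimal, self-contained sketch of that pattern with an assumed tau and a hypothetical step-velocity input; it is not part of the original snippet.

import matplotlib.pyplot as plt
import nengo

tau = 0.1  # assumed synaptic time constant

with nengo.Network() as net:
    vel = nengo.Node(lambda t: 1.0 if t < 1.0 else 0.0)  # hypothetical velocity input
    pos = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(vel, pos, transform=tau, synapse=tau)  # feed-forward input, scaled by tau
    nengo.Connection(pos, pos, synapse=tau)                 # recurrence approximates integration
    p_pos = nengo.Probe(pos, synapse=0.01)

with nengo.Simulator(net) as sim:
    sim.run(2.0)

plt.plot(sim.trange(), sim.data[p_pos])
plt.xlabel("time (s)")
plt.ylabel("integrated position")
plt.show()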
Example #2
def test_conv_connection(channels, Simulator, seed, rng, plt, allclose):
    # channels_last = True
    channels_last = False
    if channels > 1:
        pytest.xfail("Cannot send population spikes to chip")

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = 1.999 * test_x - 0.999  # range (-1, 1)
    test_x = test_x[:, :, None]  # single channel
    input_shape = ImageShape(test_x.shape[0],
                             test_x.shape[1],
                             channels,
                             channels_last=channels_last)

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    filters = filters[None, :, :, :]  # single channel
    filters = np.transpose(filters, (0, 2, 3, 1))  # filters last
    strides = (2, 2)
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    neuron_type = LoihiLIF(tau_rc=tau_rc, tau_ref=tau_ref)

    pres_time = 1.0

    with nengo.Network(seed=seed) as model:
        nengo_loihi.add_params(model)

        u = nengo.Node(nengo.processes.PresentInput([test_x.ravel()],
                                                    pres_time),
                       label='u')

        a = nengo.Ensemble(input_shape.size,
                           1,
                           neuron_type=LoihiSpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([40 / channels]),
                           intercepts=nengo.dists.Choice([0]),
                           label='a')
        model.config[a].on_chip = False

        if channels == 1:
            nengo.Connection(u, a.neurons, transform=1, synapse=None)
        elif channels == 2:
            # encode image into spikes using two channels (on/off)
            if input_shape.channels_last:
                nengo.Connection(u, a.neurons[0::2], transform=1, synapse=None)
                nengo.Connection(u,
                                 a.neurons[1::2],
                                 transform=-1,
                                 synapse=None)
            else:
                k = input_shape.rows * input_shape.cols
                nengo.Connection(u, a.neurons[:k], transform=1, synapse=None)
                nengo.Connection(u, a.neurons[k:], transform=-1, synapse=None)

            filters = np.vstack([filters, -filters])
        else:
            raise ValueError("Test not configured for more than two channels")

        conv2d_transform = Conv2D.from_kernel(filters,
                                              input_shape,
                                              strides=strides)
        output_shape = conv2d_transform.output_shape

        gain, bias = neuron_type.gain_bias(max_rates=100, intercepts=0)
        gain = gain * 0.01  # account for `a` max_rates
        b = nengo.Ensemble(output_shape.size,
                           1,
                           neuron_type=neuron_type,
                           gain=nengo.dists.Choice([gain[0]]),
                           bias=nengo.dists.Choice([bias[0]]),
                           label='b')
        nengo.Connection(a.neurons,
                         b.neurons,
                         synapse=tau_s,
                         transform=conv2d_transform)

        bp = nengo.Probe(b.neurons)

    with nengo.Simulator(model, dt=dt, optimize=False) as sim:
        sim.run(pres_time)
    ref_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    # Currently, default TensorFlow does not support channels first in conv
    use_nengo_dl = nengo_dl is not None and channels_last
    ndl_out = np.zeros_like(ref_out)
    if use_nengo_dl:
        with nengo_dl.Simulator(model, dt=dt) as sim:
            sim.run(pres_time)
        ndl_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    with nengo_loihi.Simulator(model, dt=dt, target='simreal') as sim:
        sim.run(pres_time)
    real_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    with Simulator(model, dt=dt) as sim:
        sim.run(pres_time)
    sim_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape())

    if not output_shape.channels_last:
        ref_out = np.transpose(ref_out, (1, 2, 0))
        ndl_out = np.transpose(ndl_out, (1, 2, 0))
        real_out = np.transpose(real_out, (1, 2, 0))
        sim_out = np.transpose(sim_out, (1, 2, 0))

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(np.transpose(ref_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(np.transpose(ndl_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(np.transpose(sim_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    if use_nengo_dl:
        assert allclose(ndl_out, ref_out, atol=1e-5, rtol=1e-5)
    assert allclose(real_out, ref_out, atol=1, rtol=1e-3)
    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
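For the two-channel case in the test above, the image (rescaled to the range (-1, 1)) is split into an "on" channel and an "off" channel, and the filter bank is mirrored with -filters so the off channel contributes with the correct sign. A rough NumPy illustration of that split (just the idea, not nengo_loihi code):

import numpy as np

x = np.array([-0.6, 0.0, 0.4, 0.9])  # a few pixels already scaled to (-1, 1)
on_channel = np.maximum(x, 0)        # roughly what a.neurons[0::2] respond to (transform=+1, intercept 0)
off_channel = np.maximum(-x, 0)      # roughly what a.neurons[1::2] respond to (transform=-1, intercept 0)
print(on_channel, off_channel)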
Example #3
def evaluate_mnist_multiple_baseline_noise(args):

    #############################
    # load the data
    #############################
    input_nbr = args.input_nbr

    probe_sample_rate = (input_nbr/10)/1000 #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations 


    x = args.digit
    np.random.seed(args.seed)
    random.seed(args.seed)

    data = np.load('mnist_norm.npz', allow_pickle=True)
    image_train_filtered = data['image_train_filtered']/255
    label_train_filtered = data['label_train_filtered']
    image_test_filtered = data['image_test_filtered']/255
    label_test_filtered = data['label_test_filtered']


    image_assign_filtered = image_train_filtered
    label_assign_filtered = label_train_filtered

    image_train_filtered = np.tile(image_train_filtered,(args.iterations,1,1))
    label_train_filtered = np.tile(label_train_filtered,(args.iterations))

    #Simulation Parameters 
    #Presentation time
    presentation_time = args.presentation_time #0.20
    #Pause time
    # pause_time = args.pause_time + 0.0001
    pause_time = args.pause_time
    #Iterations
    iterations=args.iterations
    #Input layer parameters
    n_in = args.n_in
    # g_max = 1/784 #Maximum output contribution
    amp_neuron = args.amp_neuron
    n_neurons = args.n_neurons # Layer 1 neurons
    # inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition


    input_neurons_args = {
            "n_neurons":n_in,
            "dimensions":1,
            "label":"Input layer",
            "encoders":nengo.dists.Choice([[1]]),
            # "max_rates":nengo.dists.Uniform(22,22),
            # "intercepts":nengo.dists.Uniform(0,0),
            "gain":nengo.dists.Choice([args.gain_in]),
            "bias":nengo.dists.Choice([args.bias_in]),
            "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, args.noise_input), seed=args.seed), 

            "neuron_type":MyLIF_in_v2(tau_rc=args.tau_in,min_voltage=-1, amplitude=args.amp_neuron, tau_ref=args.tau_ref_in)
            # "neuron_type":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron. 
    }

    #Layer 1 parameters
    layer_1_neurons_args = {
            "n_neurons":n_neurons,
            "dimensions":1,
            "label":"Layer 1",
            "encoders":nengo.dists.Choice([[1]]),
            "gain":nengo.dists.Choice([args.gain_out]),
            "bias":nengo.dists.Choice([args.bias_out]),
            # "intercepts":nengo.dists.Choice([0]),
            # "max_rates":nengo.dists.Choice([args.rate_out,args.rate_out]),
            # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1), 
            # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
            # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
            "neuron_type":STDPLIF(tau_rc=args.tau_out, min_voltage=-1, spiking_threshold=args.thr_out, inhibition_time=args.inhibition_time,tau_ref=args.tau_ref_out,inc_n=args.inc_n,tau_n=args.tau_n)
    }

    #Learning rule parameters
    learning_args = {
            "lr": args.lr,
            "alpha": args.alpha,
            "winit_min":0,
            "winit_max":args.winit_max,
            "sample_distance": int((presentation_time+pause_time)*200*10), #Store weight after 10 images
    }

    # argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max) 

    images = image_train_filtered
    labels = label_train_filtered
    np.random.seed(args.seed)
    random.seed(args.seed) 

    model = nengo.Network("My network", seed = args.seed)
    #############################
    # Model construction
    #############################
    with model:
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
        true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        # input layer  
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)
        #Weights between input layer and layer 1
        w = nengo.Node(CustomRule_post_baseline(**learning_args), size_in=n_in, size_out=n_neurons)
        nengo.Connection(input_layer.neurons, w, synapse=None)
        nengo.Connection(w, layer1.neurons, synapse=args.synapse_layer_1)
        weights = w.output.history
        
    # with nengo_ocl.Simulator(model) as sim :   
    with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:

        
        w.output.set_signal_vmem(sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
        w.output.set_signal_out(sim.signals[sim.model.sig[layer1.neurons]["out"]])
        sim.run((presentation_time+pause_time) * labels.shape[0])

    last_weight = weights[-1]

    sim.close()

    pause_time = 0

    #Neuron class assignment

    images = image_assign_filtered
    labels = label_assign_filtered


    model = nengo.Network("My network", seed = args.seed)

    with model:

        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
        true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        # input layer  
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)
        nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)
        #Probes
        p_true_label = nengo.Probe(true_label)
        p_layer_1 = nengo.Probe(layer1.neurons)

    # with nengo_ocl.Simulator(model) as sim :   
    with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:
        
        sim.run((presentation_time+pause_time) * labels.shape[0])
    
    t_data = sim.trange()
    labels = sim.data[p_true_label][:,0]
    output_spikes = sim.data[p_layer_1]
    neuron_class = np.zeros((n_neurons, 1))
    n_classes = 10
    for j in range(n_neurons):
        spike_times_neuron_j = t_data[np.where(output_spikes[:,j] > 0)]
        max_spike_times = 0 
        for i in range(n_classes):
            class_presentation_times_i = t_data[np.where(labels == i)]
            #Normalized number of spikes wrt class presentation time
            num_spikes = len(np.intersect1d(spike_times_neuron_j,class_presentation_times_i))/(len(class_presentation_times_i)+1)
            if(num_spikes>max_spike_times):
                neuron_class[j] = i
                max_spike_times = num_spikes
    spikes_layer1_probe_train = sim.data[p_layer_1]



    #Testing

    images = image_test_filtered
    labels = label_test_filtered



    input_nbr = 10000
    
    model = nengo.Network(label="My network",)

    with model:

        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
        true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)
        nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)
        p_true_label = nengo.Probe(true_label)
        p_layer_1 = nengo.Probe(layer1.neurons)


    step_time = (presentation_time + pause_time) 

    with nengo.Simulator(model,dt=args.dt) as sim:
           
        sim.run(presentation_time * label_test_filtered.shape[0])

    accuracy_2 = evaluation_v2(10,n_neurons,int(((presentation_time * label_test_filtered.shape[0]) / sim.dt) / input_nbr),spikes_layer1_probe_train,label_train_filtered,sim.data[p_layer_1],label_test_filtered,sim.dt)


    labels = sim.data[p_true_label][:,0]
    t_data = sim.trange()
    output_spikes = sim.data[p_layer_1]
    n_classes = 10
    predicted_labels = []  
    true_labels = []
    correct_classified = 0
    wrong_classified = 0

    class_spikes = np.ones((10,1))

    for num in range(input_nbr):
        #np.sum(sim.data[my_spike_probe] > 0, axis=0)

        output_spikes_num = output_spikes[num*int((presentation_time + pause_time) /args.dt):(num+1)*int((presentation_time + pause_time) /args.dt),:] # 0.350/0.005
        num_spikes = np.sum(output_spikes_num > 0, axis=0)

        for i in range(n_classes):
            sum_temp = 0
            count_temp = 0
            for j in range(n_neurons):
                if((neuron_class[j]) == i) : 
                    sum_temp += num_spikes[j]
                    count_temp +=1
        
            if(count_temp==0):
                class_spikes[i] = 0
            else:
                class_spikes[i] = sum_temp
                # class_spikes[i] = sum_temp/count_temp

        # print(class_spikes)
        k = np.argmax(num_spikes)
        # predicted_labels.append(neuron_class[k])
        class_pred = np.argmax(class_spikes)
        predicted_labels.append(class_pred)

        true_class = labels[(num*int((presentation_time + pause_time) /args.dt))]

        if(class_pred == true_class):
            correct_classified+=1
        else:
            wrong_classified+=1

        
    accuracy = correct_classified/ (correct_classified+wrong_classified)*100
    print("Accuracy: ", accuracy)
    sim.close()

    del sim.data, labels, class_pred, spikes_layer1_probe_train

    return accuracy, accuracy_2, weights[-1]
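The test loop above classifies each presented image by summing the spikes of every layer-1 neuron assigned to a given class and taking the argmax over classes. A toy NumPy sketch of that voting step, with made-up spike counts and class assignments:

import numpy as np

neuron_class = np.array([0, 2, 2, 1])  # hypothetical class assigned to each of 4 neurons
num_spikes = np.array([3, 10, 7, 5])   # hypothetical spike counts for one test image
n_classes = 3

class_spikes = np.zeros(n_classes)
for c in range(n_classes):
    class_spikes[c] = num_spikes[neuron_class == c].sum()

class_pred = int(np.argmax(class_spikes))  # -> 2 for these numbers
print(class_spikes, class_pred)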
Example #4
    def model_init(self):
        model = nengo.Network(label='Neuron Scheduler')
        with model:
            # running_processes = waiting_processes
            print(self.running_process_size())
            num_cores_node = nengo.Node(self.num_cores)
            running_size = nengo.Node(output=self.running_process_size()
                                      )  # output=running_process_size())
            running_procs_size_en = nengo.Ensemble(self.calc_n_neurons,
                                                   1,
                                                   radius=self.num_cores)

            num_cores_en = nengo.Ensemble(1024, 1, radius=self.num_cores)
            nengo.Connection(num_cores_node, num_cores_en)

            avail_procs = nengo.Ensemble(self.calc_n_neurons,
                                         1,
                                         radius=self.num_cores)

            nengo.Connection(num_cores_en, avail_procs)
            # nengo.Connection(running_procs_size_en, avail_procs.neurons, transform=[[-1]]*calc_n_neurons)
            nengo.Connection(running_procs_size_en, avail_procs, transform=-1)

            def can_add_thd_logic(x):
                x = np.round(x)
                return x

            ## New Process Addition

            waiting_proc_q_top_size = nengo.Node(1)
            waiting_proc_size = nengo.Ensemble(self.calc_n_neurons,
                                               1,
                                               radius=self.num_cores)

            def waiting_q_input_logic(x):
                if x <= 0:
                    return self.num_cores * 2
                else:
                    return x

            # nengo.Connection(waiting_proc_q_top_size,waiting_proc_size,function=waiting_q_input_logic)
            can_add_next_proc = nengo.Ensemble(self.calc_n_neurons, 1)

            next_proc_size_check = nengo.Ensemble(
                self.calc_n_neurons,
                1,
                radius=self.num_cores,
                neuron_type=nengo.neurons.SpikingRectifiedLinear())
            next_proc_size_check.intercepts = Choice([-.1])

            nengo.Connection(avail_procs, next_proc_size_check)
            nengo.Connection(waiting_proc_size,
                             next_proc_size_check,
                             transform=-1)

            def can_add_next_proc_logic(x):
                if x > 0:
                    return 1
                else:
                    return 0

            nengo.Connection(next_proc_size_check,
                             can_add_next_proc,
                             function=can_add_thd_logic)

            ### Add New
            ## Queue Repr:
            qsim_waiting = nengo.Node(self.queue_nodes.output_message,
                                      size_out=3)
            qsim_add_spike = nengo.Node(self.queue_nodes.input_message,
                                        size_in=1,
                                        size_out=1)

            v = nengo.Ensemble(32, 3, radius=128)
            nengo.Connection(qsim_waiting, v)
            nengo.Connection(qsim_waiting[0],
                             waiting_proc_size,
                             function=waiting_q_input_logic)
            # nengo.Connection(qsim_waiting[1] , running_procs_size_en)

            add_new_proc_system = nengo.Ensemble(
                self.calc_n_neurons,
                1,
                radius=2,
                neuron_type=nengo.Izhikevich(reset_recovery=0))
            nengo.Connection(can_add_next_proc,
                             add_new_proc_system,
                             function=np.ceil)

            nengo.Connection(running_size, running_procs_size_en)
            ## Filter for spikes:

            nengo.Connection(add_new_proc_system, qsim_add_spike)
            #  deepcode ignore MultiplyList: Pointing to the list is okay here since neurons are read-only
            nengo.Connection(qsim_add_spike,
                             add_new_proc_system.neurons,
                             transform=[[-1]] * self.calc_n_neurons)

            ## RR Clock
            clock_input = nengo.Node(size_out=1)
            check_proc_ens = nengo.Ensemble(self.calc_n_neurons, dimensions=2)

            rr_clock = nengo.Network()
            with rr_clock:

                def node_tick_clock(t):
                    t = np.round(t) % 10
                    return t

                def neuron_timer_check(x):

                    nv = x[0]
                    if x[0] > 10:
                        nv = 0
                    else:
                        nv = x[0]
                    return nv

                stim = nengo.Node(1)
                inhibition = nengo.Node(0)
                interupt_proc = nengo.Ensemble(n_neurons=1024, dimensions=1)
                c = nengo.Ensemble(n_neurons=1024, dimensions=1)
                nengo.Connection(stim, interupt_proc)
                nengo.Connection(c,
                                 interupt_proc.neurons,
                                 transform=[[-2.5]] * 1024)
                nengo.Connection(inhibition, c)
                clock = nengo.Node(node_tick_clock)
                time_slice = nengo.Node(10)

                cl_enc = nengo.Ensemble(1024, dimensions=1, radius=25)
                ts_enc = nengo.Ensemble(1024, dimensions=1, radius=25)
                nengo.Connection(time_slice,
                                 ts_enc,
                                 function=lambda x: x % self.time_slice_ticks)
                nengo.Connection(clock, cl_enc)
                time_check = nengo.Ensemble(1024, dimensions=1, radius=20)
                # summation = nengo.networks.
                tau = 0.1
                nengo.Connection(cl_enc,
                                 time_check,
                                 synapse=tau,
                                 function=neuron_timer_check)

                nengo.Connection(time_check, c)

            qsim_running_interrupt = nengo.Node(size_in=1, size_out=0)
            nengo.Connection(interupt_proc, qsim_running_interrupt)

        self.model = model
        self.sim = nengo.Simulator(model)
Example #5
    stim_blue = nengo.Node(Piecewise(dict(zip(time, input))))

    red = gbellmf([0.25, 8, -0.6])
    nengo.Connection(stim_red, red.I)

    green = gbellmf([0.35, 2, -0.1])
    nengo.Connection(stim_green, green.I)

    blue = gbellmf([0.25, 3, 0.3])
    nengo.Connection(stim_blue, blue.I)

with model:
    pr_red = nengo.Probe(red.O, synapse=0.01)
    pr_green = nengo.Probe(green.O, synapse=0.01)
    pr_blue = nengo.Probe(blue.O, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(2.5)

t = sim.trange()
print(len(sim.data[pr_red]))
plt.figure()
plt.plot(t[500:] - 1.5, sim.data[pr_red][500:], c='r', label="Red")
plt.plot(t[500:] - 1.5, sim.data[pr_green][500:], c='g', label="Green")
plt.plot(t[500:] - 1.5, sim.data[pr_blue][500:], c='b', label="Blue")
plt.xlim(right=1)
plt.xlabel("Input")
plt.ylabel("Degree of membership")
plt.legend(loc="best")
plt.show()
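The gbellmf helper that builds red, green, and blue is not shown in this snippet. Assuming the name refers to the standard generalized bell membership function with arguments [a, b, c] (width, slope, centre), the curve each sub-network approximates would look like this sketch:

import numpy as np

def generalized_bell(x, a, b, c):
    # mu(x) = 1 / (1 + |(x - c) / a| ** (2 * b))
    return 1.0 / (1.0 + np.abs((x - c) / a) ** (2 * b))

x = np.linspace(-1, 1, 201)
mu_red = generalized_bell(x, 0.25, 8, -0.6)    # parameters taken from the snippet
mu_green = generalized_bell(x, 0.35, 2, -0.1)
mu_blue = generalized_bell(x, 0.25, 3, 0.3)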
Example #6
def run_nn(simulator=None, model=None, time=10):
    """Run ``model`` for ``time`` seconds, building a default nengo Simulator if none is supplied."""
    sim = simulator
    if sim is None:
        sim = nengo.Simulator(model)
    sim.run(time)
    return [model, sim]
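A minimal usage sketch for run_nn, building a throwaway model first; the model, ensemble, and probe here are hypothetical and only illustrate the call:

import nengo

with nengo.Network() as demo_model:
    stim = nengo.Node(0.5)
    ens = nengo.Ensemble(50, dimensions=1)
    nengo.Connection(stim, ens)
    probe = nengo.Probe(ens, synapse=0.01)

demo_model, sim = run_nn(model=demo_model, time=1.0)
print(sim.data[probe].shape)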
Example #7
with srf_network:
    p_output_spikes = nengo.Probe(srf_network.output.neurons)
    p_inh_weights = nengo.Probe(srf_network.conn_I,
                                'weights',
                                sample_every=1.0)
    p_exc_weights = nengo.Probe(srf_network.conn_E,
                                'weights',
                                sample_every=1.0)
    p_rec_weights = nengo.Probe(srf_network.conn_EE,
                                'weights',
                                sample_every=1.0)
    p_exc_rates = nengo.Probe(srf_network.exc.neurons)
    p_inh_rates = nengo.Probe(srf_network.inh.neurons)

with nengo.Simulator(srf_network, optimize=True) as sim:
    sim.run(0.01)

exc_weights = srf_network.conn_E.transform
inh_weights = srf_network.conn_I.transform
rec_weights = srf_network.conn_EE.transform

plt.imshow(np.asarray(exc_weights.sample()).T,
           aspect="auto",
           interpolation="nearest")
plt.colorbar()
plt.show()
plt.imshow(np.asarray(rec_weights.sample()).T,
           aspect="auto",
           interpolation="nearest")
plt.colorbar()
Example #8
def evaluate_mnist_single(args):

    #############################
    # load the data
    #############################
    input_nbr = args.input_nbr

    (image_train, label_train), (image_test, label_test) = (tf.keras.datasets.fashion_mnist.load_data())

    probe_sample_rate = (input_nbr/10)/1000 #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations 
    # probe_sample_rate = 1000
    image_train_filtered = []
    label_train_filtered = []

    x = args.digit

    for i in range(0,input_nbr):
        if label_train[i] == x:
            image_train_filtered.append(image_train[i])
            label_train_filtered.append(label_train[i])

    image_train_filtered = np.array(image_train_filtered)
    label_train_filtered = np.array(label_train_filtered)


    #Simulation Parameters 
    #Presentation time
    presentation_time = args.presentation_time #0.20
    #Pause time
    pause_time = args.pause_time
    #Iterations
    iterations=args.iterations
    #Input layer parameters
    n_in = args.n_in
    # g_max = 1/784 #Maximum output contribution
    g_max = args.g_max
    n_neurons = args.n_neurons # Layer 1 neurons
    inhib_factor = -0*100 #Multiplication factor for lateral inhibition

    n_neurons = 1
    input_neurons_args = {
            "n_neurons":n_in,
            "dimensions":1,
            "label":"Input layer",
            "encoders":nengo.dists.Uniform(1,1),
            "gain":nengo.dists.Uniform(2,2),
            "bias":nengo.dists.Uniform(0,0),
            "neuron_type":MyLIF_in(tau_rc=args.tau_in,min_voltage=-1, amplitude=args.g_max)
            # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_in,min_voltage=0)#SpikingRelu neuron. 
    }

    #Layer 1 parameters
    layer_1_neurons_args = {
            "n_neurons":n_neurons,
            "dimensions":1,
            "label":"Layer 1",
            "encoders":nengo.dists.Uniform(1,1),
            # "gain":nengo.dists.Uniform(2,2),
            # "bias":nengo.dists.Uniform(0,0),
            "intercepts":nengo.dists.Choice([0]),
            "max_rates":nengo.dists.Choice([20,20]),
            "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 1), seed=1), 
            # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
            # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
            "neuron_type":STDPLIF(tau_rc=args.tau_out, min_voltage=-1),
    }


    # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 20), seed=1),     

    #Lateral Inhibition parameters
    lateral_inhib_args = {
            "transform": inhib_factor* (np.full((n_neurons, n_neurons), 1) - np.eye(n_neurons)),
            "synapse":0.01,
            "label":"Lateral Inhibition"
    }

    #Learning rule parameters
    learning_args = {
            "lr": args.lr,
            "winit_min":0,
            "winit_max":0.1,
    #         "tpw":50,
    #         "prev_flag":True,
            "sample_distance": int((presentation_time+pause_time)*200),
    }

    argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max) 

    images = image_train_filtered
    labels = label_train_filtered


    model = nengo.Network("My network")
    #############################
    # Model construction
    #############################
    with model:

        picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time))
        true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time))
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))

        # input layer  
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)

        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)

        #Weights between input layer and layer 1
        w = nengo.Node(CustomRule_post_v2(**learning_args), size_in=n_in, size_out=n_neurons)
        nengo.Connection(input_layer.neurons, w, synapse=None)
        nengo.Connection(w, layer1.neurons, synapse=None)

        #Lateral inhibition
        # inhib = nengo.Connection(layer1.neurons,layer1.neurons,**lateral_inhib_args) 

        #Probes
        # p_true_label = nengo.Probe(true_label, sample_every=probe_sample_rate)
        p_input_layer = nengo.Probe(input_layer.neurons, sample_every=probe_sample_rate)
        p_layer_1 = nengo.Probe(layer1.neurons, sample_every=probe_sample_rate)
        weights = w.output.history

        


    with nengo.Simulator(model, dt=0.005) as sim :   
    # with nengo_spinnaker.Simulator(model) as sim:

        
        w.output.set_signal_vmem(sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
        w.output.set_signal_out(sim.signals[sim.model.sig[layer1.neurons]["out"]])
        
        
        sim.run((presentation_time+pause_time) * labels.shape[0]*iterations)

    

    #save the model
    # now = time.strftime("%Y%m%d-%H%M%S")
    # folder = os.getcwd()+"/MNIST_VDSP"+now
    # os.mkdir(folder)
    last_weight = weights[-1]

    # pickle.dump(weights, open( folder+"/trained_weights", "wb" ))
    # pickle.dump(argument_string, open( folder+"/arguments", "wb" ))

    sim.close()

    return weights
Example #9
def go(NPre=100, N=100, t=10, c=None, seed=0, dt=0.001, Tff=0.3, tTrans=0.01,
        stage=None, alpha=3e-7, eMax=1e-1,
        fPre=DoubleExp(1e-3, 1e-1), fNMDA=DoubleExp(10.6e-3, 285e-3), fS=DoubleExp(1e-3, 1e-1),
        dPreA=None, dPreB=None, dPreC=None, dFdfw=None, dBio=None, dNeg=None, dInh=None,
        ePreA=None, ePreB=None, ePreC=None, eFdfw=None, eBio=None, eNeg=None, eInh=None,
        stimA=lambda t: 0, stimB=lambda t: 0, stimC=lambda t: 0, DA=lambda t: 0):

    if not c: c = t
    with nengo.Network(seed=seed) as model:
        inptA = nengo.Node(stimA)
        inptB = nengo.Node(stimB)
        inptC = nengo.Node(stimC)
        preA = nengo.Ensemble(NPre, 1, max_rates=Uniform(30, 30), seed=seed)
        preB = nengo.Ensemble(NPre, 1, max_rates=Uniform(30, 30), seed=seed)
        preC = nengo.Ensemble(NPre, 1, max_rates=Uniform(30, 30), seed=seed)
        fdfw = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed)
        ens = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed+1)
        inh = nengo.Ensemble(N, 1, neuron_type=Bio("Interneuron", DA=DA), seed=seed+2)
        tarFdfw = nengo.Ensemble(N, 1, max_rates=Uniform(30, 30), intercepts=Uniform(-0.8, 0.8), neuron_type=nengo.LIF(), seed=seed)
        tarEns = nengo.Ensemble(N, 1, max_rates=Uniform(30, 30), intercepts=Uniform(-0.8, 0.8), neuron_type=nengo.LIF(), seed=seed+1)
        tarInh = nengo.Ensemble(N, 1, max_rates=Uniform(30, 30), intercepts=Uniform(0.2, 0.8), encoders=Choice([[1]]), neuron_type=nengo.LIF(), seed=seed+2)
        cA = nengo.Connection(inptA, preA, synapse=None, seed=seed)
        cB = nengo.Connection(inptB, preB, synapse=None, seed=seed)
        cC = nengo.Connection(inptC, preC, synapse=None, seed=seed)
        pInptA = nengo.Probe(inptA, synapse=None)
        pInptB = nengo.Probe(inptB, synapse=None)
        pInptC = nengo.Probe(inptC, synapse=None)
        pPreA = nengo.Probe(preA.neurons, synapse=None)
        pPreB = nengo.Probe(preB.neurons, synapse=None)
        pPreC = nengo.Probe(preC.neurons, synapse=None)
        pFdfw = nengo.Probe(fdfw.neurons, synapse=None)
        pTarFdfw = nengo.Probe(tarFdfw.neurons, synapse=None)
        pEns = nengo.Probe(ens.neurons, synapse=None)
        pTarEns = nengo.Probe(tarEns.neurons, synapse=None)
        pInh = nengo.Probe(inh.neurons, synapse=None)
        pTarInh = nengo.Probe(tarInh.neurons, synapse=None)
        if stage==1:
            nengo.Connection(inptA, tarFdfw, synapse=fPre, seed=seed)
            nengo.Connection(inptB, tarEns, synapse=fPre, seed=seed)
            nengo.Connection(inptC, tarInh, synapse=fPre, seed=seed)
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(preB, ens, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            c3 = nengo.Connection(preC, inh, synapse=fPre, solver=NoSolver(dPreC), seed=seed)
            learnEncoders(c1, tarFdfw, fS, alpha=alpha, eMax=eMax, tTrans=tTrans)
            learnEncoders(c2, tarEns, fS, alpha=3*alpha, eMax=10*eMax, tTrans=tTrans)
            learnEncoders(c3, tarInh, fS, alpha=alpha/3, eMax=eMax, tTrans=tTrans)
        if stage==2:
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(preC, inh, synapse=fPre, solver=NoSolver(dPreC), seed=seed)
        if stage==3:
            cB.synapse = fNMDA
            nengo.Connection(inptA, tarFdfw, synapse=fPre, seed=seed)
            nengo.Connection(inptB, tarEns, synapse=fPre, seed=seed)
            nengo.Connection(tarFdfw, tarEns, synapse=fNMDA, transform=Tff, seed=seed)
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(preB, ens, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            c3 = nengo.Connection(fdfw, ens, synapse=NMDA(), solver=NoSolver(dFdfw), seed=seed)
            learnEncoders(c3, tarEns, fS, alpha=alpha, eMax=eMax, tTrans=tTrans)
        if stage==4:
            cB.synapse = fNMDA
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(preB, ens, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            c3 = nengo.Connection(fdfw, ens, synapse=NMDA(), solver=NoSolver(dFdfw), seed=seed)
        if stage==5:
            preB2 = nengo.Ensemble(NPre, 1, max_rates=Uniform(30, 30), seed=seed)
            ens2 = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed+1)
            ens3 = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed+1)
            nengo.Connection(inptB, preB2, synapse=fNMDA, seed=seed)
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(fdfw, ens, synapse=NMDA(), solver=NoSolver(dFdfw), seed=seed)
            c3 = nengo.Connection(preB, ens2, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            c4 = nengo.Connection(ens2, ens, synapse=NMDA(), solver=NoSolver(dBio), seed=seed)
            c5 = nengo.Connection(fdfw, ens3, synapse=NMDA(), solver=NoSolver(dFdfw), seed=seed)
            c6 = nengo.Connection(preB2, ens3, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            learnEncoders(c4, ens3, fS, alpha=alpha, eMax=eMax, tTrans=tTrans)
            pTarEns = nengo.Probe(ens3.neurons, synapse=None)
        if stage==6:
            preA2 = nengo.Ensemble(NPre, 1, max_rates=Uniform(30, 30), seed=seed)
            fdfw2 = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed)
            fdfw3 = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed)
            fdfw4 = nengo.Ensemble(N, 1, neuron_type=Bio("Pyramidal", DA=DA), seed=seed)
            tarFdfw4 = nengo.Ensemble(N, 1, max_rates=Uniform(30, 30), intercepts=Uniform(-0.8, 0.8), neuron_type=nengo.LIF(), seed=seed)
            nengo.Connection(inptA, tarFdfw4, synapse=fPre, seed=seed)
            nengo.Connection(inptB, preA2, synapse=fNMDA, seed=seed)
            nengo.Connection(inptC, tarInh, synapse=fPre, seed=seed)
            nengo.Connection(tarInh, tarFdfw4.neurons, synapse=None, transform=-1e2*np.ones((N, 1)), seed=seed)
            c1 = nengo.Connection(preB, ens, synapse=fPre, solver=NoSolver(dPreB), seed=seed)
            c2 = nengo.Connection(ens, fdfw2, synapse=NMDA(), solver=NoSolver(dNeg), seed=seed)
            c3 = nengo.Connection(preA2, fdfw3, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c4 = nengo.Connection(preA, fdfw4, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c5 = nengo.Connection(preC, inh, synapse=fPre, solver=NoSolver(dPreC), seed=seed)
            c6 = nengo.Connection(inh, fdfw4, synapse=GABA(), solver=NoSolver(dInh), seed=seed)
            learnEncoders(c2, fdfw3, fS, alpha=alpha, eMax=eMax, tTrans=tTrans)
            learnEncoders(c6, tarFdfw4, fS, alpha=1e3*alpha, eMax=1e3*eMax, tTrans=tTrans, inh=True)
            pFdfw2 = nengo.Probe(fdfw2.neurons, synapse=None)
            pFdfw4 = nengo.Probe(fdfw4.neurons, synapse=None)
            pTarFdfw2 = nengo.Probe(fdfw3.neurons, synapse=None)
            pTarFdfw4 = nengo.Probe(tarFdfw4.neurons, synapse=None)
        if stage==7:
            c1 = nengo.Connection(preA, fdfw, synapse=fPre, solver=NoSolver(dPreA), seed=seed)
            c2 = nengo.Connection(fdfw, ens, synapse=NMDA(), solver=NoSolver(dFdfw), seed=seed)
            c3 = nengo.Connection(ens, ens, synapse=NMDA(), solver=NoSolver(dBio), seed=seed)
            c4 = nengo.Connection(ens, fdfw, synapse=NMDA(), solver=NoSolver(dNeg), seed=seed)
            c5 = nengo.Connection(preC, inh, synapse=fPre, solver=NoSolver(dPreC), seed=seed)
            c6 = nengo.Connection(inh, fdfw, synapse=GABA(), solver=NoSolver(dInh), seed=seed)

    with nengo.Simulator(model, seed=seed, dt=dt, progress_bar=False) as sim:
        if stage==1:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dPreB, ePreB)
            setWeights(c3, dPreC, ePreC)
        if stage==2:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dPreC, ePreC)
        if stage==3:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dPreB, ePreB)
            setWeights(c3, dFdfw, eFdfw)
        if stage==4:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dPreB, ePreB)
            setWeights(c3, dFdfw, eFdfw)
        if stage==5:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dFdfw, eFdfw)
            setWeights(c3, dPreB, ePreB)
            setWeights(c4, dBio, eBio)
            setWeights(c5, dFdfw, eFdfw)
            setWeights(c6, dPreB, ePreB)
        if stage==6:
            setWeights(c1, dPreB, ePreB)
            setWeights(c2, dNeg, eNeg)
            setWeights(c3, dPreA, ePreA)
            setWeights(c4, dPreA, ePreA)
            setWeights(c5, dPreC, ePreC)
            setWeights(c6, dInh, eInh)
        if stage==7:
            setWeights(c1, dPreA, ePreA)
            setWeights(c2, dFdfw, eFdfw)
            setWeights(c3, dBio, eBio)
            setWeights(c4, dNeg, eNeg)
            setWeights(c5, dPreC, ePreC)
            setWeights(c6, dInh, eInh)
        neuron.h.init()
        sim.run(t, progress_bar=True)
        reset_neuron(sim, model) 

    ePreA = c1.e if stage==1 else ePreA
    ePreB = c2.e if stage==1 else ePreB
    ePreC = c3.e if stage==1 else ePreC
    eFdfw = c3.e if stage==3 else eFdfw
    eBio = c4.e if stage==5 else eBio
    eNeg = c2.e if stage==6 else eNeg
    eInh = c6.e if stage==6 else eInh

    return dict(
        times=sim.trange(),
        inptA=sim.data[pInptA],
        inptB=sim.data[pInptB],
        inptC=sim.data[pInptC],
        preA=sim.data[pPreA],
        preB=sim.data[pPreB],
        preC=sim.data[pPreC],
        fdfw=sim.data[pFdfw],
        ens=sim.data[pEns],
        inh=sim.data[pInh],
        tarFdfw=sim.data[pTarFdfw],
        tarEns=sim.data[pTarEns],
        tarInh=sim.data[pTarInh],
        fdfw2=sim.data[pFdfw2] if stage==6 else None,
        fdfw4=sim.data[pFdfw4] if stage==6 else None,
        tarFdfw2=sim.data[pTarFdfw2] if stage==6 else None,
        tarFdfw4=sim.data[pTarFdfw4] if stage==6 else None,
        ePreA=ePreA,
        ePreB=ePreB,
        ePreC=ePreC,
        eFdfw=eFdfw,
        eBio=eBio,
        eNeg=eNeg,
        eInh=eInh,
    )
Example #10
            f"\n\tEncoders: {pre.encoders}\n\tIntercepts: {pre.intercepts}\n\tMax rates: {pre.max_rates}" )
    f.write(
            f"Post:\n\t {post.neuron_type} \n\tNeurons: {post.n_neurons}\n\tGain: {post.gain} \n\tBias: {post.bias} "
            f"\n\tEncoders: {post.encoders}\n\tIntercepts: {post.intercepts}\n\tMax rates: {post.max_rates}" )
    f.write( f"Rule:\n\t {conn.learning_rule_type}\n" )

if args.level >= 1:
    print( "######################################################",
           "####################### TRAINING #####################",
           "######################################################",
           sep="\n" )
    
    print( f"Backend is {args.backend}, running on ", end="" )
    if args.backend == "nengo_core":
        print( "CPU" )
        with nengo.Simulator( model, seed=args.seed ) as sim_train:
            sim_train.run( sim_train_time )
    if args.backend == "nengo_dl":
        print( args.device )
        with nengo_dl.Simulator( model, seed=args.seed, device=args.device ) as sim_train:
            sim_train.run( sim_train_time )
    
    # print number of recorded spikes
    num_spikes_train = np.sum( sim_train.data[ post_probe ] > 0, axis=0 )
    for line in graph.graph( f"Spikes distribution (timestep={dt}):",
                             [ (str( i ), x / np.sum( num_spikes_train ) * 100) for i, x in
                               enumerate( num_spikes_train ) ] ):
        print( line )
    print( "\tTotal:", np.sum( num_spikes_train ) )
    print( f"\tNormalised standard dev.: {np.std( num_spikes_train ) / np.mean( num_spikes_train )}" )
    
Example #11
        nengo.Connection(dummy_ctx, cortex1, function=project)

        # prior is sent to the node 'cortex_in' to be processed
        # and converted to the posterior.
        nengo.Connection(cortex1, cortex_in)

        ### ------------------------------------------------------------------------------------------------------- ###
        # Probes

        # wta
        wta_doutp = nengo.Probe(wta.output, synapse=0.02)

        # ctx
        cortex1_p = nengo.Probe(cortex1, synapse=0.02)

    sim = nengo.Simulator(model, dt=dt)  # Create the simulator
    sim.run(training_size * 3)  # Run it for training_size * 3 seconds

    # Collect data and write it to a pickle file
    data_out = {}
    data_out[index] = [
        sim.data[wta_doutp], sim.data[cortex1_p], space._basis, space._scale
    ]

    pickleout = open("data_out/" + fname_output + "_" + str(index) + ".p",
                     'wb')
    pickle.dump(data_out, pickleout)
    pickleout.close()
    print("pickle complete for iteration: ", index, "in ", fname_output)
# ------------------------------------------------------------------------------------------------------- #
Example #12
def run_experiment(sacc, t):
    # Build the simulator, run it for t seconds, and return the data recorded
    # by the MN_hr_p and MN_vr_p probes (defined elsewhere in the model).
    sim = nengo.Simulator(model, dt=0.001, builder=nengo.builder.Builder(copy=False))
    sim.run(t)

    return sim.data[MN_hr_p], sim.data[MN_vr_p]
Example #13
    def evaluate(self, p, plt):
        t_tone_start = 0.0
        t_tone_end = t_tone_start + p.t_tone
        t_puff_start = t_tone_end + p.t_delay
        t_puff_end = t_puff_start + p.t_puff

        def puff_func(t):
            if t_puff_start < t % p.period < t_puff_end:
                return 1
            else:
                return 0

        def tone_func(t):
            if t_tone_start < t % p.period < t_tone_end:
                return 1
            else:
                return 0

        model = nengo.Network()
        with model:

            ###########################################################################
            # Setup the conditioned stimulus (i.e., a tone) and the unconditioned     #
            # stimulus (i.e., a puff)                                                 #
            ###########################################################################
            nd_tone = nengo.Node(tone_func)
            nd_puff = nengo.Node(puff_func)

            ###########################################################################
            # Setup the reflex generator and the eye-motor system                     #
            ###########################################################################

            # The reflex pathway runs through the Trigeminal nucleus in the
            # brainstem; we don't model this in this particular model

            # Scaling factor that has to be applied to the reflex trajectory to scale
            # it to a range from 0 to 1
            reflex_scale = 1.0 / 25.0

            # The reflex system takes an input and produces the reflex trajectory on
            # the rising edge (convolves the differential of the input with the
            # trajectory)
            nd_reflex = nengo.Node(EyeblinkReflex())  # Unscaled output
            nd_reflex_out = nengo.Node(size_in=1)  # Normalised output
            nengo.Connection(nd_reflex[0],
                             nd_reflex_out,
                             transform=reflex_scale,
                             synapse=None)

            if not p.do_minimal:
                # The eyelid component represents the state of the eye in the world.
                # It receives two inputs, an agonist input (closing the eye, dim 0) and an
                # antagonist input (opening the eye, dim 1).
                nd_eyelid = nengo.Node(Eyelid())  # Unscaled input
                eyelid_in = nengo.Ensemble(n_neurons=100, dimensions=2)

                nengo.Connection(eyelid_in,
                                 nd_eyelid[0],
                                 transform=1.0 / reflex_scale,
                                 function=lambda x: max(x[0], x[1]),
                                 synapse=0.005)

                # Constantly open the eye a little bit
                nd_eye_bias = nengo.Node(p.eye_bias)
                nengo.Connection(nd_eye_bias, nd_eyelid[1])

            # We can't detect the puff if the eye is closed, so multiply the output
            # from nd_puff by the amount the eye is open. This is our unconditioned
            # stimulus.
            # NOTE: Currently disabled by commenting out the line below
                c0, c1 = nengo.Node(size_in=1), nengo.Node(
                    size_in=1)  # Only for GUI
                nengo.Connection(nd_eyelid, c0, synapse=None)
                nengo.Connection(c0, c1, synapse=None)
            #    nengo.Connection(c1, nd_us[1], synapse=None)

            # Connect the unconditioned stimulus to the reflex generator
            nd_us = nengo.Node(lambda t, x: x[0] * (1 - x[1]),
                               size_in=2,
                               size_out=1)
            nengo.Connection(nd_puff, nd_us[0], synapse=None)
            nengo.Connection(nd_us, nd_reflex)
            if not p.do_minimal:
                nengo.Connection(nd_reflex_out, eyelid_in[0])

            ###########################################################################
            # Generate a neural representation of the conditioned stimulus            #
            ###########################################################################

            nd_cs = nengo.Node(size_in=1)
            ens_pcn = nengo.Ensemble(n_neurons=100, dimensions=1)
            nengo.Connection(nd_tone, nd_cs, synapse=None)
            nengo.Connection(nd_cs, ens_pcn)

            ###########################################################################
            # Generate a LMU representation of the conditioned stimulus               #
            ###########################################################################

            # Build the LMU, feed the conditioned stimulus into it
            net_granule_golgi = GranuleGolgiCircuit(
                ens_pcn,
                tau=p.tau,
                q=p.q,
                theta=p.theta,
                n_granule=p.n_granule,
                n_golgi=p.n_granule // 10,
                golgi_intercepts=nengo.dists.CosineSimilarity(p.q + 2)
                if p.use_cosine else nengo.dists.Uniform(-1, 1),
                granule_intercepts=nengo.dists.CosineSimilarity(p.q + 2)
                if p.use_cosine else nengo.dists.Uniform(-1, 1),
                mode=p.mode,
            )

            ###########################################################################
            # Learn the connection from the Granule cells to the Purkinje cells via   #
            # input from the Interior Olive                                           #
            ###########################################################################

            # This is the US pathway; the data is relayed from the Trigeminal nucleus
            # to the Interior Olive.

            ens_cn = nengo.Ensemble(n_neurons=100, dimensions=1)
            ens_io = nengo.Ensemble(n_neurons=100, dimensions=1)
            ens_purkinje = nengo.Ensemble(n_neurons=100, dimensions=1)

            # Represent the error signal in ens_io
            nengo.Connection(nd_reflex_out[0], ens_io, transform=-1)
            nengo.Connection(
                ens_cn, ens_io, transform=1,
                synapse=p.tau_error)  # This connection does not exist

            # Project from the granule cells onto the Purkinje cells (learned connection)
            c_learn = nengo.Connection(
                net_granule_golgi.ens_granule.neurons,
                ens_purkinje,
                transform=np.zeros((ens_purkinje.dimensions,
                                    net_granule_golgi.ens_granule.n_neurons)),
                learning_rule_type=nengo.learning_rules.PES(
                    learning_rate=p.learning_rate, pre_synapse=p.tau_pre))
            nengo.Connection(ens_io, c_learn.learning_rule)

            ###########################################################################
            # Project from CN onto the motor system
            ###########################################################################

            nengo.Connection(ens_purkinje, ens_cn)
            if not p.do_minimal:
                nengo.Connection(ens_cn, eyelid_in[1])

            p_nd_reflex_out = nengo.Probe(nd_reflex_out,
                                          sample_every=p.sample_every)
            if not p.do_minimal:
                p_eyelid = nengo.Probe(nd_eyelid, sample_every=p.sample_every)
            p_purkinje = nengo.Probe(ens_purkinje,
                                     synapse=p.tau_purkinje,
                                     sample_every=p.sample_every)
            p_granule = nengo.Probe(net_granule_golgi.ens_granule,
                                    synapse=0.03,
                                    sample_every=p.sample_every)

        add_labels(model, locals=locals())

        sim = nengo.Simulator(model)
        with sim:
            sim.run(p.period * p.n_trials)

        dt = p.sample_every
        steps = int(p.period / dt)

        purk = sim.data[p_purkinje].reshape(-1, steps).T
        v = np.clip(purk[:, :], 0, np.inf) * dt / reflex_scale
        pos = np.cumsum(v, axis=0)

        if plt:
            t = np.arange(steps) * dt

            ax1 = plt.subplot(4, 1, 1)
            ax1.set_ylabel('granule')
            ax2 = plt.subplot(4, 1, 2)
            ax2.set_ylabel('purkinje')
            ax3 = plt.subplot(4, 1, 3)
            ax3.set_ylabel('eye position\n(due to reflex)')
            ax4 = plt.subplot(4, 1, 4)
            ax4.set_ylabel('eye position\n at puff start')
            ax4.set_xlabel('trial')

            n_steps = len(sim.data[p_purkinje])
            cmap = matplotlib.cm.get_cmap("viridis")
            for i in range(0, n_steps, steps):
                color = cmap(i / n_steps)
                #if not p.do_minimal:
                #    ax1.plot(t, sim.data[p_eyelid][i:i+steps], label='eyelid %d'%(i//steps), ls='--')
                ax2.plot(t, sim.data[p_purkinje][i:i + steps], color=color)
                ax3.plot(t,
                         np.cumsum(np.abs(sim.data[p_purkinje][i:i + steps])) *
                         dt / reflex_scale,
                         color=color)
            ax1.plot(t, sim.data[p_granule][:steps])
            ax2b = ax2.twinx()
            ax2b.plot(t, sim.data[p_nd_reflex_out][:steps], c='k', ls='--')

            ax4.plot(pos[int(t_puff_start / dt)])

        r = dict(
            final_pos=pos[int(t_puff_start / dt), -1],
            pos_at_puff_start=pos[int(t_puff_start / dt)],
        )
        if p.save_plot_data:
            r['purkinje'] = sim.data[p_purkinje]
            r['granule'] = sim.data[p_granule][:steps]
            r['reflex'] = sim.data[p_nd_reflex_out][:steps]
        return r
Example #14
        p_exc_rates = nengo.Probe(srf_network.exc.neurons)
        p_inh_rates = nengo.Probe(srf_network.inh.neurons)
        p_inh_weights = nengo.Probe(srf_network.conn_I, 'weights', sample_every=1.0)
        p_exc_weights = nengo.Probe(srf_network.conn_E, 'weights', sample_every=1.0)
        if srf_network.conn_EE is not None:
            p_rec_weights = nengo.Probe(srf_network.conn_EE, 'weights', sample_every=1.0)

    p_weights_PV_E = nengo.Probe(conn_PV_E, 'weights', sample_every=1.0)
    p_weights_PV_I = nengo.Probe(conn_PV_I, 'weights', sample_every=1.0)
    p_weights_decoder_I = nengo.Probe(conn_decoder_I, 'weights', sample_every=1.0)
    p_decoder_spikes = nengo.Probe(decoder.neurons, synapse=None)
    p_decoder_inh_rates = nengo.Probe(decoder_inh.neurons, synapse=None)
    p_cd = nengo.Probe(coincidence_detection, synapse=None, sample_every=0.1)

    
with nengo.Simulator(model, optimize=True, progress_bar=TerminalProgressBar()) as sim:
    sim.run(np.max(t_end))


output_spikes = sim.data[p_output_spikes]
exc_rates = sim.data[p_exc_rates]
inh_rates = sim.data[p_inh_rates] 
decoder_spikes = sim.data[p_decoder_spikes]
decoder_inh_rates = sim.data[p_decoder_inh_rates]
output_rates = rates_kernel(sim.trange(), output_spikes, tau=0.1)
decoder_rates = rates_kernel(sim.trange(), decoder_spikes, tau=0.1)

exc_weights = sim.data[p_exc_weights]
#inh_weights = sim.data[p_inh_weights] 
np.save("output_spikes_dof.npy", output_spikes)
np.save("output_rates_dof.npy", output_rates)
Example #15
def test_conv_connection(channels, channels_last, Simulator, seed, rng, plt,
                         allclose):
    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = 1.999 * test_x - 0.999  # range (-1, 1)
    input_shape = nengo_transforms.ChannelShape(
        (test_x.shape + (channels, )) if channels_last else
        ((channels, ) + test_x.shape),
        channels_last=channels_last)

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    filters = filters[None, :, :, :]  # single channel
    filters = np.transpose(filters, (2, 3, 0, 1))
    strides = (2, 2)
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    neuron_type = LoihiLIF(tau_rc=tau_rc, tau_ref=tau_ref)

    pres_time = 0.1

    with nengo.Network(seed=seed) as model:
        nengo_loihi.add_params(model)

        u = nengo.Node(test_x.ravel(), label="u")

        a = nengo.Ensemble(input_shape.size,
                           1,
                           neuron_type=LoihiSpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([40 / channels]),
                           intercepts=nengo.dists.Choice([0]),
                           label='a')
        model.config[a].on_chip = False

        if channels == 1:
            nengo.Connection(u, a.neurons, transform=1, synapse=None)
        elif channels == 2:
            # encode image into spikes using two channels (on/off)
            if input_shape.channels_last:
                nengo.Connection(u, a.neurons[0::2], transform=1, synapse=None)
                nengo.Connection(u,
                                 a.neurons[1::2],
                                 transform=-1,
                                 synapse=None)
            else:
                k = input_shape.spatial_shape[0] * input_shape.spatial_shape[1]
                nengo.Connection(u, a.neurons[:k], transform=1, synapse=None)
                nengo.Connection(u, a.neurons[k:], transform=-1, synapse=None)

            filters = np.concatenate([filters, -filters], axis=2)
        else:
            raise ValueError("Test not configured for more than two channels")

        conv2d_transform = nengo_transforms.Convolution(
            8,
            input_shape,
            strides=strides,
            kernel_size=(7, 7),
            channels_last=channels_last,
            init=filters)

        output_shape = conv2d_transform.output_shape

        gain, bias = neuron_type.gain_bias(max_rates=100, intercepts=0)
        gain = gain * 0.01  # account for `a` max_rates
        b = nengo.Ensemble(output_shape.size,
                           1,
                           neuron_type=neuron_type,
                           gain=nengo.dists.Choice([gain[0]]),
                           bias=nengo.dists.Choice([bias[0]]),
                           label='b')
        nengo.Connection(a.neurons,
                         b.neurons,
                         synapse=tau_s,
                         transform=conv2d_transform)

        bp = nengo.Probe(b.neurons)

    with nengo.Simulator(model, dt=dt, optimize=False) as sim:
        sim.run(pres_time)
    ref_out = sim.data[bp].mean(axis=0).reshape(output_shape.shape)

    # Currently, non-gpu TensorFlow does not support channels first in conv
    use_nengo_dl = HAS_DL and channels_last
    ndl_out = np.zeros_like(ref_out)
    if use_nengo_dl:
        with nengo_dl.Simulator(model, dt=dt) as sim_dl:
            sim_dl.run(pres_time)
        ndl_out = sim_dl.data[bp].mean(axis=0).reshape(output_shape.shape)

    with nengo_loihi.Simulator(model, dt=dt, target='simreal') as sim_real:
        sim_real.run(pres_time)
    real_out = sim_real.data[bp].mean(axis=0).reshape(output_shape.shape)

    with Simulator(model, dt=dt) as sim_loihi:
        if "loihi" in sim_loihi.sims:
            sim_loihi.sims["loihi"].snip_max_spikes_per_step = 800
        sim_loihi.run(pres_time)
    sim_out = sim_loihi.data[bp].mean(axis=0).reshape(output_shape.shape)

    if not output_shape.channels_last:
        ref_out = np.transpose(ref_out, (1, 2, 0))
        ndl_out = np.transpose(ndl_out, (1, 2, 0))
        real_out = np.transpose(real_out, (1, 2, 0))
        sim_out = np.transpose(sim_out, (1, 2, 0))

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(np.transpose(ref_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(np.transpose(ndl_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(np.transpose(sim_out, (2, 0, 1)), vmin=0, vmax=out_max, cols=8, ax=ax)

    if use_nengo_dl:
        assert allclose(ndl_out, ref_out, atol=1e-5, rtol=1e-5)
    assert allclose(real_out, ref_out, atol=1, rtol=1e-3)
    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
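As a quick sanity check of the convolution geometry used in this test (28x28 single-channel input, 7x7 kernels, stride 2, 8 filters), the output shape can be read directly off the transform; a minimal standalone sketch with the standard nengo.transforms API:

import nengo

conv = nengo.transforms.Convolution(
    n_filters=8,
    input_shape=(28, 28, 1),   # channels-last, single input channel
    kernel_size=(7, 7),
    strides=(2, 2),
)
print(conv.output_shape.shape)  # (11, 11, 8) with the default "valid" padding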
Example #16
    def __init__(self,
                 mapping,
                 threshold=0.2,
                 learning_rate=1e-4,
                 DimVocab=256,
                 subdimensions=32,
                 learned_function_scale=2.0):
        model = spa.SPA()
        self.model = model
        self.mapping = mapping
        #self.vocab_category = spa.Vocabulary(D_category)
        #self.vocab_items = spa.Vocabulary(D_items)
        self.VocabUnified = spa.Vocabulary(DimVocab)
        for k in sorted(mapping.keys()):  # allocating vectors for category names
            self.VocabUnified.parse(k)
            for v in mapping[k]:  # allocating vectors to the items
                self.VocabUnified.parse(v)

        with model:
            model.cue = spa.State(DimVocab,
                                  vocab=self.VocabUnified,
                                  subdimensions=subdimensions)
            model.target = spa.State(DimVocab,
                                     vocab=self.VocabUnified,
                                     subdimensions=subdimensions)
            c = []
            n_sub = len(model.cue.state_ensembles.ea_ensembles)
            for i in range(n_sub):
                cues = []
                targets = []
                for cue, vals in mapping.items():
                    for val in vals:
                        cues.append(
                            self.VocabUnified.parse(cue).v[i *
                                                           subdimensions:(i +
                                                                          1) *
                                                           subdimensions])
                        targets.append(
                            self.VocabUnified.parse(val).v / n_sub *
                            learned_function_scale)
                        cues.append(
                            self.VocabUnified.parse(val).v[i *
                                                           subdimensions:(i +
                                                                          1) *
                                                           subdimensions])
                        targets.append(
                            self.VocabUnified.parse(cue).v / n_sub *
                            learned_function_scale)

                cc = nengo.Connection(
                    model.cue.all_ensembles[i],
                    model.target.input,
                    learning_rule_type=nengo.PES(learning_rate=learning_rate),
                    **nengo.utils.connection.target_function(cues, targets))
                cc.eval_points = cues
                cc.function = targets
                c.append(cc)

                print(i)

            #model.error = spa.State(D_items, vocab=self.vocab_items)
            #nengo.Connection(model.items.output, model.error.input)
            #nengo.Connection(model.error.output, c.learning_rule)

            #I am not sure how this implements the learning
            model.error = spa.State(DimVocab,
                                    vocab=self.VocabUnified,
                                    subdimensions=subdimensions)
            nengo.Connection(model.target.output, model.error.input)
            print('the loop ended, right?')
            for cc in c:
                nengo.Connection(model.error.output, cc.learning_rule)

            self.stim_cue_value = np.zeros(DimVocab)  #?
            self.stim_cue = nengo.Node(self.stim_cue_fun)  #?
            nengo.Connection(self.stim_cue, model.cue.input, synapse=None)

            self.stim_correct_value = np.zeros(DimVocab)
            self.stim_correct = nengo.Node(self.stim_correct)
            nengo.Connection(self.stim_correct,
                             model.error.input,
                             synapse=None,
                             transform=-1)

            self.stim_stoplearn_value = np.zeros(1)
            self.stim_stoplearn = nengo.Node(self.stim_stoplearn)
            for ens in model.error.all_ensembles:
                nengo.Connection(self.stim_stoplearn,
                                 ens.neurons,
                                 synapse=None,
                                 transform=-10 * np.ones((ens.n_neurons, 1)))

            self.stim_justmemorize_value = np.zeros(1)
            self.stim_justmemorize = nengo.Node(self.stim_justmemorize)
            for ens in model.target.all_ensembles:  #?
                nengo.Connection(self.stim_justmemorize,
                                 ens.neurons,
                                 synapse=None,
                                 transform=-10 * np.ones((ens.n_neurons, 1)))

            self.probe_target = nengo.Probe(model.target.output, synapse=0.01)

        if __name__ != '__builtin__':
            self.sim = nengo.Simulator(self.model)
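The target_function helper used above predates passing eval_points/function directly on a Connection; a small self-contained sketch of the modern equivalent (the random cue/target arrays here are purely illustrative):

import numpy as np
import nengo

cues = np.random.uniform(-1, 1, size=(10, 2))      # evaluation points (illustrative)
targets = np.random.uniform(-1, 1, size=(10, 2))   # desired outputs at those points

with nengo.Network() as net:
    pre = nengo.Ensemble(100, 2)
    post = nengo.Node(size_in=2)
    conn = nengo.Connection(pre, post,
                            eval_points=cues, function=targets,
                            learning_rule_type=nengo.PES(learning_rate=1e-4))
    # as in the class above, an error signal would be connected to conn.learning_rule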
Example #17
def test_conv_input(channels_last, Simulator, plt, allclose):
    input_shape = nengo_transforms.ChannelShape((4, 4, 1) if channels_last else
                                                (1, 4, 4),
                                                channels_last=channels_last)
    seed = 3  # fix seed to do the same computation for both channel positions
    rng = np.random.RandomState(seed + 1)

    with nengo.Network(seed=seed) as net:
        nengo_loihi.add_params(net)

        a = nengo.Node(rng.uniform(0, 1, size=input_shape.size))

        nc = 2
        kernel = np.array([1., -1.]).reshape((1, 1, 1, nc))
        transform = nengo_transforms.Convolution(nc,
                                                 input_shape,
                                                 channels_last=channels_last,
                                                 init=kernel,
                                                 kernel_size=(1, 1))
        b = nengo.Ensemble(transform.output_shape.size,
                           1,
                           neuron_type=nengo.SpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([50]),
                           intercepts=nengo.dists.Choice([0]))
        net.config[b].on_chip = False
        nengo.Connection(a, b.neurons, transform=transform)
        output_shape = transform.output_shape

        nf = 4
        kernel = rng.uniform(-0.005, 0.005, size=(3, 3, nc, nf))
        transform = nengo_transforms.Convolution(nf,
                                                 output_shape,
                                                 channels_last=channels_last,
                                                 init=-kernel,
                                                 kernel_size=(3, 3))
        c = nengo.Ensemble(transform.output_shape.size,
                           1,
                           neuron_type=nengo.LIF(),
                           max_rates=nengo.dists.Choice([100]),
                           intercepts=nengo.dists.Choice([0]))
        nengo.Connection(b.neurons, c.neurons, transform=transform)
        output_shape = transform.output_shape

        p = nengo.Probe(c.neurons)

    with nengo.Simulator(net, optimize=False) as sim:
        sim.run(1.0)

    with Simulator(net, seed=seed) as sim_loihi:
        sim_loihi.run(1.0)

    p0 = np.sum(sim.data[p] > 0, axis=0).reshape(output_shape.shape)
    p1 = np.sum(sim_loihi.data[p] > 0, axis=0).reshape(output_shape.shape)
    if not channels_last:
        p0 = np.transpose(p0, (1, 2, 0))
        p1 = np.transpose(p1, (1, 2, 0))

    plt.plot(p0.ravel(), 'k')
    plt.plot(p1.ravel(), 'b--')

    # loihi spikes are not exactly the same, but should be close-ish
    assert allclose(p0, p1, rtol=0.15, atol=1)
Example #18
    def run_model(self):
        res = ProductionResult()
        rng = np.random.RandomState(self.seed)
        dt = self.model.trial.dt

        paths, freqs = analysis.get_syllables(self.n_syllables, self.minfreq,
                                              self.maxfreq, rng)

        t = 0.8  # 0.2 for initial stuff, 0.6 fudge factor
        gs_targets = []
        for path, freq in zip(paths, freqs):
            gs = vtl.parse_ges(path)
            gs_targets.append(gs)
            self.model.add_syllable(label=path2label(path),
                                    freq=freq,
                                    trajectory=gs.trajectory(dt))
            t += 1. / freq

        # Determine and save syllable sequence
        if self.model.trial.repeat:
            seq_ix = rng.randint(len(paths), size=self.sequence_len)
        else:
            seq_ix = rng.permutation(len(paths))[:self.sequence_len]
        seq = [path2label(paths[i]) for i in seq_ix]
        seq_str = " + ".join(
            ["%s*POS%d" % (label, i + 1) for i, label in enumerate(seq)])
        self.model.trial.sequence = seq_str
        res.seq = np.array(seq)

        # Use a minimum number of syllables
        self.model.sequence.n_positions = self.n_syllables

        # Save frequencies for that sequence
        res.freqs = np.array([self.model.syllables[i].freq for i in seq_ix])

        net = self.model.build(nengo.Network(seed=self.seed))
        with net:
            p_out = nengo.Probe(net.production_info.output, synapse=0.01)

        sim = nengo.Simulator(net)
        sim.run(t)

        # Get ideal trajectory; compare RMSE
        delay_frames = int(self.model.trial.t_release / dt)
        traj = ideal_traj(self.model, seq)
        res.traj = traj

        simtraj = sim.data[p_out][delay_frames:]
        simtraj = simtraj[:traj.shape[0]]
        traj = traj[:simtraj.shape[0]]
        res.simtraj = simtraj
        res.simrmse = npext.rmse(traj, simtraj)

        # Reconstruct gesture score; compare to originals
        gs = gesture_score(simtraj, dt)
        res.accuracy, res.n_sub, res.n_del, res.n_ins = (analysis.gs_accuracy(
            gs, gs_targets))
        res.timing_mean, res.timing_var = analysis.gs_timing(gs, gs_targets)
        res.cooccur, res.co_chance = analysis.gs_cooccur(gs, gs_targets)
        log("Accuracy: %.3f" % res.accuracy)

        # Get the reconstructed trajectory and audio
        reconstructed = gs.trajectory(dt=dt)
        res.reconstructed = reconstructed
        minsize = min(reconstructed.shape[0], traj.shape[0])
        res.reconstructedrmse = npext.rmse(traj[:minsize],
                                           reconstructed[:minsize])
        audio, fs = gs.synthesize()
        res.audio = audio
        res.fs = fs

        tgt_gs = analysis.gs_combine([vtl.parse_ges(paths[i]) for i in seq_ix])
        audio, _ = tgt_gs.synthesize()
        res.clean_audio = audio

        return res
Example #19
def run_trial(params,
              ens_seed=1337,
              train_trials_seed=1337,
              test_trials_seed=1234,
              train_trials_per_cond=5,
              test_trials_per_cond=5):
    # Create model for training
    srate = 1000
    for key in ['q', 'n_neurons']:
        params[key] = int(params[key])
    train_model, train_probes = make_lmu_dms(
        theta=params['theta'],
        q=params['q'],
        n_neurons=params['n_neurons'],
        seed=ens_seed,
        out_transform=None,
        n_trials_per_cond=train_trials_per_cond,
        trial_seed=train_trials_seed,
        tau=params['tau'])
    # Generate training data
    train_sim = nengo.Simulator(train_model)
    n_train_trials = train_trials_per_cond * 8 * 2  # 16 conditions
    train_sim.run(6 * n_train_trials)  # 6 seconds per trial
    train_sim.close()

    # Solve for weights - Data are generally too large for nengo's filt so we slice
    #  them to only evaluate specific ranges.
    eval_ranges = [(0, 0.25), (0.75, 1.75), (2.75, 3.75), (4.75, 6.0)]
    trial_tvec = np.arange(0, 6.0, 1 / srate)
    b_eval = np.zeros(trial_tvec.shape, dtype=bool)
    for t_win in eval_ranges:
        b_eval = np.logical_or(
            b_eval,
            np.logical_and(t_win[0] <= trial_tvec, trial_tvec < t_win[1]))
    b_eval = np.tile(b_eval, n_train_trials)

    filt = nengo.synapses.Lowpass(0.01)
    Y = filt.filt(train_sim.data[train_probes['ideal']][b_eval])
    A = filt.filt(train_sim.data[train_probes['ensemble']][b_eval])

    D, info = nengo.solvers.LstsqL2(solver=lss.LSMRScipy())(A, Y)
    Y = A = None

    # Create a new model with the learned weights
    test_model, test_probes = make_lmu_dms(
        theta=params['theta'],
        q=params['q'],
        n_neurons=params['n_neurons'],
        seed=ens_seed,
        out_transform=D.T,
        n_trials_per_cond=test_trials_per_cond,
        trial_seed=test_trials_seed,
        tau=params['tau'])

    # Run test simulation - break after N trials to report cumulative accuracy
    def get_labels_from_sim(sim, n_trials):
        samps = 6 * srate * n_trials
        Y = sim.data[test_probes['ideal']][-samps:]
        A = sim.data[test_probes['output']][-samps:]
        b_slice = Y != 0
        label = Y[b_slice].reshape(n_trials, -1)[:, 0]
        score = np.mean(A[b_slice].reshape(n_trials, -1), axis=-1)
        return label, score

    total_test_trials = test_trials_per_cond * 8 * 2
    test_sim = nengo.Simulator(test_model)

    test_ix = min(20, total_test_trials)
    test_sim.run(6 * test_ix)
    labels, scores = get_labels_from_sim(test_sim, test_ix)

    trial_step = 10
    while test_ix < total_test_trials:
        test_sim.run(6 * min(trial_step, total_test_trials - test_ix))
        label, score = get_labels_from_sim(test_sim, trial_step)
        labels = np.append(labels, label)
        scores = np.append(scores, score)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores)
        test_auc = metrics.auc(fpr, tpr)
        nni.report_intermediate_result(test_auc)
        test_ix += trial_step

    test_sim.close()
    logger.debug('test_sim.close() returned.')
    logger.debug('Final result is: %f', test_auc)
    nni.report_final_result(test_auc)
    logger.debug('Sent final result. Trial complete.')
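The decoder solve above uses an LSMR sub-solver; a minimal sketch of the same offline least-squares step with Nengo's default solver (array shapes are illustrative):

import numpy as np
import nengo

A = np.random.rand(1000, 200)   # filtered neural activities (timesteps x neurons)
Y = np.random.rand(1000, 3)     # filtered target signals (timesteps x dims)

D, info = nengo.solvers.LstsqL2(reg=0.1)(A, Y)
print(D.shape)                  # (200, 3): decoders mapping neurons -> dims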
Example #20
    def run_model(self):
        res = RecognitionResult()
        rng = np.random.RandomState(self.seed)

        paths, freqs = analysis.get_syllables(self.n_syllables, self.minfreq,
                                              self.maxfreq, rng)

        for path, freq in zip(paths, freqs):
            traj = vtl.parse_ges(path).trajectory(self.model.trial.dt)
            self.model.add_syllable(label=path2label(path),
                                    freq=freq,
                                    trajectory=traj)

        # Determine and save syllable sequence
        if self.model.trial.repeat:
            seq_ix = rng.randint(len(paths), size=self.sequence_len)
        else:
            seq_ix = rng.permutation(len(paths))[:self.sequence_len]
        seq = [path2label(paths[i]) for i in seq_ix]
        res.seq = np.array(seq)

        # Determine how long to run
        simt = 0.0
        tgt_time = []
        for label in res.seq:
            syllable = self.model.syllable_dict[label]
            simt += 1. / syllable.freq
            tgt_time.append(simt)

        # Set that sequence in the model
        traj = ideal_traj(self.model, seq)
        self.model.trial.trajectory = traj

        # Save frequencies for that sequence
        res.freqs = np.array([self.model.syllables[i].freq for i in seq_ix])

        # -- Run the model
        net = self.model.build(nengo.Network(seed=self.seed))
        with net:
            p_dmps = [
                nengo.Probe(dmp.state[0], synapse=0.01)
                for dmp in net.syllables
            ]
            p_class = nengo.Probe(net.classifier, synapse=0.01)
            p_mem = nengo.Probe(net.memory.output, synapse=0.01)

        sim = nengo.Simulator(net)
        sim.run(simt)

        # Save iDMP system states
        res.dmps = np.hstack([sim.data[p_d] for p_d in p_dmps])
        res.dmp_labels = np.array([s.label for s in self.model.syllables])

        # Save working memory similarities
        res.memory = spa.similarity(sim.data[p_mem], net.vocab, True)

        # Determine classification times and labels
        t_ix, class_ix = analysis.classinfo(sim.data[p_class], res.dmps)
        res.class_time = sim.trange()[t_ix]
        res.class_labels = np.array([path2label(paths[ix]) for ix in class_ix])

        # Calculate accuracy / timing metrics
        recinfo = [(t, l) for t, l in zip(res.class_time, res.class_labels)]
        tgtinfo = [(t, l) for t, l in zip(tgt_time, res.seq)]
        res.acc, res.n_sub, res.n_del, res.n_ins = (analysis.cl_accuracy(
            recinfo, tgtinfo))
        res.tdiff_mean, res.tdiff_var = analysis.cl_timing(recinfo, tgtinfo)
        log("Accuracy: %.3f" % res.acc)

        # Determine if memory representation is correct
        tgt_time = np.asarray(tgt_time)
        mem_times = (tgt_time[1:] + tgt_time[:-1]) * 0.5
        mem_ix = (mem_times / self.model.trial.dt).astype(int)
        mem_class = np.argmax(res.memory[mem_ix], axis=1)
        slabels = [s.label for s in self.model.syllables]
        actual = np.array([slabels.index(lbl) for lbl in res.seq[:-1]])
        res.memory_acc = np.mean(mem_class == actual)

        return res
Example #21
File: nengo_arm.py  Project: vsilvar/blog
if run_in_GUI:
    # to run in GUI, comment out next 4 lines for running without GUI
    import nengo_gui
    nengo_gui.GUI(model=model, filename=__file__, locals=locals(), 
                  interactive=False, allow_file_change=False).start()
    import sys
    sys.exit()
else:  
    # to run in command line
    with model:    
        probe_input = nengo.Probe(input_node)
        probe_arm = nengo.Probe(arm_node[arm.DOF*2])
        
    print('building model...')
    sim = nengo.Simulator(model, dt=.001)
    print('build complete.')

    sim.run(10)

    t = sim.trange()
    x = sim.data[probe_arm]
    y = sim.data[probe_arm]

    # plot collected data
    import matplotlib.pyplot as plt

    plt.subplot(311)
    plt.plot(t, x)
    plt.xlabel('time')
    plt.ylabel('probe_arm0')
Example #22
    def __init__(
            self,
            n_input,  # Number of inputs (angles, velocities)
            n_output,  # Number of outputs (forces for joints)
            n_neurons=1000,  # Number of neurons / ensemble
            n_ensembles=1,  # Number of ensembles 
            seed=None,  # Seed for random number generation
            pes_learning_rate=1e-6,  # Adaptation learning rate
            means=None,  # Means and variances to scale data from -1 to 1 
            variances=None,  # Outliers will be scaled outside the -1 to 1
            **kwargs):

        self.n_neurons = n_neurons
        self.n_ensembles = n_ensembles
        self.pes_learning_rate = pes_learning_rate
        self.input_signal = np.zeros(n_input)
        self.training_signal = np.zeros(n_output)
        self.output = np.zeros(n_output)
        np.random.seed(42)  # seed

        # Accounting for spherical hyperspace
        n_input += 1

        # Accounting for unknown means and variances ---------------------------------
        if means is not None and variances is None:
            variances = np.ones(means.shape)
        elif means is None and variances is not None:
            means = np.zeros(variances.shape)
        self.means = np.asarray(means)
        self.variances = np.asarray(variances)

        # Setting synapse time constants --------------------------------------------
        self.tau_input = 0.012  # Time constant for input connection
        self.tau_training = 0.012  # Time constant for training signal
        self.tau_output = 0.2  # Time constant for the output

        # Setting intercepts for the ensembles --------------------------------------

        # Generates intercepts for a d-dimensional ensemble, such that, given a
        # random uniform input (from the interior of the d-dimensional ball), the
        # probability of a neuron firing has the probability density function given
        # by rng.triangular(left, mode, right, size=n)
        triangular = np.random.triangular(left=0.35,
                                          mode=0.45,
                                          right=0.55,
                                          size=n_neurons * n_ensembles)
        intercepts = nengo.dists.CosineSimilarity(n_input + 2).ppf(1 -
                                                                   triangular)
        intercepts = intercepts.reshape((n_ensembles, n_neurons))

        # Setting weights for the ensembles -----------------------------------------
        # TODO: using presaved weights
        weights = np.zeros((self.n_ensembles, n_output, self.n_neurons))

        # Setting encoders for the ensembles ----------------------------------------

        # if NengoLib is installed, use it to optimize encoder placement
        try:
            encoders_dist = ScatteredHypersphere(surface=True)
        except ImportError:
            encoders_dist = nengo.Default
            print("NengoLib not installed, encoder placement will " +
                  "be sub-optimal.")
        encoders = encoders_dist.sample(n_neurons * n_ensembles, n_input)
        encoders = encoders.reshape(n_ensembles, n_neurons, n_input)

        # Defining the Nengo adaptive model ------------------------------------------

        self.nengo_model = nengo.Network(seed=seed)
        self.nengo_model.config[nengo.Ensemble].neuron_type = nengo.LIF()

        with self.nengo_model:

            def input_signals_func(t):
                return self.input_signal

            input_signals = nengo.Node(input_signals_func, size_out=n_input)

            def training_signals_func(t):
                return -self.training_signal

            training_signals = nengo.Node(training_signals_func,
                                          size_out=n_output)

            def output_func(t, x):
                self.output = np.copy(x)

            output = nengo.Node(output_func, size_in=n_output, size_out=0)

            self.adapt_ens = []
            self.conn_learn = []
            for ii in range(self.n_ensembles):
                self.adapt_ens.append(
                    nengo.Ensemble(
                        n_neurons=self.n_neurons,
                        dimensions=n_input,
                        intercepts=intercepts[ii],
                        radius=np.sqrt(n_input),
                        encoders=encoders[ii],
                        **kwargs,
                    ))

                # hook up input signal to adaptive population to provide context
                nengo.Connection(
                    input_signals,
                    self.adapt_ens[ii],
                    synapse=self.tau_input,
                )

                self.conn_learn.append(
                    nengo.Connection(
                        self.adapt_ens[ii].neurons,
                        output,
                        learning_rule_type=nengo.PES(pes_learning_rate),
                        transform=weights[ii],
                        synapse=self.tau_output,
                    ))

                # hook up the training signal to the learning rule
                nengo.Connection(
                    training_signals,
                    self.conn_learn[ii].learning_rule,
                    synapse=self.tau_training,
                )

        nengo.rc.set("decoder_cache", "enabled", "False")
        self.sim = nengo.Simulator(self.nengo_model, dt=0.001)
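The intercept construction above follows a known recipe: draw the desired proportion of active neurons from a distribution, then map it through CosineSimilarity(d + 2).ppf. A small self-contained sketch (the dimensionality and sample count are illustrative):

import numpy as np
import nengo

d = 5                                                    # ensemble dimensionality
p_active = np.random.triangular(0.35, 0.45, 0.55, 1000)  # desired active proportions
intercepts = nengo.dists.CosineSimilarity(d + 2).ppf(1 - p_active)
print(intercepts.min(), intercepts.max())                # per-neuron intercepts derived from p_active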
Example #23
    # weights_probe = nengo.Probe(conn1,"weights",sample_every=probe_sample_rate)
    #if(not full_log):
    #    nengo.Node(log)

    #############################

step_time = (presentation_time + pause_time)
Args = {
    "Dataset": Dataset,
    "Labels": label_train_filtered,
    "step_time": step_time,
    "input_nbr": input_nbr
}

with nengo.Simulator(model, dt=dt) as sim:

    #if(not full_log):
    #    log.set(sim,Args,False,False)

    w.output.set_signal_vmem(
        sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
    w.output.set_signal_out(sim.signals[sim.model.sig[layer1.neurons]["out"]])

    sim.run(iterations * step_time * label_train_filtered.shape[0])

# weights = sim.data[weights_probe][-1]
weights = weights[-1]
#if(not full_log):
#    log.closeLog()
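The set_signal_vmem / set_signal_out calls above work by handing the learning node direct views of built simulator signals; a minimal generic sketch of that signal-access pattern using only core Nengo:

import numpy as np
import nengo

with nengo.Network() as net:
    ens = nengo.Ensemble(10, 1, neuron_type=nengo.LIF())

with nengo.Simulator(net) as sim:
    sim.step()
    voltage_sig = sim.model.sig[ens.neurons]["voltage"]   # built Signal object
    voltages = np.asarray(sim.signals[voltage_sig])       # live view of its values
    print(voltages.shape)                                 # (10,) membrane voltages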
Example #24
def test_validation(Simulator):
    with nengo.Network() as net:
        # not a callable
        with pytest.raises(ValidationError):
            TensorNode([0])

        # size out < 1
        with pytest.raises(ValidationError):
            TensorNode(lambda t: t, size_out=0)

        # wrong call signature
        with pytest.raises(ValidationError):
            TensorNode(lambda a, b, c: a)

        # returning None
        with pytest.raises(ValidationError):
            TensorNode(lambda x: None)

        # returning non-tensor
        with pytest.raises(ValidationError):
            TensorNode(lambda x: [0])

        # returning wrong number of dimensions
        with pytest.raises(ValidationError):
            TensorNode(lambda t: tf.zeros((2, 2, 2)))

        # correct output
        n = TensorNode(lambda t: tf.zeros((5, 2)))
        assert n.size_out == 2

    # can't run tensornode in regular Nengo simulator
    with nengo.Simulator(net) as sim:
        with pytest.raises(SimulationError):
            sim.step()

    # these tensornodes won't be validated at creation, because size_out
    # is specified. instead the validation occurs when the network is built

    # None output
    with nengo.Network() as net:
        TensorNode(lambda t: None, size_out=2)
    with pytest.raises(ValidationError):
        Simulator(net)

    # wrong number of dimensions
    with nengo.Network() as net:
        TensorNode(lambda t: tf.zeros((1, 2, 2)), size_out=2)
    with pytest.raises(ValidationError):
        Simulator(net)

    # wrong minibatch size
    with nengo.Network() as net:
        TensorNode(lambda t: tf.zeros((3, 2)), size_out=2)
    with pytest.raises(ValidationError):
        Simulator(net, minibatch_size=2)

    # wrong output d
    with nengo.Network() as net:
        TensorNode(lambda t: tf.zeros((3, 2)), size_out=3)
    with pytest.raises(ValidationError):
        Simulator(net, minibatch_size=3)

    # wrong dtype
    with nengo.Network() as net:
        TensorNode(lambda t: tf.zeros((3, 2), dtype=tf.int32), size_out=2)
    with pytest.raises(ValidationError):
        Simulator(net, minibatch_size=3)

    # make sure that correct output _does_ pass
    with nengo.Network() as net:
        TensorNode(lambda t: tf.zeros((3, 2), dtype=t.dtype), size_out=2)
    with Simulator(net, minibatch_size=3):
        pass
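For contrast with the failure cases above, a minimal working TensorNode (a sketch assuming nengo_dl and TensorFlow are installed; the constant output is purely illustrative):

import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network() as net:
    # output shape is (minibatch_size, size_out); here minibatch_size is 1
    node = nengo_dl.TensorNode(lambda t: tf.zeros((1, 2), dtype=t.dtype), size_out=2)
    p = nengo.Probe(node)

with nengo_dl.Simulator(net, minibatch_size=1) as sim:
    sim.run_steps(5)
print(sim.data[p].shape)   # (5, 2); larger minibatches add a leading batch axis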
Example #25
N = 10


def comparator_func(t, x):
    R1 = np.correlate(x[:N], x[N:])
    print(R1)
    return R1


with nengo.Network() as net:
    ens1 = nengo.Ensemble(10,
                          dimensions=1,
                          seed=0,
                          intercepts=nengo.dists.Choice([-0.1]),
                          max_rates=nengo.dists.Choice([100]))
    ens2 = nengo.Ensemble(10,
                          dimensions=1,
                          seed=0,
                          intercepts=nengo.dists.Choice([-0.1]),
                          max_rates=nengo.dists.Choice([100]))

    node = nengo.Node(size_in=20, output=comparator_func)

    # Neuron to neuron
    weights = np.eye(ens1.n_neurons, ens1.n_neurons)
    nengo.Connection(ens1.neurons, node[:10], transform=weights)
    nengo.Connection(ens2.neurons, node[10:], transform=weights)

with nengo.Simulator(net) as sim:
    sim.run(0.1)
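Note that with two equal-length inputs and the default mode="valid", np.correlate returns a single inner product, so R1 above is effectively a dot product of the two spike vectors; for example:

import numpy as np

a = np.array([1.0, 0.0, 1.0])
b = np.array([0.5, 0.5, 0.5])
print(np.correlate(a, b))   # [1.0], the same as np.dot(a, b)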
Example #26
def evaluate_mnist_multiple_local(args):

    #############################
    # load the data
    #############################
    input_nbr = args.input_nbr

    (image_train,
     label_train), (image_test,
                    label_test) = (keras.datasets.mnist.load_data())

    probe_sample_rate = (
        input_nbr / 10
    ) / 1000  #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations
    # # probe_sample_rate = 1000
    image_train_filtered = []
    label_train_filtered = []

    x = args.digit

    for i in range(0, input_nbr):

        image_train_filtered.append(image_train[i])
        label_train_filtered.append(label_train[i])

    image_train_filtered = np.array(image_train_filtered)
    label_train_filtered = np.array(label_train_filtered)

    image_train_filtered = (image_train_filtered / 255 - 0.1308) / 0.3088
    image_train_filtered = 255 * (
        image_train_filtered - image_train_filtered.min()) / (
            image_train_filtered.max() - image_train_filtered.min())

    # np.save(
    #     'mnist.npz',
    #     image_train_filtered=image_train_filtered,
    #     label_train_filtered=label_train_filtered,
    #     image_test_filtered=image_test_filtered,
    #     label_test_filtered=label_test_filtered,

    # )

    # data = np.load('qmnist.npz', allow_pickle=True)
    # image_train_filtered = data['image_train_filtered']
    # label_train_filtered = data['label_train_filtered']
    # image_test_filtered = data['image_test_filtered']
    # label_test_filtered = data['label_test_filtered']

    # image_train_filtered = np.tile(image_train_filtered,(args.iterations,1,1))
    # label_train_filtered = np.tile(label_train_filtered,(args.iterations))

    #Simulation Parameters
    #Presentation time
    presentation_time = args.presentation_time  #0.20
    #Pause time
    pause_time = args.pause_time
    #Iterations
    iterations = args.iterations
    #Input layer parameters
    n_in = args.n_in
    # g_max = 1/784 #Maximum output contribution
    n_neurons = args.n_neurons  # Layer 1 neurons
    # inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition

    input_neurons_args = {
        "n_neurons":
        n_in,
        "dimensions":
        1,
        "label":
        "Input layer",
        "encoders":
        nengo.dists.Choice([[1]]),
        # "max_rates":nengo.dists.Uniform(22,22),
        # "intercepts":nengo.dists.Uniform(0,0),
        "gain":
        nengo.dists.Choice([args.gain_in]),
        "bias":
        nengo.dists.Choice([args.bias_in]),
        "neuron_type":
        MyLIF_in(tau_rc=args.tau_in, min_voltage=-1, amplitude=args.amp_neuron)
        # "neuron_type":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron.
    }

    #Layer 1 parameters
    layer_1_neurons_args = {
        "n_neurons":
        n_neurons,
        "dimensions":
        1,
        "label":
        "Layer 1",
        "encoders":
        nengo.dists.Choice([[1]]),
        "gain":
        nengo.dists.Choice([args.gain_out]),
        "bias":
        nengo.dists.Choice([args.bias_out]),
        # "intercepts":nengo.dists.Choice([0]),
        # "max_rates":nengo.dists.Choice([args.rate_out,args.rate_out]),
        # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1),
        # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
        # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
        "neuron_type":
        STDPLIF(tau_rc=args.tau_out,
                min_voltage=-1,
                spiking_threshold=args.thr_out,
                inhibition_time=args.inhibition_time)
    }

    # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 20), seed=1),

    #Lateral Inhibition parameters
    # lateral_inhib_args = {
    #         "transform": inhib_factor* (np.full((n_neurons, n_neurons), 1) - np.eye(n_neurons)),
    #         "synapse":args.inhib_synapse,
    #         "label":"Lateral Inhibition"
    # }

    #Learning rule parameters
    learning_args = {
        "lr": args.lr,
        "winit_min": 0,
        "winit_max": 1,
        "vprog": args.vprog,
        "vthp": args.vthp,
        "vthn": args.vthn,
        #         "tpw":50,
        #         "prev_flag":True,
        "sample_distance": int((presentation_time + pause_time) * 200 *
                               10),  #Store weight after 10 images
    }

    # argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max)

    images = image_train_filtered
    labels = label_train_filtered

    model = nengo.Network("My network", seed=args.seed)
    #############################
    # Model construction
    #############################
    with model:
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(
            nengo.processes.PresentInput(images,
                                         presentation_time=presentation_time))
        true_label = nengo.Node(
            nengo.processes.PresentInput(labels,
                                         presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))

        # input layer
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,
                                      input_layer.neurons,
                                      synapse=None)

        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)

        #Weights between input layer and layer 1
        w = nengo.Node(CustomRule_post_v2(**learning_args),
                       size_in=n_in,
                       size_out=n_neurons)
        nengo.Connection(input_layer.neurons, w, synapse=None)
        nengo.Connection(w, layer1.neurons, synapse=None)
        # nengo.Connection(w, layer1.neurons,transform=g_max, synapse=None)
        init_weights = np.random.uniform(0, 1, (n_neurons, n_in))
        # conn1 = nengo.Connection(input_layer.neurons,layer1.neurons,learning_rule_type=VLR(learning_rate=args.lr,vprog=args.vprog, vthp=args.vthp,vthn=args.vthn),transform=init_weights)

        #Lateral inhibition
        # inhib = nengo.Connection(layer1.neurons,layer1.neurons,**lateral_inhib_args)

        #Probes
        p_true_label = nengo.Probe(true_label, sample_every=probe_sample_rate)
        p_input_layer = nengo.Probe(input_layer.neurons,
                                    sample_every=probe_sample_rate)
        p_layer_1 = nengo.Probe(layer1.neurons, sample_every=probe_sample_rate)
        # weights_probe = nengo.Probe(conn1,"weights",sample_every=probe_sample_rate)

        weights = w.output.history

    # with nengo_ocl.Simulator(model) as sim :
    with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:

        w.output.set_signal_vmem(
            sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
        w.output.set_signal_out(
            sim.signals[sim.model.sig[layer1.neurons]["out"]])

        sim.run((presentation_time + pause_time) * labels.shape[0])

    #save the model
    # now = time.strftime("%Y%m%d-%H%M%S")
    # folder = os.getcwd()+"/MNIST_VDSP"+now
    # os.mkdir(folder)
    # print(weights)

    # weights = sim.data[weights_probe]

    last_weight = weights[-1]

    # pickle.dump(weights, open( folder+"/trained_weights", "wb" ))
    # pickle.dump(argument_string, open( folder+"/arguments", "wb" ))

    t_data = sim.trange(sample_every=probe_sample_rate)
    labels = sim.data[p_true_label][:, 0]
    output_spikes = sim.data[p_layer_1]
    neuron_class = np.zeros((n_neurons, 1))
    n_classes = 10
    for j in range(n_neurons):
        spike_times_neuron_j = t_data[np.where(output_spikes[:, j] > 0)]
        max_spike_times = 0
        for i in range(n_classes):
            class_presentation_times_i = t_data[np.where(labels == i)]
            #Normalized number of spikes wrt class presentation time
            num_spikes = len(
                np.intersect1d(spike_times_neuron_j,
                               class_presentation_times_i)) / (
                                   len(class_presentation_times_i) + 1)
            if (num_spikes > max_spike_times):
                neuron_class[j] = i
                max_spike_times = num_spikes

    # print("Neuron class: \n", neuron_class)

    sim.close()
    '''
    Testing
    '''

    # img_rows, img_cols = 28, 28
    input_nbr = 6000
    # input_nbr = int(args.input_nbr/6)

    # Dataset = "Mnist"
    # # (image_train, label_train), (image_test, label_test) = load_mnist()
    # (image_train, label_train), (image_test, label_test) = (tf.keras.datasets.mnist.load_data())

    # #select the 0s and 1s as the two classes from MNIST data
    image_test_filtered = []
    label_test_filtered = []

    for i in range(0, input_nbr):
        # #  if (label_train[i] == 1 or label_train[i] == 0):
        image_test_filtered.append(image_test[i])
        label_test_filtered.append(label_test[i])

    print("actual input", len(label_test_filtered))
    print(np.bincount(label_test_filtered))

    image_test_filtered = np.array(image_test_filtered)
    label_test_filtered = np.array(label_test_filtered)

    image_test_filtered = (image_test_filtered / 255 - 0.1308) / 0.3088
    image_test_filtered = 255 * (
        image_test_filtered - image_test_filtered.min()) / (
            image_test_filtered.max() - image_test_filtered.min())

    #############################

    model = nengo.Network(label="My network", )

    # Learning params

    with model:
        # input layer
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(
            nengo.processes.PresentInput(image_test_filtered,
                                         presentation_time=presentation_time))
        true_label = nengo.Node(
            nengo.processes.PresentInput(label_test_filtered,
                                         presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        input_layer = nengo.Ensemble(**input_neurons_args)

        input_conn = nengo.Connection(picture,
                                      input_layer.neurons,
                                      synapse=None)

        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)

        # w = nengo.Node(CustomRule_post_v2(**learning_args), size_in=784, size_out=n_neurons)

        nengo.Connection(input_layer.neurons,
                         layer1.neurons,
                         transform=last_weight)

        p_true_label = nengo.Probe(true_label)
        p_layer_1 = nengo.Probe(layer1.neurons)
        p_input_layer = nengo.Probe(input_layer.neurons)
        #if(not full_log):
        #    nengo.Node(log)

        #############################

    step_time = (presentation_time + pause_time)

    with nengo.Simulator(model, dt=args.dt) as sim:

        sim.run(step_time * label_test_filtered.shape[0])

    labels = sim.data[p_true_label][:, 0]
    output_spikes = sim.data[p_layer_1]
    n_classes = 10
    # rate_data = nengo.synapses.Lowpass(0.1).filtfilt(sim.data[p_layer_1])
    predicted_labels = []
    true_labels = []
    correct_classified = 0
    wrong_classified = 0

    class_spikes = np.ones((10, 1))

    for num in range(input_nbr):
        #np.sum(sim.data[my_spike_probe] > 0, axis=0)

        output_spikes_num = output_spikes[
            num * int(presentation_time / args.dt):(num + 1) *
            int(presentation_time / args.dt), :]  # 0.350/0.005
        num_spikes = np.sum(output_spikes_num > 0, axis=0)

        for i in range(n_classes):
            sum_temp = 0
            count_temp = 0
            for j in range(n_neurons):
                if ((neuron_class[j]) == i):
                    sum_temp += num_spikes[j]
                    count_temp += 1

            if (count_temp == 0):
                class_spikes[i] = 0
            else:
                class_spikes[i] = sum_temp
                # class_spikes[i] = sum_temp/count_temp

        # print(class_spikes)
        k = np.argmax(num_spikes)
        # predicted_labels.append(neuron_class[k])
        class_pred = np.argmax(class_spikes)
        predicted_labels.append(class_pred)

        true_class = labels[(num * int(presentation_time / args.dt))]
        # print(true_class)
        # print(class_pred)

        # if(neuron_class[k] == true_class):
        #     correct_classified+=1
        # else:
        #     wrong_classified+=1
        if (class_pred == true_class):
            correct_classified += 1
        else:
            wrong_classified += 1

    accuracy = correct_classified / (correct_classified +
                                     wrong_classified) * 100
    print("Accuracy: ", accuracy)
    sim.close()

    # nni.report_final_result(accuracy)

    del weights, sim.data, labels, output_spikes, class_pred, t_data

    return accuracy, last_weight
Example #27
def test_conv_split(Simulator, rng, plt, allclose):
    channels_last = False

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    input_shape = ImageShape(28, 28, 1, channels_last=channels_last)
    test_x = test10[0][0].reshape(input_shape.shape(channels_last=True))
    if not input_shape.channels_last:
        test_x = np.transpose(test_x, (2, 0, 1))

    n_filters = 8
    kernel_size = (7, 7)
    kernel = Gabor(freq=Uniform(0.5, 1)).generate(n_filters,
                                                  kernel_size,
                                                  rng=rng)
    kernel = kernel[None, :, :, :]  # single channel
    kernel = np.transpose(kernel, (0, 2, 3, 1))  # filters last
    strides = (2, 2)

    seed = 3  # fix seed to do the same computation for both channel positions
    rng = np.random.RandomState(seed + 1)

    with nengo.Network(seed=seed) as net:
        nengo_loihi.add_params(net)

        a = nengo.Node(test_x.ravel())

        # --- make population to turn image into spikes
        nc = 1
        in_kernel = np.array([1.]).reshape((1, 1, 1, nc))
        transform = nengo_loihi.Conv2D.from_kernel(in_kernel, input_shape)
        b = nengo.Ensemble(transform.output_shape.size,
                           1,
                           neuron_type=nengo.SpikingRectifiedLinear(),
                           max_rates=nengo.dists.Choice([50]),
                           intercepts=nengo.dists.Choice([0]))
        net.config[b].on_chip = False
        nengo.Connection(a, b.neurons, transform=transform)
        in_shape = transform.output_shape

        transform = nengo_loihi.Conv2D.from_kernel(kernel,
                                                   in_shape,
                                                   strides=strides)
        out_shape = transform.output_shape
        split_slices = out_shape.split_channels(max_size=1024, max_channels=4)

        # --- make convolution population, split across ensembles
        cc = []
        cp = []
        out_shapes = []
        xslice = ImageSlice(in_shape)
        for yslice in split_slices:
            transform_xy = split_transform(transform, xslice, yslice)
            out_shapes.append(transform_xy.output_shape)
            c = nengo.Ensemble(transform_xy.output_shape.size,
                               1,
                               neuron_type=nengo.LIF(),
                               max_rates=nengo.dists.Choice([15]),
                               intercepts=nengo.dists.Choice([0]))
            nengo.Connection(b.neurons, c.neurons, transform=transform_xy)
            cc.append(c)
            cp.append(nengo.Probe(c.neurons))

    with nengo.Simulator(net, optimize=False) as sim_nengo:
        sim_nengo.run(1.0)

    with Simulator(net, seed=seed) as sim_loihi:
        if "loihi" in sim_loihi.sims:
            sim_loihi.sims["loihi"].snip_max_spikes_per_step = 100
        sim_loihi.run(1.0)

    nengo_out = []
    loihi_out = []
    for p, out_shape_i in zip(cp, out_shapes):
        nengo_out.append(
            (sim_nengo.data[p] > 0).sum(axis=0).reshape(out_shape_i.shape()))
        loihi_out.append(
            (sim_loihi.data[p] > 0).sum(axis=0).reshape(out_shape_i.shape()))

    if channels_last:
        nengo_out = np.concatenate(nengo_out, axis=2)
        loihi_out = np.concatenate(loihi_out, axis=2)

        # put channels first to display them separately
        nengo_out = np.transpose(nengo_out, (2, 0, 1))
        loihi_out = np.transpose(loihi_out, (2, 0, 1))
    else:
        nengo_out = np.concatenate(nengo_out, axis=0)
        loihi_out = np.concatenate(loihi_out, axis=0)

    out_max = np.maximum(nengo_out.max(), loihi_out.max())

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x[0, :, :], vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(kernel[0], (2, 0, 1)), cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(nengo_out.ravel(), bins=31)
    plt.hist(loihi_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    tile(nengo_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(loihi_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(loihi_out, nengo_out, atol=0.05 * out_max, rtol=0.15)
Example #28
def plot_tuning_curves(filename, plot_decoding=False, show=False):
    """
    Plot tuning curves for an association population and for a standard
    subpopulation (of the neural extraction network).
    """
    import matplotlib as mpl
    mpl.rcParams['font.size'] = '10'

    if show:
        mpl.use('Qt4Agg')
    else:
        mpl.use('Agg')

    import matplotlib.pyplot as plt

    plt.figure(figsize=(5, 3))

    neurons_per_item = 20
    neurons_per_dim = 50
    intercepts_low = 0.29
    intercepts_range = 0.00108

    intercepts = Uniform(intercepts_low, intercepts_low + intercepts_range)

    tau_rc = 0.034
    tau_ref = 0.0026
    radius = 1.0
    assoc_encoders = np.ones((neurons_per_item, 1))
    standard_encoders = np.ones((neurons_per_dim, 1))

    threshold = 0.3
    threshold_func = lambda x: 1 if x > threshold else 0

    max_rates = Uniform(200, 350)

    model = nengo.Network("Associative Memory")
    with model:
        neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)

        assoc = nengo.Ensemble(n_neurons=neurons_per_item,
                               dimensions=1,
                               intercepts=intercepts,
                               encoders=assoc_encoders,
                               label="assoc",
                               radius=radius,
                               max_rates=max_rates,
                               neuron_type=neuron_type)

        n_eval_points = 750
        eval_points = np.random.normal(0, 0.06, (n_eval_points, 1))
        eval_points.T[0].sort()
        radius = 5.0 / np.sqrt(512)
        standard = nengo.Ensemble(n_neurons=neurons_per_dim,
                                  dimensions=1,
                                  eval_points=eval_points,
                                  radius=radius,
                                  encoders=standard_encoders)

        if plot_decoding:
            dummy = nengo.Ensemble(1, 1)
            conn = nengo.Connection(assoc, dummy, function=threshold_func)
            dummy2 = nengo.Ensemble(1, 1)
            conn2 = nengo.Connection(standard, dummy2)

    sim = nengo.Simulator(model)

    if plot_decoding:
        gs = gridspec.GridSpec(3, 2)
    else:
        gs = gridspec.GridSpec(2, 2)

    plt.subplot(gs[0:2, 0])

    assoc_eval_points, assoc_activities = tuning_curves(assoc, sim)

    for neuron in assoc_activities.T:
        plt.plot(assoc_eval_points.T[0], neuron)
    plt.title("Association")
    plt.ylabel("Firing Rate (spikes/s)")
    plt.xlabel(r"$e_ix$")
    plt.ylim((0, 400))
    plt.yticks([0, 100, 200, 300, 400])

    ax = plt.subplot(gs[0:2, 1])

    # We want different eval points for display purposes than for
    # optimization purposes
    eval_points = Uniform(-radius, radius).sample(n_eval_points)
    eval_points.sort()
    eval_points = eval_points.reshape((n_eval_points, 1))

    # have to divide by radius on our own since tuning_curves skips that step
    _, activities = tuning_curves(standard, sim, eval_points / radius)
    for neuron in activities.T:
        plt.plot(eval_points, neuron)

    plt.title("Standard")
    plt.xlabel(r"$e_ix$")
    plt.xlim((-radius, radius))
    plt.ylim((0, 400))
    plt.setp(ax, yticks=[])

    if plot_decoding:
        plt.subplot(gs[2, 0])
        decoders = sim.data[conn].decoders
        plt.plot(assoc_eval_points.T[0],
                 0.001 * np.dot(assoc_activities, decoders.T))
        plt.axhline(y=1.0, ls='--')

        plt.subplot(gs[2, 1])
        x, activities2 = tuning_curves(standard, sim,
                                       assoc_eval_points / radius)
        decoders = sim.data[conn2].decoders
        plt.plot(assoc_eval_points.T[0],
                 0.001 * np.dot(activities2, decoders.T))
        plt.plot([-1.0, 1.0], [-1.0, 1.0], c='k', ls='--')
        plt.axvline(x=radius, c='k', ls='--')
        plt.axvline(x=-radius, c='k', ls='--')

    plt.tight_layout()

    plt.subplots_adjust(right=0.89, left=0.11)

    if filename:
        plt.savefig(filename)
    if show:
        plt.show()
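tuning_curves above comes from nengo.utils.ensemble and needs a built simulator; a minimal self-contained sketch of the call:

import nengo
from nengo.utils.ensemble import tuning_curves

with nengo.Network() as net:
    ens = nengo.Ensemble(10, 1)

with nengo.Simulator(net) as sim:
    eval_points, activities = tuning_curves(ens, sim)

print(eval_points.shape, activities.shape)   # (n_eval_points, 1), (n_eval_points, 10)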
    np.save("y_spikes", sim.data[y_spikes])'''

#fig=plt.figure()

#plt.subplot(2, 1, 1)
#plt.plot(sim.trange(), sim.data[x_spikes])

#plt.subplot(2, 1, 2)
#plot_spikes(sim.trange(), sim.data[y_spikes])
#plt.xlabel("Time [s]")
#plt.ylabel("Neuron number")
#plt.savefig('fig.png')

if __name__ == '__main__':

    print("starting simulator...")
    before = time.time()

    sim = nengo.Simulator(model)

    after = time.time()
    print("time to build:")
    print(after - before)

    print("running simulator...")
    before = time.time()

    while True:
        sim.step()
        time.sleep(0.0001)
Example #30
    def __init__(  # noqa: C901
        self,
        network,
        dt=0.001,
        seed=None,
        model=None,
        precompute=None,
        target=None,
        progress_bar=None,
        remove_passthrough=True,
        hardware_options=None,
    ):
        # initialize values used in __del__ and close() first
        self.closed = True
        self.network = network
        self.sims = OrderedDict()
        self.timers = Timers()
        self.timers.start("build")
        self.seed = seed
        self._n_steps = 0
        self._time = 0

        hardware_options = {} if hardware_options is None else hardware_options

        if progress_bar:
            warnings.warn("nengo-loihi does not support progress bars")

        if model is None:
            self.model = Model(dt=float(dt), label="%s, dt=%f" % (network, dt))
        else:
            assert isinstance(
                model, Model), "model is not type 'nengo_loihi.builder.Model'"
            self.model = model
            assert self.model.dt == dt

        if network is None:
            raise ValidationError("network parameter must not be None",
                                  attr="network")

        if target is None:
            target = "loihi" if HAS_NXSDK else "sim"
        self.target = target
        logger.info("Simulator target is %r", target)

        # Build the network into the model
        self.model.build(
            network,
            precompute=precompute,
            remove_passthrough=remove_passthrough,
            discretize=target != "simreal",
        )

        # Create host_pre and host simulators if necessary
        self.precompute = self.model.split.precompute
        logger.info("Simulator precompute is %r", self.precompute)
        assert precompute is None or precompute == self.precompute
        if self.model.split.precomputable() and not self.precompute:
            warnings.warn(
                "Model is precomputable. Setting precompute=False may slow execution."
            )

        if len(self.model.host_pre.params) > 0:
            assert self.precompute
            self.sims["host_pre"] = nengo.Simulator(
                network=None,
                dt=self.dt,
                model=self.model.host_pre,
                progress_bar=False,
                optimize=False,
            )

        if len(self.model.host.params) > 0:
            self.sims["host"] = nengo.Simulator(
                network=None,
                dt=self.dt,
                model=self.model.host,
                progress_bar=False,
                optimize=False,
            )

        self._probe_outputs = self.model.params
        self.data = SimulationData(self._probe_outputs)
        for sim in self.sims.values():
            self.data.add_fallback(sim.data)

        if seed is None:
            if network is not None and network.seed is not None:
                seed = network.seed + 1
            else:
                seed = np.random.randint(npext.maxint)

        if target in ("simreal", "sim"):
            self.sims["emulator"] = EmulatorInterface(self.model, seed=seed)
        elif target == "loihi":
            assert HAS_NXSDK, "Must have NxSDK installed to use Loihi hardware"
            use_snips = not self.precompute and self.sims.get("host",
                                                              None) is not None
            self.sims["loihi"] = HardwareInterface(self.model,
                                                   use_snips=use_snips,
                                                   seed=seed,
                                                   **hardware_options)
        else:
            raise ValidationError("Must be 'simreal', 'sim', or 'loihi'",
                                  attr="target")

        assert "emulator" in self.sims or "loihi" in self.sims

        self._runner = StepRunner(self.model, self.sims, self.precompute,
                                  self.timers)
        self.closed = False
        self.timers.stop("build")
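For context, a minimal sketch of driving this Simulator with the emulator target (assumes nengo_loihi is installed; target="loihi" additionally requires NxSDK, as checked in the constructor above):

import nengo
import nengo_loihi

with nengo.Network() as net:
    stim = nengo.Node(0.5)
    ens = nengo.Ensemble(50, 1)
    nengo.Connection(stim, ens)
    p = nengo.Probe(ens, synapse=0.01)

with nengo_loihi.Simulator(net, target="sim") as sim:
    sim.run(0.5)
print(sim.data[p][-10:].mean())   # decoded estimate should be roughly 0.5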