Ejemplo n.º 1
0
def test_nomodulatory():
    """Make sure you cannot set modulatory=True on connections."""
    with nengo.Network():
        ens = nengo.Ensemble(10, 1)
        # The old modulatory flag was removed; using it must raise.
        with pytest.raises(ObsoleteError):
            nengo.Connection(ens, ens, modulatory=True)
Ejemplo n.º 2
0
def sense_to_ang_vel(x, n_sensors, k=2.0):
    """Turn an array of sensor readings into an angular-velocity command.

    Sensors are weighted linearly from -1 (one side) to +1 (the other),
    so a left/right imbalance in the readings produces a turn signal
    scaled by the gain ``k``.
    """
    side_weights = np.linspace(-1, 1, n_sensors)
    return k * np.dot(side_weights, np.array(x))

def sense_to_lin_vel(x, n_sensors, v=0.5):
    """Turn sensor readings into a linear-velocity command.

    Drives forward proportionally to the strongest sensor reading;
    backs up at speed ``v`` when no sensor reads above zero.
    """
    strongest = np.max(x)
    return v * strongest if strongest > 0. else -v


model = nengo.Network(seed=seed)

with model:

    map_selector = nengo.Node([0])

    environment = nengo.Node(
        NengoMazeEnvironment(
            n_sensors=n_sensors,
            height=15,
            width=15,
            fov=125,
            normalize_sensor_output=True
        ),
        size_in=4,
        size_out=n_sensors + n_t_sensors + 3,
Ejemplo n.º 3
0
import nengo
import numpy as np
from nengo_gym import GymEnv

# Fixed seed so the CartPole run is reproducible.
model = nengo.Network(seed=13)

# State layout of the observation vector: [x, dx, th, dth]
# (cart position, cart velocity, pole angle, pole angular velocity).
control_matrix = np.array([[-1, 1, -1, 1]])


def control(x):
    """Linear feedback controller for CartPole.

    The state is [x, dx, th, dth]; position and angle are penalized,
    while the velocities enter with positive sign.
    """
    pos, vel, angle, ang_vel = x[0], x[1], x[2], x[3]
    return -pos - angle + vel + ang_vel


with model:

    # dt of CartPole is 0.02
    # dt of Nengo is 0.001
    # -> 20 Nengo steps per environment update keeps the two clocks in sync.
    env = GymEnv(
        env_name='CartPole-v1',
        reset_signal=False,
        reset_when_done=True,
        return_reward=True,
        return_done=False,
        render=True,
        nengo_steps_per_update=20,
    )
Ejemplo n.º 4
0
def _test_pes(
    Simulator,
    AnyNeuronType,
    plt,
    seed,
    allclose,
    pre_neurons=False,
    post_neurons=False,
    weight_solver=False,
    vin=np.array([0.5, -0.5]),
    vout=None,
    n=200,
    function=None,
    transform=np.array(1.0),
    rate=1e-3,
):
    """Build a PES-learning network and assert that it converges.

    A ``pre`` ensemble (driven by ``vin``) learns a connection to ``post``
    so that ``post`` tracks ``vout``; the ``error`` ensemble computes
    ``post - target`` and drives the PES rule.

    Parameters
    ----------
    Simulator, AnyNeuronType, plt, seed, allclose
        pytest fixtures: simulator class, neuron type under test, plotting
        helper, RNG seed, and tolerant comparison helper.
    pre_neurons, post_neurons : bool
        Connect directly to the neurons of pre/post instead of the
        decoded ensemble signals.
    weight_solver : bool
        Use a full-weight-matrix solver on the learned connection.
    vin, vout : array_like
        Input value and learning target (``vout`` defaults to ``vin``).
    n : int
        Neurons per ensemble.
    function, transform
        Initial function/transform on the learned connection.
    rate : float
        PES learning rate.
    """
    # Default the target to the input itself (learn the identity).
    vout = np.array(vin) if vout is None else vout

    with nengo.Network(seed=seed) as model:
        model.config[nengo.Ensemble].neuron_type = AnyNeuronType()

        stim = nengo.Node(output=vin)
        target = nengo.Node(output=vout)
        pre = nengo.Ensemble(n, dimensions=stim.size_out)
        post = nengo.Ensemble(n, dimensions=stim.size_out)
        error = nengo.Ensemble(n, dimensions=target.size_out)

        nengo.Connection(stim, pre)

        # If the target is lower-dimensional than the input, learn into a
        # slice of ``post`` only.
        postslice = post[: target.size_out] if target.size_out < stim.size_out else post
        pre = pre.neurons if pre_neurons else pre
        post = post.neurons if post_neurons else postslice

        conn = nengo.Connection(
            pre,
            post,
            function=function,
            transform=transform,
            learning_rule_type=PES(rate),
        )
        if weight_solver:
            conn.solver = nengo.solvers.LstsqL2(weights=True)

        # error = post - target (the target enters with a -1 transform).
        nengo.Connection(target, error, transform=-1)
        nengo.Connection(postslice, error)
        nengo.Connection(error, conn.learning_rule)

        post_p = nengo.Probe(postslice, synapse=0.03)
        error_p = nengo.Probe(error, synapse=0.03)

        weights_p = nengo.Probe(conn, "weights", sample_every=0.01)

    with Simulator(model) as sim:
        sim.run(0.5)
    t = sim.trange()
    weights = sim.data[weights_p]

    plt.subplot(211)
    plt.plot(t, sim.data[post_p])
    plt.ylabel("Post decoded value")
    plt.subplot(212)
    plt.plot(t, sim.data[error_p])
    plt.ylabel("Error decoded value")
    plt.xlabel("Time (s)")

    # Judge convergence on the final 0.1 s of the 0.5 s run.
    tend = t > 0.4
    assert allclose(sim.data[post_p][tend], vout, atol=0.05)
    assert allclose(sim.data[error_p][tend], 0, atol=0.05)
    # The learned weights must actually have changed during the run.
    assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
Ejemplo n.º 5
0
#
# Nodes allow for all sorts of advanced behavior that is typically done by modifying or extending the code of a neural simulator. In Nengo, the `Node` object allows for custom code to run.
#
# In this example, we will implement an `n`-timestep delayed connection by using a node.

# In[ ]:

import numpy as np
import matplotlib.pyplot as plt

import nengo
from nengo.processes import WhiteSignal

# In[ ]:

model = nengo.Network(label="Delayed connection")
with model:
    # We'll use white noise as input
    # (WhiteSignal(period, high=cutoff) — band-limited noise up to 5 Hz).
    inp = nengo.Node(WhiteSignal(2, high=5), size_out=1)
    A = nengo.Ensemble(40, dimensions=1)
    nengo.Connection(inp, A)


# We'll make a simple object to implement the delayed connection
class Delay(object):
    """Ring buffer that delays its input by ``timesteps`` simulation steps.

    Intended for use as a Nengo Node callable: ``nengo.Node(Delay(d).step,
    size_in=d, size_out=d)``.

    Parameters
    ----------
    dimensions : int
        Dimensionality of the delayed signal.
    timesteps : int
        Number of simulation steps to delay by.
    """

    def __init__(self, dimensions, timesteps=50):
        # history[0] is the oldest sample, history[-1] the newest.
        self.history = np.zeros((timesteps, dimensions))

    def step(self, t, x):
        # Shift the buffer one step and append the newest sample.
        self.history = np.roll(self.history, -1)
        self.history[-1] = x
        # Bug fix: the delayed (oldest) sample must be returned, otherwise
        # the Node outputs nothing and the delayed connection is broken.
        return self.history[0]
Ejemplo n.º 6
0
def mnist(use_tensor_layer=True):
    """
    A network designed to stress-test tensor layers (based on mnist net).

    Parameters
    ----------
    use_tensor_layer : bool
        If True, use individual tensor_layers to build the network, as opposed
        to a single TensorNode containing all layers.

    Returns
    -------
    net : `nengo.Network`
        benchmark network
    """

    with nengo.Network() as net:
        # create node to feed in images
        net.inp = nengo.Node(np.ones(28 * 28))

        if use_tensor_layer:
            # Interleave Keras layers with Nengo rate neurons, one Layer
            # call per stage; every ensemble uses a fixed max rate of 100
            # and a zero intercept.
            nengo_nl = nengo.RectifiedLinear()

            ensemble_params = dict(max_rates=nengo.dists.Choice([100]),
                                   intercepts=nengo.dists.Choice([0]))
            amplitude = 1
            synapse = None

            x = nengo_dl.Layer(
                tf.keras.layers.Conv2D(filters=32,
                                       kernel_size=3))(net.inp,
                                                       shape_in=(28, 28, 1))
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(
                tf.keras.layers.Conv2D(filters=32,
                                       kernel_size=3))(x,
                                                       shape_in=(26, 26, 32),
                                                       transform=amplitude)
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(
                tf.keras.layers.AveragePooling2D(pool_size=2, strides=2))(
                    x,
                    shape_in=(24, 24, 32),
                    synapse=synapse,
                    transform=amplitude)

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=128))(x)
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(tf.keras.layers.Dropout(rate=0.4))(
                x, transform=amplitude)

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=10))(x)
        else:
            # Build the whole Keras model inside a single TensorNode instead.
            nl = tf.nn.relu

            # def softlif_layer(x, sigma=1, tau_ref=0.002, tau_rc=0.02,
            #                   amplitude=1):
            #     # x -= 1
            #     z = tf.nn.softplus(x / sigma) * sigma
            #     z += 1e-10
            #     rates = amplitude / (tau_ref + tau_rc * tf.log1p(1 / z))
            #     return rates

            def mnist_node(x):  # pragma: no cover
                x = tf.keras.layers.Conv2D(filters=32,
                                           kernel_size=3,
                                           activation=nl)(x)
                x = tf.keras.layers.Conv2D(filters=32,
                                           kernel_size=3,
                                           activation=nl)(x)
                x = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(x)
                x = tf.keras.layers.Flatten()(x)
                x = tf.keras.layers.Dense(128, activation=nl)(x)
                x = tf.keras.layers.Dropout(rate=0.4)(x)
                x = tf.keras.layers.Dense(10)(x)

                return x

            node = nengo_dl.TensorNode(mnist_node,
                                       shape_in=(28, 28, 1),
                                       shape_out=(10, ))
            x = node
            nengo.Connection(net.inp, node, synapse=None)

        # Probe the 10-way output of whichever pipeline was built.
        net.p = nengo.Probe(x)

    return net
Ejemplo n.º 7
0
def evaluate_mnist_multiple_var_v2(args):
    """Train a single STDP layer on MNIST digits, then evaluate accuracy.

    Training phase: images from ``mnist.npz`` are presented one at a time;
    the weights between the input layer and layer 1 evolve under a custom
    learning rule implemented as a Node (``CustomRule_post_v3``).  Each
    layer-1 neuron is then assigned the class for which it spiked most
    (normalized by that class's presentation time).  Testing phase: a fresh
    network with the learned (frozen) weights classifies the test images by
    the class whose neurons spiked most on average.

    Parameters
    ----------
    args : argparse.Namespace-like
        Hyperparameters (input_nbr, digit, presentation_time, pause_time,
        iterations, n_in, g_max, n_neurons, gain/bias/tau for both layers,
        thr_out, inhibition_time, lr, vprog, weight_quant, ...).

    Returns
    -------
    (accuracy, last_weight) : (float, ndarray)
        Test-set accuracy in percent and the final weight matrix.
    """

    #############################
    # load the data
    #############################
    input_nbr = args.input_nbr

    # (image_train, label_train), (image_test, label_test) = (keras.datasets.mnist.load_data())

    probe_sample_rate = (
        input_nbr / 10
    ) / 1000  #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations
    # # probe_sample_rate = 1000
    # image_train_filtered = []
    # label_train_filtered = []

    x = args.digit

    # for i in range(0,input_nbr):

    #     image_train_filtered.append(image_train[i])
    #     label_train_filtered.append(label_train[i])

    # image_train_filtered = np.array(image_train_filtered)
    # label_train_filtered = np.array(label_train_filtered)

    # np.save(
    #     'mnist.npz',
    #     image_train_filtered=image_train_filtered,
    #     label_train_filtered=label_train_filtered,
    #     image_test_filtered=image_test_filtered,
    #     label_test_filtered=label_test_filtered,

    # )

    # Pre-filtered train/test arrays saved by an earlier run (see above).
    data = np.load('mnist.npz', allow_pickle=True)
    image_train_filtered = data['image_train_filtered']
    label_train_filtered = data['label_train_filtered']
    image_test_filtered = data['image_test_filtered']
    label_test_filtered = data['label_test_filtered']

    #Simulation Parameters
    #Presentation time
    presentation_time = args.presentation_time  #0.20
    #Pause time
    pause_time = args.pause_time
    #Iterations
    iterations = args.iterations
    #Input layer parameters
    n_in = args.n_in
    # g_max = 1/784 #Maximum output contribution
    g_max = args.g_max
    n_neurons = args.n_neurons  # Layer 1 neurons
    # inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition

    input_neurons_args = {
        "n_neurons":
        n_in,
        "dimensions":
        1,
        "label":
        "Input layer",
        "encoders":
        nengo.dists.Uniform(1, 1),
        # "max_rates":nengo.dists.Uniform(22,22),
        # "intercepts":nengo.dists.Uniform(0,0),
        "gain":
        nengo.dists.Uniform(args.gain_in, args.gain_in),
        "bias":
        nengo.dists.Uniform(args.bias_in, args.bias_in),
        "neuron_type":
        MyLIF_in(tau_rc=args.tau_in, min_voltage=-1, amplitude=args.g_max)
        # "neuron_type":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron.
    }

    #Layer 1 parameters
    layer_1_neurons_args = {
        "n_neurons":
        n_neurons,
        "dimensions":
        1,
        "label":
        "Layer 1",
        "encoders":
        nengo.dists.Uniform(1, 1),
        "gain":
        nengo.dists.Uniform(args.gain_out, args.gain_out),
        "bias":
        nengo.dists.Uniform(args.bias_out, args.bias_out),
        # "intercepts":nengo.dists.Choice([0]),
        # "max_rates":nengo.dists.Choice([args.rate_out,args.rate_out]),
        # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1),
        # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
        # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
        "neuron_type":
        STDPLIF(tau_rc=args.tau_out,
                min_voltage=-1,
                spiking_threshold=args.thr_out,
                inhibition_time=args.inhibition_time)
    }

    # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 20), seed=1),

    #Lateral Inhibition parameters
    # lateral_inhib_args = {
    #         "transform": inhib_factor* (np.full((n_neurons, n_neurons), 1) - np.eye(n_neurons)),
    #         "synapse":args.inhib_synapse,
    #         "label":"Lateral Inhibition"
    # }

    #Learning rule parameters

    vthp = 0.25
    vthn = 0.25
    # np.random.seed(0)
    # vth_var = (2 * np.random.rand(n_neurons,n_in)) -1 #between -1 to 1 of shape W
    # var_ratio=args.var_ratio
    # vthp = vthp + (vthp*var_ratio*vth_var)
    # vthn = vthn + (vthn*var_ratio*vth_var)

    learning_args = {
        "lr": args.lr,
        "winit_min": 0,
        "winit_max": 1,
        "vprog": args.vprog,
        "vthp": vthp,
        "vthn": vthn,
        "weight_quant": args.weight_quant,
        # "var_ratio":args.var_ratio,
        #         "tpw":50,
        #         "prev_flag":True,
        "sample_distance": int((presentation_time + pause_time) * 200 *
                               10),  #Store weight after 10 images
    }

    # argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max)

    images = image_train_filtered
    labels = label_train_filtered

    model = nengo.Network("My network")
    #############################
    # Model construction
    #############################
    with model:
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(
            nengo.processes.PresentInput(images,
                                         presentation_time=presentation_time))
        true_label = nengo.Node(
            nengo.processes.PresentInput(labels,
                                         presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))

        # input layer
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,
                                      input_layer.neurons,
                                      synapse=None)

        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)

        #Weights between input layer and layer 1
        # The plastic weights live inside this Node, not in a Connection.
        w = nengo.Node(CustomRule_post_v3(**learning_args),
                       size_in=n_in,
                       size_out=n_neurons)
        nengo.Connection(input_layer.neurons, w, synapse=None)
        nengo.Connection(w, layer1.neurons, synapse=None)
        # nengo.Connection(w, layer1.neurons,transform=g_max, synapse=None)
        # init_weights = np.random.uniform(0, 1, (n_neurons, n_in))

        # conn1 = nengo.Connection(input_layer.neurons,layer1.neurons,learning_rule_type=VLR(learning_rate=args.lr,vprog=-0.6, var_ratio = args.var_ratio),transform=init_weights)

        #Lateral inhibition
        # inhib = nengo.Connection(layer1.neurons,layer1.neurons,**lateral_inhib_args)

        #Probes
        p_true_label = nengo.Probe(true_label, sample_every=probe_sample_rate)
        p_input_layer = nengo.Probe(input_layer.neurons,
                                    sample_every=probe_sample_rate)
        p_layer_1 = nengo.Probe(layer1.neurons, sample_every=probe_sample_rate)
        # weights_probe = nengo.Probe(conn1,"weights",sample_every=probe_sample_rate)

        # Handle on the weight history recorded by the learning-rule node.
        weights = w.output.history

    # with nengo_ocl.Simulator(model) as sim :
    with nengo.Simulator(model, dt=0.005) as sim:

        # Give the learning rule direct access to the live simulator
        # signals (membrane voltage of the input layer, spikes of layer 1).
        w.output.set_signal_vmem(
            sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
        w.output.set_signal_out(
            sim.signals[sim.model.sig[layer1.neurons]["out"]])

        sim.run(
            (presentation_time + pause_time) * labels.shape[0] * iterations)

    #save the model
    # now = time.strftime("%Y%m%d-%H%M%S")
    # folder = os.getcwd()+"/MNIST_VDSP"+now
    # os.mkdir(folder)
    # print(weights)

    # weights = sim.data[weights_probe]
    last_weight = weights[-1]

    # pickle.dump(weights, open( folder+"/trained_weights", "wb" ))
    # pickle.dump(argument_string, open( folder+"/arguments", "wb" ))

    # Assign each layer-1 neuron the class it spiked most for,
    # normalized by how long each class was presented.
    t_data = sim.trange(sample_every=probe_sample_rate)
    labels = sim.data[p_true_label][:, 0]
    output_spikes = sim.data[p_layer_1]
    neuron_class = np.zeros((n_neurons, 1))
    n_classes = 10
    for j in range(n_neurons):
        spike_times_neuron_j = t_data[np.where(output_spikes[:, j] > 0)]
        max_spike_times = 0
        for i in range(n_classes):
            class_presentation_times_i = t_data[np.where(labels == i)]
            #Normalized number of spikes wrt class presentation time
            num_spikes = len(
                np.intersect1d(spike_times_neuron_j,
                               class_presentation_times_i)) / (
                                   len(class_presentation_times_i) + 1)
            if (num_spikes > max_spike_times):
                neuron_class[j] = i
                max_spike_times = num_spikes

    # print("Neuron class: \n", neuron_class)

    sim.close()
    '''
    Testing
    '''

    # img_rows, img_cols = 28, 28
    input_nbr = 10000
    # input_nbr = int(args.input_nbr/6)

    # Dataset = "Mnist"
    # # (image_train, label_train), (image_test, label_test) = load_mnist()
    # (image_train, label_train), (image_test, label_test) = (tf.keras.datasets.mnist.load_data())

    # #select the 0s and 1s as the two classes from MNIST data
    # image_test_filtered = []
    # label_test_filtered = []

    # for i in range(0,input_nbr):
    # #  if (label_train[i] == 1 or label_train[i] == 0):
    #     image_test_filtered.append(image_test[i])
    #     label_test_filtered.append(label_test[i])

    # print("actual input",len(label_test_filtered))
    # print(np.bincount(label_test_filtered))

    # image_test_filtered = np.array(image_test_filtered)
    # label_test_filtered = np.array(label_test_filtered)

    #############################

    # Fresh network for testing: same layers, but the learned weights are
    # frozen into a static transform (no learning-rule node).
    model = nengo.Network(label="My network", )

    # Learning params

    with model:
        # input layer
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(
            nengo.processes.PresentInput(image_test_filtered,
                                         presentation_time=presentation_time))
        true_label = nengo.Node(
            nengo.processes.PresentInput(label_test_filtered,
                                         presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        input_layer = nengo.Ensemble(**input_neurons_args)

        input_conn = nengo.Connection(picture,
                                      input_layer.neurons,
                                      synapse=None)

        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)

        # w = nengo.Node(CustomRule_post_v2(**learning_args), size_in=784, size_out=n_neurons)

        nengo.Connection(input_layer.neurons,
                         layer1.neurons,
                         transform=last_weight)

        p_true_label = nengo.Probe(true_label)
        p_layer_1 = nengo.Probe(layer1.neurons)
        p_input_layer = nengo.Probe(input_layer.neurons)
        #if(not full_log):
        #    nengo.Node(log)

        #############################

    step_time = (presentation_time + pause_time)

    with nengo.Simulator(model, dt=0.005) as sim:

        sim.run(step_time * label_test_filtered.shape[0])

    labels = sim.data[p_true_label][:, 0]
    output_spikes = sim.data[p_layer_1]
    n_classes = 10
    # rate_data = nengo.synapses.Lowpass(0.1).filtfilt(sim.data[p_layer_1])
    predicted_labels = []
    true_labels = []
    correct_classified = 0
    wrong_classified = 0

    class_spikes = np.ones((10, 1))

    # Classify each presented image by the class whose assigned neurons
    # produced the most spikes on average during that presentation window.
    for num in range(input_nbr):
        #np.sum(sim.data[my_spike_probe] > 0, axis=0)

        output_spikes_num = output_spikes[
            num * int(presentation_time / 0.005):(num + 1) *
            int(presentation_time / 0.005), :]  # 0.350/0.005
        num_spikes = np.sum(output_spikes_num > 0, axis=0)

        for i in range(n_classes):
            sum_temp = 0
            count_temp = 0
            for j in range(n_neurons):
                if ((neuron_class[j]) == i):
                    sum_temp += num_spikes[j]
                    count_temp += 1

            if (count_temp == 0):
                class_spikes[i] = 0
            else:
                class_spikes[i] = sum_temp / count_temp
        # print(class_spikes)
        # ``k`` is unused below; leftover from the commented-out
        # per-neuron prediction scheme.
        k = np.argmax(num_spikes)
        # predicted_labels.append(neuron_class[k])
        class_pred = np.argmax(class_spikes)
        predicted_labels.append(class_pred)

        true_class = labels[(num * int(presentation_time / 0.005))]
        # print(true_class)
        # print(class_pred)

        # if(neuron_class[k] == true_class):
        #     correct_classified+=1
        # else:
        #     wrong_classified+=1
        if (class_pred == true_class):
            correct_classified += 1
        else:
            wrong_classified += 1

    accuracy = correct_classified / (correct_classified +
                                     wrong_classified) * 100
    print("Accuracy: ", accuracy)
    sim.close()

    # nni.report_final_result(accuracy)

    # NOTE(review): ``del sim.data`` deletes an attribute of the simulator
    # object — confirm the Simulator allows attribute deletion here.
    del weights, sim.data, labels, output_spikes, class_pred, t_data

    return accuracy, last_weight
Ejemplo n.º 8
0
DIM = 64  # dimensionality of the semantic-pointer vocabulary

# Environment states and the actions available in each.
states = ['S0', 'S1', 'S2']

actions = ['L', 'R']

# Associative-memory mapping: state*action pairs -> next-state mixtures.
input_keys = ['S0*L', 'S0*R', 'S1*L', 'S1*R', 'S2*L', 'S2*R']
output_keys = ['0.7*S1 + 0.3*S2', '0.3*S1 + 0.7*S2', 'S0', 'S0', 'S0', 'S0']

vocab = spa.Vocabulary(dimensions=DIM)

# TODO: these vectors might need to be chosen in a smarter way
for sp in states+actions:
    vocab.parse(sp)

model = nengo.Network('RL', seed=13)
with model:

    model.assoc_mem = spa.AssociativeMemory(input_vocab=vocab,
                                            output_vocab=vocab,
                                            input_keys=input_keys,
                                            output_keys=output_keys,
                                            wta_output=True,
                                            threshold_output=True
                                           )

    model.state = spa.State(DIM, vocab=vocab)
    model.action = spa.State(DIM, vocab=vocab)
    model.probability = spa.State(DIM, vocab=vocab)

    # State and selected action convolved together
Ejemplo n.º 9
0
def test_missing_attribute():
    """Setting an unknown attribute on a Nengo object warns the user."""
    with nengo.Network():
        ens = nengo.Ensemble(10, 1)

        # Assigning to a name Nengo does not recognize emits SyntaxWarning.
        with pytest.warns(SyntaxWarning):
            ens.new_attribute = 9
def sensor(t):
    """Return the agent's current (x, y) position as a one-hot vector.

    The 5x5 grid is flattened row-major into 25 entries; ``agent`` is a
    module-level environment object defined elsewhere.
    """
    one_hot = np.zeros(25)
    cell = 5 * (agent.x - 1) + (agent.y - 1)
    one_hot[cell] = 1

    return one_hot


def reward(t):
    '''Call to get current reward signal provided to agent'''
    # ``agent`` is a module-level environment object defined elsewhere.
    return agent.reward


with nengo.Network(seed=2) as model:
    env = td_grid.GridNode(environment, dt=0.001)

    # define nodes for plotting data, managing agent's interface with environment
    reward_node = nengo.Node(reward, size_out=1, label='reward')
    sensor_node = nengo.Node(sensor, size_out=25, label='sensor')
    update_node = nengo.Node(env_update.step, size_in=4, size_out=12, label='env')
    qvalue_node = nengo.Node(size_in=4)

    # define neurons to encode state representations
    state = nengo.Ensemble(n_neurons=n_neurons, dimensions=25,
                           intercepts=nengo.dists.Choice([0.15]), radius=2)

    # define neurons that compute the learning signal
    learn_signal = nengo.Ensemble(n_neurons=1000, dimensions=4)
Ejemplo n.º 11
0
# System matrices for the linear dynamical system; A and B are defined
# earlier (outside this excerpt).  C observes the state, D is the
# feedthrough term, Q/R are the noise covariances passed to LDSNet.
C = np.array([[1]])
D = np.zeros_like(A)
Q = np.array([[0.1]])
R = np.array([[0.01]])

# Stimulus schedule: square wave toggling every 10 system time constants.
STIM_PARAMS = {
    0: 1,
    10*TAU_SYS: 0,
    20*TAU_SYS: 1,
    30*TAU_SYS: 0,
    40*TAU_SYS: 1,
}

fig, ax = plt.subplots(figsize=(16, 12))
# run underlying dynamical system beforehand
state_model = nengo.Network()
with state_model:
    lds_net = LDSNet(A, B, C, D, Q, R)
    # NOTE(review): nengo.utils.functions.piecewise was replaced by
    # nengo.processes.Piecewise in newer Nengo releases — confirm version.
    stim = nengo.Node(nengo.utils.functions.piecewise(STIM_PARAMS))
    nengo.Connection(stim, lds_net.input, synapse=None)
    stim_probe = nengo.Probe(stim)
    lds_state_probe = nengo.Probe(lds_net.state)
    lds_out_probe = nengo.Probe(lds_net.output)

sim = nengo.Simulator(state_model)
sim.run(SIM_TIME)
# plot underlying dynamical system results
ax.plot(sim.trange(), sim.data[stim_probe], alpha=0.5, label="stim")
ax.plot(sim.trange(), sim.data[lds_state_probe], alpha=0.5, label="system state")
ax.plot(sim.trange(), sim.data[lds_out_probe], alpha=0.3, label="measurement")
Ejemplo n.º 12
0
def generate(arm,
             kv=1,
             learning_rate=None,
             direct_mode=False,
             learned_weights=None,
             means=None,
             scales=None):
    """ Cerebellum model. Compensates for the inertia in the system.
    If dynamics compensation is set True, then it also generates a
    nonlinear adaptive control signal, using an efferent copy of the
    outgoing motor control signal as a training signal.

    input: [q, dq, u]
    output: [u_dynamics, u_adapt]

    Parameters
    ----------
    arm : arm model
        Provides DOF and the inertia matrix M(q).
    kv : float
        Velocity gain used in the -M * kv * dq dynamics term.
    learning_rate : float or None
        PES learning rate; if None the adaptive population is omitted.
    direct_mode : bool
        Compute with nengo.Direct() neurons (no spiking approximation).
    learned_weights : ndarray or None
        Saved decoder weights to warm-start the adaptive connection.
    means, scales : ndarray or None
        Per-dimension input normalization (see note below).
    """
    dim = arm.DOF * 2

    # NOTE: This function will scale the input so that each dimensions is
    # in the range of -1 to 1. Since we know the operating space of the arm
    # we can set these specifically. This is a hack so that we don't need
    # 100k neurons to be able to simulate accurately generated movement,
    # you can think of it as choosing a relevant part of motor cortex to run.
    # Without this scaling, it would work still, but it would require
    # significantly more neurons to achieve the same level of performance.
    means = np.zeros(dim) if means is None else means
    scales = np.ones(dim) if scales is None else scales
    scale_down, scale_up = generate_scaling_functions(np.asarray(means),
                                                      np.asarray(scales))

    net = nengo.Network('CB')
    if direct_mode:
        net.config[nengo.Ensemble].neuron_type = nengo.Direct()
    with net:
        # create / connect up CB ----------------------------------------------
        net.CB = nengo.Ensemble(n_neurons=1000,
                                dimensions=dim,
                                radius=np.sqrt(dim),
                                intercepts=AreaIntercepts(
                                    dimensions=dim,
                                    base=nengo.dists.Uniform(-1, .1)))
        # expecting input in form [q, dq, u]
        # NOTE(review): size_in is dim + arm.DOF + 2, but the training
        # signal below reads net.input[dim:dim + 2], i.e. a 2-D u —
        # confirm this arithmetic against the caller.
        net.input = nengo.Node(output=scale_down, size_in=dim + arm.DOF + 2)
        cb_input = nengo.Node(size_in=dim, label='CB input')
        nengo.Connection(net.input[:dim], cb_input)
        # output is [-Mdq, u_adapt]
        net.output = nengo.Node(size_in=arm.DOF * 2)

        def CB_func(x):
            """ calculate the dynamic component of the OSC signal """
            # undo the normalization before computing real dynamics
            x = scale_up(x)
            q = x[:arm.DOF]
            dq = x[arm.DOF:arm.DOF * 2]

            # calculate inertia matrix
            M = arm.M(q=q)
            return -np.dot(M, kv * dq)

        # connect system feedback, don't account for synapses twice
        nengo.Connection(net.input[:dim], net.CB)
        nengo.Connection(net.CB,
                         net.output[:arm.DOF],
                         function=CB_func,
                         synapse=None)
        # dynamics adaptation
        if learning_rate is not None:
            net.CB_adapt = nengo.Ensemble(
                n_neurons=1000,
                dimensions=arm.DOF * 2,
                radius=np.sqrt(arm.DOF * 2),
                # enforce spiking neurons
                neuron_type=nengo.LIF(),
                # NOTE(review): AreaIntercepts gets dimensions=arm.DOF here
                # while the ensemble itself has arm.DOF * 2 dimensions —
                # confirm this mismatch is intentional.
                intercepts=AreaIntercepts(dimensions=arm.DOF,
                                          base=nengo.dists.Uniform(-.5, .2)))

            net.learn_encoders = nengo.Connection(
                net.input[:arm.DOF * 2],
                net.CB_adapt,
            )

            # if no saved weights were passed in start from zero
            weights = (learned_weights if learned_weights is not None else
                       np.zeros((arm.DOF, net.CB_adapt.n_neurons)))
            net.learn_conn = nengo.Connection(
                # connect directly to arm so that adaptive signal
                # is not included in the training signal
                net.CB_adapt.neurons,
                net.output[arm.DOF:],
                # set up to initially have 0 output from adaptive connection
                transform=weights,
                learning_rule_type=nengo.PES(learning_rate=learning_rate),
                synapse=None)

            # hook up training signal
            # NOTE: using a large synapse (i.e. low pass filter time constant)
            # here, because sudden changes in the training signal (as caused
            # when the target changes suddenly) will disrupt the learned
            # adaptive throughout state space
            nengo.Connection(net.input[dim:dim + 2],
                             net.learn_conn.learning_rule,
                             transform=-1,
                             synapse=.01)  #-1 for error (not reward)

    return net
Ejemplo n.º 13
0
def test_multidim(Simulator, plt, seed, rng):
    """Tests with multiple dimensions per ensemble.

    Feeds two random 3-D vectors into ensemble arrays A and B, interleaves
    their outputs into a 2-D-per-ensemble array C, and checks that all
    three arrays represent their values accurately.
    """
    dims = 3
    n_neurons = 60
    radius = 1.0

    a = rng.uniform(low=-0.7, high=0.7, size=dims)
    b = rng.uniform(low=-0.7, high=0.7, size=dims)
    # Interleave a and b so ensemble i of C encodes the pair (a[i], b[i]).
    c = np.zeros(2 * dims)
    c[::2] = a
    c[1::2] = b

    model = nengo.Network(seed=seed)
    with model:
        inputA = nengo.Node(a)
        inputB = nengo.Node(b)
        A = nengo.networks.EnsembleArray(n_neurons, dims, radius=radius)
        B = nengo.networks.EnsembleArray(n_neurons, dims, radius=radius)
        C = nengo.networks.EnsembleArray(n_neurons * 2,
                                         dims,
                                         ens_dimensions=2,
                                         radius=radius)
        nengo.Connection(inputA, A.input)
        nengo.Connection(inputB, B.input)
        nengo.Connection(A.output, C.input[::2])
        nengo.Connection(B.output, C.input[1::2])

        A_p = nengo.Probe(A.output, synapse=0.03)
        B_p = nengo.Probe(B.output, synapse=0.03)
        C_p = nengo.Probe(C.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.4)

    t = sim.trange()

    # Plot the reference (dashed) against the simulated (solid) traces.
    def plot(sim, a, p, title=""):
        a_ref = np.tile(a, (len(t), 1))
        a_sim = sim.data[p]
        colors = ['b', 'g', 'r', 'c', 'm', 'y']
        for i in range(a_sim.shape[1]):
            plt.plot(t, a_ref[:, i], '--', color=colors[i % 6])
            plt.plot(t, a_sim[:, i], '-', color=colors[i % 6])
        plt.xticks(np.linspace(0, 0.4, 5))
        plt.xlim(right=t[-1])
        plt.title(title)

    plt.subplot(131)
    plot(sim, a, A_p, title="A")
    plt.subplot(132)
    plot(sim, b, B_p, title="B")
    plt.subplot(133)
    plot(sim, c, C_p, title="C")

    # Average over the settled second half of the run before comparing.
    a_sim = sim.data[A_p][t > 0.2].mean(axis=0)
    b_sim = sim.data[B_p][t > 0.2].mean(axis=0)
    c_sim = sim.data[C_p][t > 0.2].mean(axis=0)

    rtol, atol = 0.1, 0.05
    assert np.allclose(a, a_sim, atol=atol, rtol=rtol)
    assert np.allclose(b, b_sim, atol=atol, rtol=rtol)
    assert np.allclose(c, c_sim, atol=atol, rtol=rtol)
Ejemplo n.º 14
0
def test_matrix_mul(Simulator, plt, seed):
    """Neurally compute ``Amat @ Bmat`` and check the decoded result.

    A and B hold the flattened matrix entries in ensemble arrays; C pairs
    each required (A_ij, B_jk) in 2-D sub-ensembles so their product can be
    decoded; D sums the products into the entries of the result matrix.
    """
    N = 100  # neurons per ensemble-array sub-ensemble

    Amat = np.asarray([[0.5, -0.5]])
    Bmat = np.asarray([[0.8, 0.3], [0.2, 0.7]])
    radius = 1

    model = nengo.Network(label='Matrix Multiplication', seed=seed)
    with model:
        # One 1-D sub-ensemble per matrix entry.
        A = nengo.networks.EnsembleArray(N,
                                         Amat.size,
                                         radius=radius,
                                         label="A")
        B = nengo.networks.EnsembleArray(N,
                                         Bmat.size,
                                         radius=radius,
                                         label="B")

        inputA = nengo.Node(output=Amat.ravel())
        inputB = nengo.Node(output=Bmat.ravel())
        nengo.Connection(inputA, A.input)
        nengo.Connection(inputB, B.input)
        A_p = nengo.Probe(A.output, synapse=0.03)
        B_p = nengo.Probe(B.output, synapse=0.03)

        # C holds one (a, b) pair per elementwise product needed.  Radius is
        # sqrt(2)*radius because each sub-ensemble represents a 2-D point
        # whose coordinates can each reach +-radius; corner encoders suit
        # the x[0]*x[1] product decode.
        Cdims = Amat.size * Bmat.shape[1]
        C = nengo.networks.EnsembleArray(N,
                                         Cdims,
                                         ens_dimensions=2,
                                         radius=np.sqrt(2) * radius,
                                         encoders=Choice([[1, 1], [-1, 1],
                                                          [1, -1], [-1, -1]]))

        transformA, transformB = _mmul_transforms(Amat.shape, Bmat.shape,
                                                  C.dimensions)

        nengo.Connection(A.output, C.input, transform=transformA)
        nengo.Connection(B.output, C.input, transform=transformB)

        # D accumulates the pairwise products into the result entries.
        D = nengo.networks.EnsembleArray(N,
                                         Amat.shape[0] * Bmat.shape[1],
                                         radius=radius)

        # NOTE(review): this index arithmetic assumes the product ordering
        # produced by _mmul_transforms; `i // Bmat.shape[0]` groups
        # consecutive products into the same output entry — confirm it
        # generalizes beyond these 1x2 / 2x2 shapes.
        transformC = np.zeros((D.dimensions, Bmat.size))
        for i in range(Bmat.size):
            transformC[i // Bmat.shape[0]][i] = 1

        prod = C.add_output("product", lambda x: x[0] * x[1])

        nengo.Connection(prod, D.input, transform=transformC)
        D_p = nengo.Probe(D.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.3)

    t = sim.trange()
    tmask = (t >= 0.2)  # ignore the initial transient

    plt.plot(t, sim.data[D_p])
    for d in np.dot(Amat, Bmat).flatten():
        plt.axhline(d, color='k')

    tols = dict(atol=0.1, rtol=0.01)
    # Inputs must be represented accurately before checking the product.
    for i in range(Amat.size):
        assert np.allclose(sim.data[A_p][tmask, i], Amat.flat[i], **tols)
    for i in range(Bmat.size):
        assert np.allclose(sim.data[B_p][tmask, i], Bmat.flat[i], **tols)

    Dmat = np.dot(Amat, Bmat)
    for i in range(Amat.shape[0]):
        for k in range(Bmat.shape[1]):
            data_ik = sim.data[D_p][tmask, i * Bmat.shape[1] + k]
            assert np.allclose(data_ik, Dmat[i, k], **tols)
Ejemplo n.º 15
0
def test_time_absolute(Simulator):
    """trange() reports absolute timestamps starting at dt, not 0."""
    net = nengo.Network()
    with Simulator(net) as sim:
        # run length chosen so that it is a multiple of unroll_simulations
        sim.run(0.01)
    expected = np.arange(sim.dt, 0.01 + sim.dt, sim.dt)
    assert np.allclose(sim.trange(), expected)
Ejemplo n.º 16
0
def test_neurons_readonly():
    """Assigning to ``Ensemble.neurons`` must raise ReadonlyError."""
    with nengo.Network():
        ensemble = nengo.Ensemble(10, 1)
        # .neurons is a read-only view onto the ensemble's neuron objects
        with pytest.raises(ReadonlyError, match="neurons"):
            ensemble.neurons = "test"
Ejemplo n.º 17
0
# Classical conditioning
# There are three different unconditioned stimuli (US) that are provided
# to the model, one after the other.  Each has a different hardwired
# unconditioned response (UR).
# There is also a conditioned stimulus (CS) provided, and there is a different
# one for each US.  The model attempts to learn to trigger the correct
# conditioned response (CR) in response to the CS.
# After learning, the CR should start to respond before the corresponding UR.
import nengo
import numpy as np

D = 3          # number of stimulus/response channels
N = D * 50     # neurons available per D-dimensional ensemble

model = nengo.Network(label="My Network")
with model:

    def us_stim(t):
        """Cycle through the three US: channel i pulses while i+0.9 < t%3 < i+1."""
        t = t % 3
        for channel, (lo, hi) in enumerate(((0.9, 1), (1.9, 2), (2.9, 3))):
            if lo < t < hi:
                pulse = [0, 0, 0]
                pulse[channel] = 1
                return pulse
        return [0, 0, 0]

    us_stim = nengo.Node(us_stim)

    def cs_stim(t):
        """Cycle through the three CS: each starts 0.2 s before its US."""
        t = t % 3
        for channel, (lo, hi) in enumerate(((0.7, 1), (1.7, 2), (2.7, 3))):
            if lo < t < hi:
                pulse = [0, 0, 0]
                pulse[channel] = 1
                return pulse
        return [0, 0, 0]

    cs_stim = nengo.Node(cs_stim)
Ejemplo n.º 18
0
def test_model_attribute_is_deprecated(RefSimulator):
    """Reading ``Simulator.model`` should emit a DeprecationWarning."""
    with warns(DeprecationWarning):
        with RefSimulator(nengo.Network()) as sim:
            pass
        # the deprecated attribute is read after the simulator context exits,
        # but still inside the warns() block so the warning is captured
        assert sim.model
Ejemplo n.º 19
0
def lmu(theta, input_d, native_nengo=False, dtype="float32"):
    """
    A network containing a single Legendre Memory Unit cell and dense readout.

    See [1]_ for more details.

    Parameters
    ----------
    theta : int
        Time window parameter for LMU.
    input_d : int
        Dimensionality of input signal.
    native_nengo : bool
        If True, build the LMU out of Nengo objects. Otherwise, build the LMU
        directly in TensorFlow, and use a `.TensorNode` to wrap the whole cell.
    dtype : str
        Float dtype to use for internal parameters of LMU when ``native_nengo=False``
        (``native_nengo=True`` will use the dtype of the Simulator).

    Returns
    -------
    net : `nengo.Network`
        Benchmark network

    References
    ----------
    .. [1] Aaron R. Voelker, Ivana Kajić, and Chris Eliasmith. Legendre memory units:
       continuous-time representation in recurrent neural networks.
       In Advances in Neural Information Processing Systems. 2019.
       https://papers.nips.cc/paper/9689-legendre-memory-units-continuous-time-representation-in-recurrent-neural-networks.
    """
    if native_nengo:
        # building LMU cell directly out of Nengo objects

        class LMUCell(nengo.Network):
            """Implements an LMU cell as a Nengo network."""
            def __init__(self, units, order, theta, input_d, **kwargs):
                super().__init__(**kwargs)

                # compute the A and B matrices according to the LMU's mathematical
                # derivation (see the paper for details)
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0)**(i - j + 1)) * R
                B = (-1.0)**Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1, ))

                # discretize the continuous-time (A, B) system; one step of the
                # discrete system corresponds to one simulator timestep
                A, B, _, _, _ = cont2discrete((A, B, C, D),
                                              dt=1.0,
                                              method="zoh")

                with self:
                    nengo_dl.configure_settings(trainable=None)

                    # create objects corresponding to the x/u/m/h variables in LMU
                    self.x = nengo.Node(size_in=input_d)
                    self.u = nengo.Node(size_in=1)
                    self.m = nengo.Node(size_in=order)
                    self.h = nengo_dl.TensorNode(tf.nn.tanh,
                                                 shape_in=(units, ),
                                                 pass_time=False)

                    # compute u_t
                    # note that setting synapse=0 (versus synapse=None) adds a
                    # one-timestep delay, so we can think of any connections with
                    # synapse=0 as representing value_{t-1}
                    nengo.Connection(self.x,
                                     self.u,
                                     transform=np.ones((1, input_d)),
                                     synapse=None)
                    nengo.Connection(self.h,
                                     self.u,
                                     transform=np.zeros((1, units)),
                                     synapse=0)
                    nengo.Connection(self.m,
                                     self.u,
                                     transform=np.zeros((1, order)),
                                     synapse=0)

                    # compute m_t
                    # in this implementation we'll make A and B non-trainable, but they
                    # could also be optimized in the same way as the other parameters
                    conn = nengo.Connection(self.m,
                                            self.m,
                                            transform=A,
                                            synapse=0)
                    self.config[conn].trainable = False
                    conn = nengo.Connection(self.u,
                                            self.m,
                                            transform=B,
                                            synapse=None)
                    self.config[conn].trainable = False

                    # compute h_t
                    nengo.Connection(
                        self.x,
                        self.h,
                        transform=np.zeros((units, input_d)),
                        synapse=None,
                    )
                    nengo.Connection(self.h,
                                     self.h,
                                     transform=np.zeros((units, units)),
                                     synapse=0)
                    nengo.Connection(
                        self.m,
                        self.h,
                        transform=nengo_dl.dists.Glorot(distribution="normal"),
                        synapse=None,
                    )

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            nengo_dl.configure_settings(
                trainable=None,
                stateful=False,
                keep_history=False,
            )

            # input node
            net.inp = nengo.Node(np.zeros(input_d))

            # lmu cell
            lmu_cell = LMUCell(units=212,
                               order=256,
                               theta=theta,
                               input_d=input_d)
            conn = nengo.Connection(net.inp, lmu_cell.x, synapse=None)
            net.config[conn].trainable = False

            # dense linear readout
            out = nengo.Node(size_in=10)
            nengo.Connection(lmu_cell.h,
                             out,
                             transform=nengo_dl.dists.Glorot(),
                             synapse=None)

            # record output. note that we set keep_history=False above, so this will
            # only record the output on the last timestep (which is all we need
            # on this task)
            net.p = nengo.Probe(out)
    else:
        # putting everything in a tensornode

        # define LMUCell
        class LMUCell(tf.keras.layers.AbstractRNNCell):
            """Implement LMU as Keras RNN cell."""
            def __init__(self, units, order, theta, **kwargs):
                super().__init__(**kwargs)

                self.units = units
                self.order = order
                self.theta = theta

                # same LMU (A, B) derivation as the native-Nengo branch above
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0)**(i - j + 1)) * R
                B = (-1.0)**Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1, ))

                self._A, self._B, _, _, _ = cont2discrete((A, B, C, D),
                                                          dt=1.0,
                                                          method="zoh")

            @property
            def state_size(self):
                """Size of RNN state variables."""
                # two state vectors: hidden state h (units) and memory m (order)
                return self.units, self.order

            @property
            def output_size(self):
                """Size of cell output."""
                return self.units

            def build(self, input_shape):
                """Set up all the weight matrices used inside the cell."""

                super().build(input_shape)

                # the three *_encoders project x/h/m down to the scalar u_t
                input_dim = input_shape[-1]
                self.input_encoders = self.add_weight(
                    shape=(input_dim, 1),
                    initializer=tf.initializers.ones(),
                )
                self.hidden_encoders = self.add_weight(
                    shape=(self.units, 1),
                    initializer=tf.initializers.zeros(),
                )
                self.memory_encoders = self.add_weight(
                    shape=(self.order, 1),
                    initializer=tf.initializers.zeros(),
                )
                # the three *_kernel matrices feed x/h/m into the hidden state h_t
                self.input_kernel = self.add_weight(
                    shape=(input_dim, self.units),
                    initializer=tf.initializers.zeros(),
                )
                self.hidden_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer=tf.initializers.zeros(),
                )
                self.memory_kernel = self.add_weight(
                    shape=(self.order, self.units),
                    initializer=tf.initializers.glorot_normal(),
                )
                # fixed (non-trainable) discretized LMU state matrices, stored
                # transposed so they can right-multiply batched row vectors
                self.AT = self.add_weight(
                    shape=(self.order, self.order),
                    initializer=tf.initializers.constant(self._A.T),
                    trainable=False,
                )
                self.BT = self.add_weight(
                    shape=(1, self.order),
                    initializer=tf.initializers.constant(self._B.T),
                    trainable=False,
                )

            def call(self, inputs, states):
                """Compute cell output and state updates."""

                h_prev, m_prev = states

                # compute u_t from the above diagram
                u = (tf.matmul(inputs, self.input_encoders) +
                     tf.matmul(h_prev, self.hidden_encoders) +
                     tf.matmul(m_prev, self.memory_encoders))

                # compute updated memory state vector (m_t in diagram)
                m = tf.matmul(m_prev, self.AT) + tf.matmul(u, self.BT)

                # compute updated hidden state vector (h_t in diagram)
                h = tf.nn.tanh(
                    tf.matmul(inputs, self.input_kernel) +
                    tf.matmul(h_prev, self.hidden_kernel) +
                    tf.matmul(m, self.memory_kernel))

                return h, [h, m]

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            # we could set use_loop=False as well here, but leaving it for parity
            # with native_nengo
            nengo_dl.configure_settings(stateful=False)

            # NOTE(review): node size is theta while the Layer below reshapes
            # to (theta, input_d); assumes input_d == 1 here or that the
            # training data overrides this placeholder — confirm with callers
            net.inp = nengo.Node(np.zeros(theta))

            rnn = nengo_dl.Layer(
                tf.keras.layers.RNN(
                    LMUCell(units=212, order=256, theta=theta, dtype=dtype),
                    return_sequences=False,
                ))(net.inp, shape_in=(theta, input_d))

            out = nengo.Node(size_in=10)
            nengo.Connection(rnn,
                             out,
                             transform=nengo_dl.dists.Glorot(),
                             synapse=None)

            net.p = nengo.Probe(out)

    return net
Ejemplo n.º 20
0
def test_time_absolute(Simulator):
    """trange() should report the absolute times dt, 2*dt, 3*dt."""
    net = nengo.Network()
    with Simulator(net) as sim:
        sim.run(0.003)
    # default dt is 0.001, so three steps end at t = 0.003
    assert np.allclose(sim.trange(), [0.001, 0.002, 0.003])
Ejemplo n.º 21
0
def test_full_transform():
    """Check that full_transform() expands sliced/scalar connection
    transforms into the equivalent dense (post_size x pre_size) matrix."""
    N = 30

    with nengo.Network():
        # a 3-neuron object so neuron-level (non-decoded) slicing can be tested
        neurons3 = nengo.Ensemble(3, dimensions=1).neurons
        ens1 = nengo.Ensemble(N, dimensions=1)
        ens2 = nengo.Ensemble(N, dimensions=2)
        ens3 = nengo.Ensemble(N, dimensions=3)
        node1 = nengo.Node(output=[0])
        node2 = nengo.Node(output=[0, 0])
        node3 = nengo.Node(output=[0, 0, 0])

        # Pre slice with default transform -> 1x3 transform
        conn = nengo.Connection(node3[2], ens1)
        assert np.all(conn.transform == np.array(1))
        assert np.all(full_transform(conn) == np.array([[0, 0, 1]]))

        # Post slice with 1x1 transform -> 1x2 transform
        conn = nengo.Connection(node2[0], ens1, transform=-2)
        assert np.all(conn.transform == np.array(-2))
        assert np.all(full_transform(conn) == np.array([[-2, 0]]))

        # Post slice with 2x1 tranfsorm -> 3x1 transform
        conn = nengo.Connection(node1, ens3[::2], transform=[[1], [2]])
        assert np.all(conn.transform == np.array([[1], [2]]))
        assert np.all(full_transform(conn) == np.array([[1], [0], [2]]))

        # Both slices with 2x1 transform -> 3x2 transform
        conn = nengo.Connection(ens2[-1], neurons3[1:], transform=[[1], [2]])
        assert np.all(conn.transform == np.array([[1], [2]]))
        assert np.all(full_transform(conn) == np.array(
            [[0, 0], [0, 1], [0, 2]]))

        # Full slices that can be optimized away
        conn = nengo.Connection(ens3[:], ens3, transform=2)
        assert np.all(conn.transform == np.array(2))
        assert np.all(full_transform(conn) == np.array(2))

        # Pre slice with 1x1 transform on 2x2 slices -> 2x3 transform
        conn = nengo.Connection(neurons3[:2], ens2, transform=-1)
        assert np.all(conn.transform == np.array(-1))
        assert np.all(full_transform(conn) == np.array(
            [[-1, 0, 0], [0, -1, 0]]))

        # Both slices with 1x1 transform on 2x2 slices -> 3x3 transform
        conn = nengo.Connection(neurons3[1:], neurons3[::2], transform=-1)
        assert np.all(conn.transform == np.array(-1))
        assert np.all(full_transform(conn) == np.array([[0, -1, 0],
                                                       [0, 0, 0],
                                                       [0, 0, -1]]))

        # Both slices with 2x2 transform -> 3x3 transform
        conn = nengo.Connection(node3[[0, 2]], neurons3[1:],
                                transform=[[1, 2], [3, 4]])
        assert np.all(conn.transform == np.array([[1, 2], [3, 4]]))
        assert np.all(full_transform(conn) == np.array([[0, 0, 0],
                                                       [1, 0, 2],
                                                       [3, 0, 4]]))

        # Both slices with 2x3 transform -> 3x3 transform... IN REVERSE!
        conn = nengo.Connection(neurons3[::-1], neurons3[[2, 0]],
                                transform=[[1, 2, 3], [4, 5, 6]])
        assert np.all(conn.transform == np.array([[1, 2, 3], [4, 5, 6]]))
        assert np.all(full_transform(conn) == np.array([[6, 5, 4],
                                                       [0, 0, 0],
                                                       [3, 2, 1]]))

        # Both slices using lists
        conn = nengo.Connection(neurons3[[1, 0, 2]], neurons3[[2, 1]],
                                transform=[[1, 2, 3], [4, 5, 6]])
        assert np.all(conn.transform == np.array([[1, 2, 3], [4, 5, 6]]))
        assert np.all(full_transform(conn) == np.array([[0, 0, 0],
                                                       [5, 4, 6],
                                                       [2, 1, 3]]))

        # using vector
        conn = nengo.Connection(ens3[[1, 0, 2]], ens3[[2, 0, 1]],
                                transform=[1, 2, 3])
        assert np.all(conn.transform == np.array([1, 2, 3]))
        assert np.all(full_transform(conn) == np.array([[2, 0, 0],
                                                       [0, 0, 3],
                                                       [0, 1, 0]]))

        # using vector 1D: expansion must produce a 2-D (1x1) matrix
        conn = nengo.Connection(ens1, ens1, transform=[5])
        assert full_transform(conn).ndim != 1
        assert np.all(full_transform(conn) == 5)

        # using vector and lists
        conn = nengo.Connection(ens3[[1, 0, 2]], ens3[[2, 0, 1]],
                                transform=[1, 2, 3])
        assert np.all(conn.transform == np.array([1, 2, 3]))
        assert np.all(full_transform(conn) == np.array([[2, 0, 0],
                                                       [0, 0, 3],
                                                       [0, 1, 0]]))

        # using multi-index lists (post index 0 targeted twice)
        conn = nengo.Connection(ens3, ens2[[0, 1, 0]])
        assert np.all(full_transform(conn) == np.array([[1, 0, 1],
                                                       [0, 1, 0]]))
Ejemplo n.º 22
0
def _test_pes(Simulator,
              nl,
              plt,
              seed,
              pre_neurons=False,
              post_neurons=False,
              weight_solver=False,
              vin=np.array([0.5, -0.5]),
              vout=None,
              n=200,
              function=None,
              transform=np.array(1.),
              rate=1e-3):
    """Helper: learn a connection from ``a`` to ``b`` with PES so that ``b``
    comes to represent ``vout`` when ``a`` represents ``vin``.

    The flags choose the connection endpoints (decoded ensemble vs raw
    neurons) and whether a full weight matrix is solved for; the assertions
    at the end are shared by all parameterizations.
    """

    vout = np.array(vin) if vout is None else vout

    model = nengo.Network(seed=seed)
    with model:
        model.config[nengo.Ensemble].neuron_type = nl()

        u = nengo.Node(output=vin)   # input signal
        v = nengo.Node(output=vout)  # learning target
        a = nengo.Ensemble(n, dimensions=u.size_out)
        b = nengo.Ensemble(n, dimensions=u.size_out)
        e = nengo.Ensemble(n, dimensions=v.size_out)

        nengo.Connection(u, a)

        # if the target is lower-dimensional than the input, learn only on
        # the matching leading slice of b
        bslice = b[:v.size_out] if v.size_out < u.size_out else b
        pre = a.neurons if pre_neurons else a
        post = b.neurons if post_neurons else bslice

        conn = nengo.Connection(pre,
                                post,
                                function=function,
                                transform=transform,
                                learning_rule_type=PES(rate))
        if weight_solver:
            conn.solver = nengo.solvers.LstsqL2(weights=True)

        # error signal e = b - v, fed into the learning rule
        nengo.Connection(v, e, transform=-1)
        nengo.Connection(bslice, e)
        nengo.Connection(e, conn.learning_rule)

        b_p = nengo.Probe(bslice, synapse=0.03)
        e_p = nengo.Probe(e, synapse=0.03)

        weights_p = nengo.Probe(conn, 'weights', sample_every=0.01)
        corr_p = nengo.Probe(conn.learning_rule, 'correction', synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)
    t = sim.trange()
    weights = sim.data[weights_p]

    plt.subplot(311)
    plt.plot(t, sim.data[b_p])
    plt.ylabel("Post decoded value")
    plt.subplot(312)
    plt.plot(t, sim.data[e_p])
    plt.ylabel("Error decoded value")
    plt.subplot(313)
    plt.plot(t, sim.data[corr_p] / rate)
    plt.ylabel("PES correction")
    plt.xlabel("Time (s)")

    # after learning: post matches target, error and correction near zero,
    # and the weights have actually changed from their initial values
    tend = t > 0.4
    assert np.allclose(sim.data[b_p][tend], vout, atol=0.05)
    assert np.allclose(sim.data[e_p][tend], 0, atol=0.05)
    assert np.allclose(sim.data[corr_p][tend] / rate, 0, atol=0.05)
    assert not np.allclose(weights[0], weights[-1], atol=1e-5)
Ejemplo n.º 23
0
def test_voja_encoders(Simulator, PositiveNeuronType, rng, seed, allclose):
    """Tests that voja changes active encoders to the input."""
    n = 200
    learned_vector = np.asarray([0.3, -0.4, 0.6])
    learned_vector /= np.linalg.norm(learned_vector)  # Voja expects unit input
    n_change = n // 2  # modify first half of the encoders

    # Set the first half to always fire with random encoders, and the
    # remainder to never fire due to their encoder's dot product with the input
    intercepts = np.asarray([-1] * n_change + [0.99] * (n - n_change))
    rand_encoders = UniformHypersphere(surface=True).sample(
        n_change, len(learned_vector), rng=rng
    )
    encoders = np.append(rand_encoders, [-learned_vector] * (n - n_change), axis=0)

    m = nengo.Network(seed=seed)
    with m:
        m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
        u = nengo.Node(output=learned_vector)
        x = nengo.Ensemble(
            n,
            dimensions=len(learned_vector),
            intercepts=intercepts,
            encoders=encoders,
            max_rates=nengo.dists.Uniform(300.0, 400.0),
            radius=2.0,
        )  # to test encoder scaling

        conn = nengo.Connection(
            u, x, synapse=None, learning_rule_type=Voja(learning_rate=1e-1)
        )
        # probe the encoders both through the learning rule and the ensemble
        # so the two views can be compared at the end
        p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
        p_enc_ens = nengo.Probe(x, "scaled_encoders")

    with Simulator(m) as sim:
        sim.run(1.0)
    t = sim.trange()
    tend = t > 0.5

    # Voja's rule relies on knowing exactly how the encoders were scaled
    # during the build process, because it modifies the scaled_encoders signal
    # proportional to this factor. Therefore, we should check that its
    # assumption actually holds.
    encoder_scale = (sim.data[x].gain / x.radius)[:, np.newaxis]
    assert allclose(sim.data[x].encoders, sim.data[x].scaled_encoders / encoder_scale)

    # Check that the last half kept the same encoders throughout the simulation
    assert allclose(sim.data[p_enc][0, n_change:], sim.data[p_enc][:, n_change:])
    # and that they are also equal to their originally assigned value
    assert allclose(
        sim.data[p_enc][0, n_change:] / encoder_scale[n_change:], -learned_vector
    )

    # Check that the first half converged to the input
    assert allclose(
        sim.data[p_enc][tend, :n_change] / encoder_scale[:n_change],
        learned_vector,
        atol=0.01,
    )
    # Check that encoders probed from ensemble equal encoders probed from Voja
    assert allclose(sim.data[p_enc], sim.data[p_enc_ens])
Ejemplo n.º 24
0
def test_alif_neurons(Simulator, plt, rng):
    """Test that the adaptive LIF dynamic model matches the predicted rates

    Tests an Ensemble of neurons at a single input value
    """
    dt = 0.001
    n = 100
    x = .5  # the single represented input value being tested
    encoders = np.ones((n, 1))
    max_rates = rng.uniform(low=10, high=200, size=n)
    intercepts = rng.uniform(low=-1, high=1, size=n)

    net = nengo.Network()
    with net:
        ins = nengo.Node(x)
        ens = nengo.Ensemble(n,
                             dimensions=1,
                             encoders=encoders,
                             max_rates=max_rates,
                             intercepts=intercepts,
                             neuron_type=AdaptiveLIF(tau_n=.1, inc_n=.1))
        # drive every neuron with the same input value
        nengo.Connection(ins,
                         ens.neurons,
                         transform=np.ones((n, 1)),
                         synapse=None)
        spike_probe = nengo.Probe(ens.neurons)
        voltage_probe = nengo.Probe(ens.neurons, 'voltage')
        adaptation_probe = nengo.Probe(ens.neurons, 'adaptation')
        ref_probe = nengo.Probe(ens.neurons, 'refractory_time')

    sim = Simulator(net, dt=dt)

    t_final = 3.0
    t_ss = 1.0  # time to consider neurons at steady state
    sim.run(t_final)

    # plot the internal dynamics of one randomly chosen neuron during the
    # transient (pre-steady-state) period
    n_select = rng.choice(n)  # pick a random neuron
    t = sim.trange()
    idx = t < t_ss
    plt.figure(figsize=(10, 6))
    plt.subplot(411)
    plt.plot(t[idx], sim.data[spike_probe][idx, n_select])
    plt.ylabel('spikes')
    plt.subplot(412)
    plt.plot(t[idx], sim.data[voltage_probe][idx, n_select])
    plt.ylabel('voltage')
    plt.subplot(413)
    plt.plot(t[idx], sim.data[adaptation_probe][idx, n_select])
    plt.ylabel('adaptation')
    plt.subplot(414)
    plt.plot(t[idx], sim.data[ref_probe][idx, n_select])
    plt.ylim([-dt, ens.neuron_type.tau_ref + dt])
    plt.xlabel('time')
    plt.ylabel('ref time')

    # check rates against analytic rates
    math_rates = ens.neuron_type.rates(
        x, *ens.neuron_type.gain_bias(max_rates, intercepts))
    idx = t >= t_ss
    spikes = sim.data[spike_probe][idx, :]
    # empirical rate = spike count over the steady-state window
    sim_rates = (spikes > 0).sum(0) / (t_final - t_ss)
    logger.debug("ME = %f", (sim_rates - math_rates).mean())
    logger.debug("RMSE = %f",
                 rms(sim_rates - math_rates) / (rms(math_rates) + 1e-20))
    assert np.sum(math_rates > 0) > 0.5 * n, (
        "At least 50% of neurons must fire")
    assert np.allclose(sim_rates, math_rates, atol=1, rtol=0.001)
Ejemplo n.º 25
0
import nengo
import numpy as np

# Minimal empty network skeleton; model objects go inside the ``with`` block.
model = nengo.Network(label="NetworkName")

with model:
    pass
Ejemplo n.º 26
0
            # lin_vels[n, s] = 0

        cartesian_vels[s, 0] = np.cos(angles[s - 1]) * lin_vels[s]
        cartesian_vels[s, 1] = np.sin(angles[s - 1]) * lin_vels[s]
        positions[s, 0] = positions[s - 1, 0] + cartesian_vels[s, 0] * dt
        positions[s, 1] = positions[s - 1, 1] + cartesian_vels[s, 1] * dt
        angles[s] = angles[s - 1] + ang_vels[s] * dt
        if angles[s] > np.pi:
            angles[s] -= 2 * np.pi
        elif angles[s] < -np.pi:
            angles[s] += 2 * np.pi

    return positions


model = nengo.Network(seed=args.seed)

dt = 0.001
n_samples = int(args.duration / dt)

# set of positions to visit on a space filling curve, one for each timestep
if args.trajectory_type == 'hilbert':
    # positions = hilbert_2d(-args.limit, args.limit, n_samples, rng, p=8, N=2, normal_std=0)
    positions = hilbert_2d(-args.limit,
                           args.limit,
                           n_samples,
                           rng,
                           p=6,
                           N=2,
                           normal_std=0)
elif args.trajectory_type == 'random':
Ejemplo n.º 27
0
    def initialize_model(self):
        """Generate the Nengo model that will control the arm.

        Loads the arm-specific OSC configuration and builds an
        operational-space controller network with cerebellum (CB) and
        motor-cortex (M1) ensembles.  Each timestep the network writes the
        resulting 3D control signal into ``self.u``.

        Returns:
            nengo.Network: the constructed controller.  The network also
            carries ``model.target``, ``model.output``, ``model.CB``,
            ``model.M1``, and ``model.CB2_inhibit`` for external access.
        """

        # Dynamically import the arm-specific OSC config module,
        # e.g. _spaun.arms.<arm_class_name>.config
        config_file = importlib.import_module(
            '_spaun.arms.%s.config' % self.arm_class_name, 'OSCConfig')
        config = config_file.OSCConfig(self.adaptation)
        self.config = config

        # ----------------------------------------------------------------

        model = nengo.Network('OSC', seed=2)
        # Default synapse for every connection unless overridden below.
        model.config[nengo.Connection].synapse = nengo.synapses.Lowpass(.001)

        with model:
            # model.config[nengo.Ensemble].neuron_type = nengo.Direct()

            # create input nodes
            # Arm state: slices below use [:6] for joint state and
            # [6]/[7] for end-effector x/y -- presumably
            # [q(3), dq(3), hand_x, hand_y]; confirm against get_arm_state.
            arm_node = nengo.Node(self.get_arm_state, size_out=8)

            # def get_target(t):
            #     return model.target
            # model.target = nengo.Node(output=get_target)
            model.target = nengo.Node(output=self.set_target, size_in=2)

            # create neural ensembles
            CB = nengo.Ensemble(**config.CB)
            M1 = nengo.Ensemble(**config.M1)
            M1_mult = nengo.networks.EnsembleArray(**config.M1_mult)

            model.CB = CB
            model.M1 = M1

            # create summation / output ensembles
            # u_relay = nengo.Ensemble(n_neurons=1, dimensions=3,
            #                          neuron_type=nengo.Direct())
            u_relay = nengo.Node(size_in=3)
            model.output = u_relay

            def set_output(t, x):
                # Publish the final control signal for the arm simulation.
                self.u = x

            output_node = nengo.Node(output=set_output, size_in=3)

            # connect up arm feedback to Cerebellum
            nengo.Connection(arm_node[:6],
                             CB,
                             function=lambda x: config.CB_scaledown(x))

            def gen_Mqdq(signal, kv):
                """Generate inertia compensation signal, np.dot(Mq,dq)"""
                # scale things back
                signal = config.CB_scaleup(signal)

                q = signal[:3]
                dq = signal[3:6]

                Mq = self.arm.gen_Mq(q=q)
                # NOTE(review): the ``kv`` parameter is ignored; ``self.kv``
                # is used instead, so the CB2 path below (which passes
                # self.kv2) actually applies self.kv too.  The commented
                # line shows the apparent intent -- confirm before changing.
                # return np.dot(Mq, kv * dq).flatten()
                return np.dot(Mq, self.kv * dq).flatten()

            # connect up Cerebellum inertia compensation to summation node
            nengo.Connection(CB,
                             u_relay,
                             function=lambda x: gen_Mqdq(x, self.kv),
                             transform=-1,
                             synapse=None)  # , synapse=.005)

            model.CB2_inhibit = nengo.Node(size_in=1)
            if self.kv2 != 0:
                # Additional KV ensemble so that we can have another KV value
                # (if necessary) to use when moving arm to start of target
                # trajectory (can reduce time between written digits).
                CB2 = nengo.Ensemble(**config.CB)
                nengo.Connection(arm_node[:6],
                                 CB2,
                                 function=lambda x: config.CB_scaledown(x))
                nengo.Connection(CB2,
                                 u_relay,
                                 function=lambda x: gen_Mqdq(x, self.kv2),
                                 transform=-1,
                                 synapse=None)  # , synapse=.005)
                # Inhibitory gate: driving CB2_inhibit positive silences
                # every CB2 neuron.
                nengo.Connection(model.CB2_inhibit,
                                 CB2.neurons,
                                 transform=([[-config.CB['radius'] * 2.5]] *
                                            config.CB['n_neurons']),
                                 synapse=None)

            # connect up the array for calculating np.dot(JeeTMx, u)
            M1_mult_output = \
                M1_mult.add_output('mult_scaled',
                                   function=config.DP_scaleup_list)

            # connect up control signal input
            for ii in range(0, 6, 2):
                # control is (goal - state) (kp scaling happens on output)
                # connect up goal
                nengo.Connection(model.target[0],
                                 M1_mult.input[ii * 2],
                                 transform=1. / config.u_scaling[0])
                nengo.Connection(model.target[1],
                                 M1_mult.input[ii * 2 + 2],
                                 transform=1. / config.u_scaling[1])
                # connect up state (-1 on transform)
                nengo.Connection(arm_node[6],
                                 M1_mult.input[ii * 2],
                                 transform=-1. / config.u_scaling[0])
                nengo.Connection(arm_node[7],
                                 M1_mult.input[ii * 2 + 2],
                                 transform=-1. / config.u_scaling[1])
            # connect up dot product output (post scaling) to summation node
            # block_node multiplies by (not self.block_output), zeroing the
            # control signal whenever output blocking is enabled.
            block_node = \
                nengo.Node(output=lambda t, x: x * (not self.block_output),
                           size_in=3, size_out=3)
            nengo.Connection(M1_mult_output[::2],
                             block_node,
                             transform=self.kp)
            nengo.Connection(M1_mult_output[1::2],
                             block_node,
                             transform=self.kp)
            nengo.Connection(block_node, u_relay)

            # connect up summation node u_relay to arm
            nengo.Connection(u_relay, output_node, synapse=None)

            # connect up arm feedback to M1
            # pass in sin and cos of x[0], x[0]+x[1], x[0]+x[1]+x[2]
            nengo.Connection(
                arm_node[:3],
                M1[:6],
                function=lambda x: config.M1_scaledown(
                    np.hstack([np.sin(np.cumsum(x)),
                               np.cos(np.cumsum(x))])))

            def gen_JEETMx(signal, use_incorrect_values=False):
                """Generate Jacobian weighted by task-space inertia matrix"""
                # scale things back
                signal = config.M1_scaleup(signal)

                sinq = signal[:3]
                cosq = signal[3:6]

                Mx = self.arm.gen_Mx_sinq_cosq(sinq=sinq, cosq=cosq)
                JEE = self.arm.gen_jacEE_sinq_cosq(
                    sinq=sinq,
                    cosq=cosq,
                    use_incorrect_values=use_incorrect_values)  # noqa
                JEETMx = np.dot(JEE.T, Mx)
                return JEETMx.flatten()

            def scaled_gen_JEETMx(signal, **kwargs):
                # Scale the dot-product terms into the ensemble's range.
                return config.DP_scaledown(gen_JEETMx(signal, **kwargs))

            if self.adaptation != 'kinematic':
                # set up regular transform connection
                nengo.Connection(M1[:6],
                                 M1_mult.input[1::2],
                                 function=scaled_gen_JEETMx,
                                 synapse=.005)

            # ------------------ set up null control ------------------
            if self.null_control:

                def gen_null_signal(signal):
                    """Generate the null space control signal"""

                    # calculate our secondary control signal
                    q = config.M1null_scaleup(signal[:3])
                    # wrap (rest_angles - q) into [-pi, pi)
                    u_null = (((self.arm.rest_angles - q) + np.pi) %
                              (np.pi * 2) - np.pi)

                    Mq = self.arm.gen_Mq(q=q)
                    JEE = self.arm.gen_jacEE(q=q)
                    Mx = self.arm.gen_Mx(q=q)

                    u_null = np.dot(Mq, self.kp * u_null)

                    # calculate the null space filter
                    Jdyn_inv = np.dot(Mx, np.dot(JEE, np.linalg.inv(Mq)))
                    null_filter = np.eye(3) - np.dot(JEE.T, Jdyn_inv)

                    return np.dot(null_filter, u_null).flatten()

                M1_null = nengo.Ensemble(**config.M1_null)

                nengo.Connection(arm_node[:3],
                                 M1_null,
                                 function=config.M1null_scaledown)
                nengo.Connection(M1_null, block_node, function=gen_null_signal)
            # --------------------------------------------------------
        return model
Ejemplo n.º 28
0
def go(ePExc=None, eIExc=None, ePInh=None, eIInh=None, ePP=None, ePI=None, eIP=None, eII=None, wPExc=None, wIExc=None, wPInh=None, wIInh=None, wPP=None, wPI=None, wIP=None, wII=None, dNMDA=None, dAMPA=None, dGABA=None, f_NMDA=None, f_AMPA=None, f_GABA=None, f_s=None, stim=lambda t: 0, DA=lambda t: 0, n_pre=200, n_neurons=30, t=10, dt=0.001, m=Uniform(30, 40), i=Uniform(-1, 0.8), kFF=-2, kFB=-2, seed=0, stage=0):
    
    wDaInpt = kFF*np.ones((n_pre, 1))
    wDaFdbk = kFB*np.ones((n_neurons, 1))
    with nengo.Network(seed=seed) as model:
        # Stimulus and Nodes
        u = nengo.Node(stim)
        uDA = nengo.Node(DA)
        # Ensembles
        pre = nengo.Ensemble(n_pre, 1, seed=seed, label="pre")
        P = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Pyramidal", DA=DA), seed=seed, label="P")
        I = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Interneuron", DA=DA), seed=seed, label="I")
        supv = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=nengo.LIF(), seed=seed)
        gate = nengo.Ensemble(n_pre, 1, seed=seed, label="gate")
        buffer = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=nengo.LIF(), seed=seed)
        fdbk = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=nengo.LIF(), seed=seed)
        # Connections
        uPre = nengo.Connection(u, pre, synapse=None, seed=seed)
        prePExc = nengo.Connection(pre, P, synapse=AMPA(), seed=seed)
        preIExc = nengo.Connection(pre, I, synapse=AMPA(), seed=seed)
        prePInh = nengo.Connection(pre, P, synapse=GABA(), seed=seed)
        preIInh = nengo.Connection(pre, I, synapse=GABA(), seed=seed)
        nengo.Connection(u, gate, synapse=None)
        nengo.Connection(gate, buffer, synapse=f_AMPA)
        nengo.Connection(buffer, fdbk, synapse=f_NMDA)
        nengo.Connection(fdbk, buffer, synapse=f_AMPA)
        nengo.Connection(uDA, gate.neurons, transform=wDaInpt, function=lambda x: x)
        nengo.Connection(uDA, fdbk.neurons, transform=wDaFdbk, function=lambda x: 1-x)
        # Probes
        p_u = nengo.Probe(u, synapse=None)
        p_DA = nengo.Probe(uDA, synapse=None)
        p_P = nengo.Probe(P.neurons, synapse=None)
        p_I = nengo.Probe(I.neurons, synapse=None)
        p_supv = nengo.Probe(supv.neurons, synapse=None)
        p_supv_x = nengo.Probe(supv, synapse=f_NMDA)
        p_gate = nengo.Probe(gate.neurons, synapse=None)
        p_gate_x = nengo.Probe(gate, synapse=f_AMPA)
        p_buffer = nengo.Probe(buffer.neurons, synapse=None)
        p_buffer_x = nengo.Probe(buffer, synapse=f_NMDA)
        p_fdbk = nengo.Probe(fdbk.neurons, synapse=None)
        p_fdbk_x = nengo.Probe(fdbk, synapse=f_NMDA)
        # Training
        if stage == 1:
            nengo.Connection(u, supv, synapse=f_AMPA, seed=seed)
            node = WNode(prePExc, alpha=1e-4, exc=True)
            nengo.Connection(pre.neurons, node[0:n_pre], synapse=f_AMPA)
            nengo.Connection(P.neurons, node[n_pre:n_pre+n_neurons], synapse=f_s)
            nengo.Connection(supv.neurons, node[n_pre+n_neurons: n_pre+2*n_neurons], synapse=f_s)
            node2 = WNode(preIExc, alpha=1e-5, exc=True)
            nengo.Connection(pre.neurons, node2[0:n_pre], synapse=f_AMPA)
            nengo.Connection(I.neurons, node2[n_pre:n_pre+n_neurons], synapse=f_s)
            nengo.Connection(supv.neurons, node2[n_pre+n_neurons: n_pre+2*n_neurons], synapse=f_s)
            node3 = WNode(prePInh, alpha=1e-4, inh=True)
            nengo.Connection(pre.neurons, node3[0:n_pre], synapse=f_GABA)
            nengo.Connection(P.neurons, node3[n_pre:n_pre+n_neurons], synapse=f_s)
            nengo.Connection(supv.neurons, node3[n_pre+n_neurons: n_pre+2*n_neurons], synapse=f_s)
            node4 = WNode(preIInh, alpha=1e-5, inh=True)
            nengo.Connection(pre.neurons, node4[0:n_pre], synapse=f_GABA)
            nengo.Connection(I.neurons, node4[n_pre:n_pre+n_neurons], synapse=f_s)
            nengo.Connection(supv.neurons, node4[n_pre+n_neurons: n_pre+2*n_neurons], synapse=f_s)
        if stage == 2:
            nengo.Connection(u, supv, synapse=f_AMPA, seed=seed)
        if stage == 3:
            #PTar, ITar have target activities from a gated integrator
            #PDrive, IDrive drive P, I with input from "feedback" pop
#             PTar = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Pyramidal", DA=lambda t: 0), seed=seed, label="PTar")
#             ITar = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Interneuron", DA=lambda t: 0), seed=seed, label="ITar")
#             PDrive = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Pyramidal", DA=DA), seed=seed, label="PDrive")  # DA?
#             IDrive = nengo.Ensemble(n_neurons, 1, max_rates=m, intercepts=i, neuron_type=BioNeuron("Interneuron", DA=DA), seed=seed, label="IDrive")  # DA?
            # gated integrator populations drive PDrive/IDrive and PTar/ITar
#             bufferPPTar = nengo.Connection(buffer, PTar, synapse=f_AMPA, seed=seed)
#             bufferPITar = nengo.Connection(buffer, ITar, synapse=f_AMPA, seed=seed)
#             bufferIPTar = nengo.Connection(buffer, PTar, synapse=f_GABA, seed=seed)
#             bufferIITar = nengo.Connection(buffer, ITar, synapse=f_GABA, seed=seed)
#             fdbkPPDrive = nengo.Connection(fdbk, PDrive, synapse=f_AMPA, seed=seed) 
#             fdbkPIDrive = nengo.Connection(fdbk, IDrive, synapse=f_AMPA, seed=seed)
#             fdbkIPDrive = nengo.Connection(fdbk, PDrive, synapse=f_GABA, seed=seed) 
#             fdbkIIDrive = nengo.Connection(fdbk, IDrive, synapse=f_GABA, seed=seed)
#             # PDrive, IDrive drive P and I with ideal feedback activities given current gate state
#             PDriveP = nengo.Connection(PDrive, P, synapse=NMDA(), solver=NoSolver(dNMDA), seed=seed) 
#             PDriveI = nengo.Connection(PDrive, I, synapse=NMDA(), solver=NoSolver(dNMDA), seed=seed) 
#             IDriveP = nengo.Connection(IDrive, P, synapse=GABA(), solver=NoSolver(dGABA), seed=seed) 
#             IDriveI = nengo.Connection(IDrive, I, synapse=GABA(), solver=NoSolver(dGABA), seed=seed) 
            # P and I are driven with ideal feedback from fdbk
            fdbkPExc = nengo.Connection(fdbk, P, synapse=NMDA(), seed=seed) 
            fdbkIExc = nengo.Connection(fdbk, I, synapse=NMDA(), seed=seed) 
            fdbkPInh = nengo.Connection(fdbk, P, synapse=GABA(), seed=seed) 
            fdbkIInh = nengo.Connection(fdbk, I, synapse=GABA(), seed=seed) 
            # P and I have target activities from buffer
            node = WNode(fdbkPExc, alpha=1e-5, exc=True)
            nengo.Connection(fdbk.neurons, node[0:n_neurons], synapse=f_NMDA)
            nengo.Connection(P.neurons, node[n_neurons:2*n_neurons], synapse=f_s)
            nengo.Connection(buffer.neurons, node[2*n_neurons: 3*n_neurons], synapse=f_s)
            node2 = WNode(fdbkIExc, alpha=1e-6, exc=True)
            nengo.Connection(fdbk.neurons, node2[0:n_neurons], synapse=f_NMDA)
            nengo.Connection(I.neurons, node2[n_neurons:2*n_neurons], synapse=f_s)
            nengo.Connection(buffer.neurons, node2[2*n_neurons: 3*n_neurons], synapse=f_s)
            node3 = WNode(fdbkPInh, alpha=1e-5, inh=True)
            nengo.Connection(fdbk.neurons, node3[0:n_neurons], synapse=f_GABA)
            nengo.Connection(P.neurons, node3[n_neurons:2*n_neurons], synapse=f_s)
            nengo.Connection(buffer.neurons, node3[2*n_neurons: 3*n_neurons], synapse=f_s)
            node4 = WNode(fdbkIInh, alpha=1e-6, inh=True)
            nengo.Connection(fdbk.neurons, node4[0:n_neurons], synapse=f_GABA)
            nengo.Connection(I.neurons, node4[n_neurons:2*n_neurons], synapse=f_s)
            nengo.Connection(buffer.neurons, node4[2*n_neurons: 3*n_neurons], synapse=f_s)
#             p_PDrive = nengo.Probe(PDrive.neurons, synapse=None)
#             p_IDrive = nengo.Probe(IDrive.neurons, synapse=None)
#             p_PTar = nengo.Probe(PTar.neurons, synapse=None)
#             p_ITar = nengo.Probe(ITar.neurons, synapse=None)
        if stage == 4:
            PP = nengo.Connection(P, P, synapse=f_NMDA, seed=seed, solver=NoSolver(dNMDA))
            PI = nengo.Connection(P, I, synapse=f_NMDA, seed=seed, solver=NoSolver(dNMDA))
            IP = nengo.Connection(I, P, synapse=f_GABA, seed=seed, solver=NoSolver(dGABA))
            II = nengo.Connection(I, I, synapse=f_GABA, seed=seed, solver=NoSolver(dGABA))

    with nengo.Simulator(model, seed=seed, progress_bar=False) as sim:
        if stage > 1:
            for pre in range(n_pre):
                for post in range(n_neurons):
                    prePExc.weights[pre, post] = wPExc[pre, post]
                    prePExc.netcons[pre, post].weight[0] = np.abs(wPExc[pre, post])
                    prePExc.netcons[pre, post].syn().e = 0 if wPExc[pre, post] > 0 else -70
                    preIExc.weights[pre, post] = wIExc[pre, post]
                    preIExc.netcons[pre, post].weight[0] = np.abs(wIExc[pre, post])
                    preIExc.netcons[pre, post].syn().e = 0 if wIExc[pre, post] > 0 else -70
                    prePInh.weights[pre, post] = wPInh[pre, post]
                    prePInh.netcons[pre, post].weight[0] = np.abs(wPInh[pre, post])
                    prePInh.netcons[pre, post].syn().e = 0 if wPInh[pre, post] > 0 else -70
                    preIInh.weights[pre, post] = wIInh[pre, post]
                    preIInh.netcons[pre, post].weight[0] = np.abs(wIInh[pre, post])
                    preIInh.netcons[pre, post].syn().e = 0 if wIInh[pre, post] > 0 else -70
#         if stage==3:
#             for pre in range(n_pre):
#                 for post in range(n_neurons):
#                     bufferPPTar.weights[pre, post] = wpPP[pre, post]
#                     bufferPPTar.netcons[pre, post].weight[0] = np.abs(wpPP[pre, post])
#                     bufferPPTar.netcons[pre, post].syn().e = 0 if wpPP[pre, post] > 0 else -70
#                     bufferPITar.weights[pre, post] = wpPI[pre, post]
#                     bufferPITar.netcons[pre, post].weight[0] = np.abs(wpPI[pre, post])
#                     bufferPITar.netcons[pre, post].syn().e = 0 if wpPI[pre, post] > 0 else -70
#                     bufferIPTar.weights[pre, post] = wpIP[pre, post]
#                     bufferIPTar.netcons[pre, post].weight[0] = np.abs(wpIP[pre, post])
#                     bufferIPTar.netcons[pre, post].syn().e = 0 if wpIP[pre, post] > 0 else -70
#                     bufferIITar.weights[pre, post] = wpII[pre, post]
#                     bufferIITar.netcons[pre, post].weight[0] = np.abs(wpII[pre, post])
#                     bufferIITar.netcons[pre, post].syn().e = 0 if wpII[pre, post] > 0 else -70
#                     fdbkPPDrive.weights[pre, post] = wpPP[pre, post]
#                     fdbkPPDrive.netcons[pre, post].weight[0] = np.abs(wpPP[pre, post])
#                     fdbkPPDrive.netcons[pre, post].syn().e = 0 if wpPP[pre, post] > 0 else -70
#                     fdbkPIDrive.weights[pre, post] = wpPI[pre, post]
#                     fdbkPIDrive.netcons[pre, post].weight[0] = np.abs(wpPI[pre, post])
#                     fdbkPIDrive.netcons[pre, post].syn().e = 0 if wpPI[pre, post] > 0 else -70
#                     fdbkIPDrive.weights[pre, post] = wpIP[pre, post]
#                     fdbkIPDrive.netcons[pre, post].weight[0] = np.abs(wpIP[pre, post])
#                     fdbkIPDrive.netcons[pre, post].syn().e = 0 if wpIP[pre, post] > 0 else -70
#                     fdbkIIDrive.weights[pre, post] = wpII[pre, post]
#                     fdbkIIDrive.netcons[pre, post].weight[0] = np.abs(wpII[pre, post])
#                     fdbkIIDrive.netcons[pre, post].syn().e = 0 if wpII[pre, post] > 0 else -70
        if stage==4:
            for pre in range(n_neurons):
                for post in range(n_neurons):
                    PP.weights[pre, post] = wPP[pre, post]
                    PP.netcons[pre, post].weight[0] = np.abs(wPP[pre, post])
                    PP.netcons[pre, post].syn().e = 0 if wPP[pre, post] > 0 else -70
                    PI.weights[pre, post] = wPI[pre, post]
                    PI.netcons[pre, post].weight[0] = np.abs(wPI[pre, post])
                    PI.netcons[pre, post].syn().e = 0 if wPI[pre, post] > 0 else -70
                    IP.weights[pre, post] = wIP[pre, post]
                    IP.netcons[pre, post].weight[0] = np.abs(wIP[pre, post])
                    IP.netcons[pre, post].syn().e = 0 if wIP[pre, post] > 0 else -70
                    II.weights[pre, post] = wII[pre, post]
                    II.netcons[pre, post].weight[0] = np.abs(wII[pre, post])
                    II.netcons[pre, post].syn().e = 0 if wII[pre, post] > 0 else -70 
        if stage==1:
            if np.any(ePExc): prePExc.e = ePExc
            if np.any(eIExc): preIExc.e = eIExc
            if np.any(ePInh): prePInh.e = ePInh
            if np.any(eIInh): preIInh.e = eIInh
        if stage==3:
            if np.any(ePP): fdbkPExc.e = ePP
            if np.any(ePI): fdbkIExc.e = ePI
            if np.any(eIP): fdbkPInh.e = eIP
            if np.any(eII): fdbkIInh.e = eII
        neuron.h.init()
        sim.run(t, progress_bar=True)
        reset_neuron(sim, model) 
        
    if stage == 1:
        ePExc = prePExc.e
        wPExc = prePExc.weights
        eIExc = preIExc.e
        wIExc = preIExc.weights
        ePInh = prePInh.e
        wPInh = prePInh.weights
        eIInh = preIInh.e
        wIInh = preIInh.weights
    if stage == 3:
        ePP = fdbkPExc.e
        wPP = fdbkPExc.weights
        ePI = fdbkIExc.e
        wPI = fdbkIExc.weights
        eIP = fdbkPInh.e
        wIP = fdbkPInh.weights
        eII = fdbkIInh.e
        wII = fdbkIInh.weights
    return dict(
        times=sim.trange(),
        u=sim.data[p_u],
        uDA=sim.data[p_DA],
        P=sim.data[p_P],
        I=sim.data[p_I],
        supv=sim.data[p_supv],
        supv_x=sim.data[p_supv_x],
        gate=sim.data[p_gate],
        gate_x=sim.data[p_gate_x],
        buffer=sim.data[p_buffer],
        buffer_x=sim.data[p_buffer_x],
        fdbk=sim.data[p_fdbk],
        fdbk_x=sim.data[p_fdbk_x],
        ePExc=ePExc,
        wPExc=wPExc,
        eIExc=eIExc,
        wIExc=wIExc,
        ePInh=ePInh,
        wPInh=wPInh,
        eIInh=eIInh,
        wIInh=wIInh,
        ePP=ePP,
        wPP=wPP,
        ePI=ePI,
        wPI=wPI,
        eIP=eIP,
        wIP=wIP,
        eII=eII,
        wII=wII,
#         PDrive=sim.data[p_PDrive] if stage==3 else None,
#         IDrive=sim.data[p_IDrive] if stage==3 else None,
#         PTar=sim.data[p_PTar] if stage==3 else None,
#         ITar=sim.data[p_ITar] if stage==3 else None,
    )
Ejemplo n.º 29
0
    def f(t):
        """Return the element of ``x`` scheduled for simulation time ``t``.

        The index advances once every ``i_every`` timesteps and wraps
        around ``x`` cyclically.
        """
        i = int(round((t - dt) / dt))  # t starts at dt
        # Floor division is required: in Python 3 `/` yields a float, and
        # indexing a sequence/array with a float raises an error.
        return x[(i // i_every) % len(x)]

    return f


# Model constants
n_neurons = 200
dt = 0.001
period = 0.3
T = period * num_items * 2

# Model network
model = nengo.Network()
with model:

    # Create the inputs/outputs
    stim_keys = nengo.Node(output=cycle_array(keys, period, dt))
    stim_values = nengo.Node(output=cycle_array(values, period, dt))
    learning = nengo.Node(output=lambda t: -int(t >= T / 2))
    recall = nengo.Node(size_in=d_value)

    # Create the memory
    memory = nengo.Ensemble(n_neurons,
                            d_key,
                            intercepts=[intercept] * n_neurons)

    # Learn the encoders/keys
    voja = nengo.Voja(post_tau=None, learning_rate=5e-2)
Ejemplo n.º 30
0
def test_slicing(Simulator, nl, plt, seed):
    """Verify that slicing connection pre/post objects works, both for
    decoded connections and for full weight-matrix solvers."""
    N = 300

    x = np.array([-1, -0.25, 1])

    # Case 1: negative-step slice on pre, list index on post.
    s1a = slice(1, None, -1)
    s1b = [2, 0]
    T1 = [[-1, 0.5], [2, 0.25]]
    y1 = np.zeros(3)
    y1[s1b] = np.dot(T1, x[s1a])

    # Case 2: list index on pre, plain slice on post.
    s2a = [0, 2]
    s2b = slice(0, 2)
    T2 = [[-0.5, 0.25], [0.5, 0.75]]
    y2 = np.zeros(3)
    y2[s2b] = np.dot(T2, x[s2a])

    # Case 3: numpy-array index on post, diagonal transform.
    s3a = [2, 0]
    s3b = np.asarray([0, 2])  # test slicing with numpy array
    T3 = [0.5, 0.75]
    y3 = np.zeros(3)
    y3[s3b] = np.dot(np.diag(T3), x[s3a])

    cases = list(zip([s1a, s2a, s3a], [s1b, s2b, s3b], [T1, T2, T3]))
    ys = [y1, y2, y3]

    weight_solver = nengo.solvers.LstsqL2(weights=True)

    with nengo.Network(seed=seed) as m:
        m.config[nengo.Ensemble].neuron_type = nl()

        u = nengo.Node(output=x)
        a = nengo.Ensemble(N, dimensions=3, radius=1.7)
        nengo.Connection(u, a)

        probes, weight_probes = [], []
        for sa, sb, T in cases:
            # Decoded connection with sliced pre/post.
            b = nengo.Ensemble(N, dimensions=3, radius=1.7)
            nengo.Connection(a[sa], b[sb], transform=T)
            probes.append(nengo.Probe(b, synapse=0.03))

            # Same connection solved for a full weight matrix.
            b = nengo.Ensemble(N, dimensions=3, radius=1.7)
            nengo.Connection(a[sa], b[sb], transform=T, solver=weight_solver)
            weight_probes.append(nengo.Probe(b, synapse=0.03))

    with Simulator(m) as sim:
        sim.run(0.2)
    t = sim.trange()

    # Plot each expected constant against the decoded output.
    for i, (y, p) in enumerate(zip(ys, probes)):
        plt.subplot(len(ys), 1, i + 1)
        plt.plot(t, np.tile(y, (len(t), 1)), "--")
        plt.plot(t, sim.data[p])

    atol = 0.01 if nl is nengo.Direct else 0.1
    for i, (y, p, wp) in enumerate(zip(ys, probes, weight_probes)):
        assert np.allclose(y, sim.data[p][-20:], atol=atol), "Failed %d" % i
        assert np.allclose(y, sim.data[wp][-20:], atol=atol), "Weights %d" % i