Example #1
def FeedforwardDeriv(n_neurons,
                     dimensions,
                     tau_fast=0.005,
                     tau_slow=0.1,
                     net=None):
    if net is None:
        net = nengo.Network(label="Derivative (two feedforward connections)")

    assert tau_slow > tau_fast, ("tau_slow (%s) must be > tau_fast (%s)" %
                                 (tau_slow, tau_fast))

    with net:
        net.ea_in = EnsembleArray(n_neurons, n_ensembles=dimensions)
        net.ea_out = EnsembleArray(n_neurons, n_ensembles=dimensions)
        nengo.Connection(net.ea_in.output,
                         net.ea_out.input,
                         synapse=tau_fast,
                         transform=1. / tau_slow)
        nengo.Connection(net.ea_in.output,
                         net.ea_out.input,
                         synapse=tau_slow,
                         transform=-1. / tau_slow)
        net.input = net.ea_in.input
        net.output = net.ea_out.output
    return net
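A minimal usage sketch for the network above (not part of the original example): it assumes the usual imports (`nengo`, `numpy`, and `EnsembleArray` from `nengo.networks`) plus the `FeedforwardDeriv` function defined here, and feeds the network a ramp so its output should settle near the ramp's slope. The stimulus, probe name, and run time are illustrative only.

with nengo.Network() as model:
    deriv = FeedforwardDeriv(n_neurons=100, dimensions=1)
    stim = nengo.Node(lambda t: 0.5 * t)  # ramp with slope 0.5
    nengo.Connection(stim, deriv.input, synapse=None)
    p_deriv = nengo.Probe(deriv.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(1.0)

# Once the two lowpass filters settle, sim.data[p_deriv] should hover roughly
# around the ramp slope (a bit below 0.5, given the finite filter constants).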
Example #2
    def __init__(self, n_neurons, dimensions, fdbk_synapse=0.1,
                 conn_synapse=0.005, fdbk_scale=1.0, gate_gain=2.0,
                 reset_gain=3, **ens_args):

        self.input = nengo.Node(size_in=dimensions)
        self.output = nengo.Node(size_in=dimensions)

        # gate control signal (if gate==0, update stored value, otherwise
        # retain stored value)
        self.gate = nengo.Node(size_in=1)

        # integrator to store value
        self.mem = EnsembleArray(n_neurons, dimensions, label="mem",
                                 **ens_args)

        # ensemble to gate feedback
        self.fdbk = EnsembleArray(n_neurons, dimensions, label="fdbk",
                                  **ens_args)

        # ensemble to gate input
        self.in_gate = EnsembleArray(n_neurons, dimensions, label="in_gate",
                                     **ens_args)

        # calculate gating control signal
        self.ctrl = nengo.Ensemble(n_neurons, 1, label="ctrl")

        # Connection from mem to fdbk, and from fdbk to mem
        nengo.Connection(self.mem.output, self.fdbk.input,
                         synapse=fdbk_synapse - conn_synapse)
        nengo.Connection(self.fdbk.output, self.mem.input,
                         transform=np.eye(dimensions) * fdbk_scale,
                         synapse=conn_synapse)

        # Connection from input to in_gate, and from in_gate to mem
        nengo.Connection(self.input, self.in_gate.input, synapse=None)
        nengo.Connection(self.in_gate.output, self.mem.input,
                         synapse=conn_synapse)

        # Connection from gate to ctrl
        nengo.Connection(self.gate, self.ctrl, synapse=None)

        # Connection from ctrl to fdbk and in_gate
        for e in self.fdbk.ensembles:
            nengo.Connection(self.ctrl, e.neurons,
                             function=lambda x: [1 - x[0]],
                             transform=[[-gate_gain]] * e.n_neurons)
        for e in self.in_gate.ensembles:
            nengo.Connection(self.ctrl, e.neurons,
                             transform=[[-gate_gain]] * e.n_neurons)

        # Connection from mem to output
        nengo.Connection(self.mem.output, self.output, synapse=None)

        # reset input (if reset=1, remove all values stored, and set values
        # to 0)
        self.reset = nengo.Node(size_in=1)
        for e in self.mem.ensembles:
            nengo.Connection(self.reset, e.neurons,
                             transform=[[-reset_gain]] * e.n_neurons)
Example #3
    def build_unbind(self, model):
        A_input_func = make_func(self, "A_input_vector")
        B_input_func = make_func(self, "B_input_vector")

        neurons_per_dim = self.neurons_per_dim
        radius = self.radius
        synapse = self.synapse
        dimension = self.dimension

        with model:
            A_input = nengo.Node(output=A_input_func, size_out=dimension)
            B_input = nengo.Node(output=B_input_func, size_out=dimension)

            A = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="A",
                              radius=radius)

            B = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="B",
                              radius=radius)

            cconv = CircularConvolution(n_neurons=int(2 * neurons_per_dim),
                                        dimensions=dimension,
                                        invert_b=True)

            D = EnsembleArray(n_neurons=neurons_per_dim,
                              n_ensembles=dimension,
                              label="D",
                              radius=radius)

            A_output = A.output
            B_output = B.output
            D_output = D.output
            cconv_output = cconv.output

            nengo.Connection(A_input, A.input)
            nengo.Connection(B_input, B.input)

            nengo.Connection(A_output, cconv.A, synapse=synapse)
            nengo.Connection(B_output, cconv.B, synapse=synapse)
            nengo.Connection(cconv_output, D.input, synapse=synapse)

            assoc_synapse = self.assoc_params.synapse

            self.D_probe = nengo.Probe(D_output,
                                       'output',
                                       synapse=assoc_synapse)

            self.input_probe = nengo.Probe(A_output, 'output', synapse=synapse)

            self.D_output = D_output

            self.A = A
            self.B = B
            self.cconv = cconv
            self.D = D
Example #4
    def __init__(self,
                 n_neurons,
                 dimensions,
                 feedback=1.0,
                 difference_gain=1.0,
                 recurrent_synapse=0.1,
                 difference_synapse=None,
                 **kwargs):

        if 'net' in kwargs:
            raise ObsoleteError("The 'net' argument is no longer supported.")
        kwargs.setdefault('label', "Input gated memory")
        super(InputGatedMemory, self).__init__(**kwargs)

        if difference_synapse is None:
            difference_synapse = recurrent_synapse

        n_total_neurons = n_neurons * dimensions

        with self:
            # integrator to store value
            self.mem = EnsembleArray(n_neurons, dimensions, label="mem")
            nengo.Connection(self.mem.output,
                             self.mem.input,
                             transform=feedback,
                             synapse=recurrent_synapse)

            # calculate difference between stored value and input
            self.diff = EnsembleArray(n_neurons, dimensions, label="diff")
            nengo.Connection(self.mem.output, self.diff.input, transform=-1)

            # feed difference into integrator
            nengo.Connection(self.diff.output,
                             self.mem.input,
                             transform=difference_gain,
                             synapse=difference_synapse)

            # gate difference (if gate==0, update stored value,
            # otherwise retain stored value)
            self.gate = nengo.Node(size_in=1)
            self.diff.add_neuron_input()
            nengo.Connection(self.gate,
                             self.diff.neuron_input,
                             transform=np.ones((n_total_neurons, 1)) * -10,
                             synapse=None)

            # reset input (if reset=1, remove all values, and set to 0)
            self.reset = nengo.Node(size_in=1)
            nengo.Connection(self.reset,
                             self.mem.add_neuron_input(),
                             transform=np.ones((n_total_neurons, 1)) * -3,
                             synapse=None)

        self.input = self.diff.input
        self.output = self.mem.output
Example #5
    def __init__(self, num_func_points, func_value_range=1.0,
                 func_output_dimensions=1, n_neurons=500, label=None,
                 seed=None, add_to_container=None):
        super(DifferenceFunctionEvaluator, self).__init__(label, seed,
                                                          add_to_container)

        intercept_interval = 2.0 / (num_func_points - 1)

        self.func_output_dimensions = func_output_dimensions
        self.n_neurons = n_neurons

        with self:
            bias_node = nengo.Node(1)

            self.func_input = nengo.Node(size_in=1)
            self.func_output = nengo.Node(size_in=func_output_dimensions)

            self.diff_func_pts = []
            self.diff_func_outputs = []
            self.func_gate_eas = []

            func_domain_inhib_ea = EA(25, num_func_points - 1,
                                      encoders=Choice([[-1]]),
                                      intercepts=Uniform(0,
                                                         intercept_interval))

            # Generate inhibit signal based of the function domain input value
            func_domain_inhib_ea.add_output('const', lambda x: 1)
            inhib_trfm = np.array([np.linspace(-1, 1, num_func_points)[:-1] +
                                   intercept_interval / 2.0])
            nengo.Connection(bias_node, func_domain_inhib_ea.input,
                             transform=-1 - inhib_trfm.T)
            nengo.Connection(self.func_input,
                             func_domain_inhib_ea.input,
                             transform=2 * np.ones((num_func_points - 1, 1)),
                             synapse=None)

            for n in range(func_output_dimensions):
                func_gate_ea = EA(n_neurons, num_func_points,
                                  radius=func_value_range)

                for i, gate in enumerate(func_gate_ea.all_ensembles[1:]):
                    nengo.Connection(func_domain_inhib_ea.const[i],
                                     gate.neurons,
                                     transform=[[-5]] * n_neurons)

                self.func_gate_eas.append(func_gate_ea)
                self.diff_func_pts.append(func_gate_ea.input)
                self.diff_func_outputs.append(func_gate_ea.output)

                nengo.Connection(func_gate_ea.output, self.func_output[n],
                                 transform=np.ones((1, num_func_points)),
                                 synapse=None)
Example #6
    def build_output(self, model):

        assoc_probes = OrderedDict()
        threshold_probes = OrderedDict()
        assoc_spike_probes = OrderedDict()

        synapse = self.assoc_params.synapse

        with model:
            input = nengo.Node(output=self.assoc_output_func)
            output = EnsembleArray(n_neurons=self.neurons_per_dim,
                                   n_ensembles=self.dimension,
                                   label="output",
                                   radius=self.radius)

            nengo.Connection(input, output.input, synapse=synapse)
            self.output_probe = nengo.Probe(output.output,
                                            'output',
                                            synapse=0.02)

            for k in self.probe_keys:
                n = nengo.Node(output=self.assoc_memory.probe_func(k))
                probe = nengo.Probe(n, synapse=synapse)

                threshold_probes[k] = probe

                node = nengo.Node(output=self.assoc_memory.spike_func(k))
                assoc_spike_probes[k] = nengo.Probe(node, synapse=None)

        self.assoc_probes = assoc_probes
        self.threshold_probes = threshold_probes
        self.assoc_spike_probes = assoc_spike_probes
Example #7
    def make_ens_array(self, **args):
        ens_args = dict(args)
        ens_args['n_neurons'] = args.get('n_neurons', self.n_neurons_ens)
        n_ensembles = ens_args.pop('dimensions', vocab.sp_dim)
        ens_args['n_ensembles'] = args.get('n_ensembles', n_ensembles)
        ens_args['radius'] = args.get('radius', self.get_optimal_sp_radius())
        return EnsembleArray(**ens_args)
Example #8
def InputGatedMemory(n_neurons, dimensions, feedback=1.0,
                     difference_gain=1.0, recurrent_synapse=0.1,
                     difference_synapse=None, net=None):
    """Stores a given vector in memory, with input controlled by a gate."""
    if net is None:
        net = nengo.Network(label="Input Gated Memory")

    if difference_synapse is None:
        difference_synapse = recurrent_synapse

    n_total_neurons = n_neurons * dimensions

    with net:
        # integrator to store value
        net.mem = EnsembleArray(n_neurons, dimensions, label="mem")
        nengo.Connection(net.mem.output, net.mem.input,
                         transform=feedback,
                         synapse=recurrent_synapse)

        # calculate difference between stored value and input
        net.diff = EnsembleArray(n_neurons, dimensions, label="diff")
        nengo.Connection(net.mem.output, net.diff.input, transform=-1)

        # feed difference into integrator
        nengo.Connection(net.diff.output, net.mem.input,
                         transform=difference_gain,
                         synapse=difference_synapse)

        # gate difference (if gate==0, update stored value,
        # otherwise retain stored value)
        net.gate = nengo.Node(size_in=1)
        net.diff.add_neuron_input()
        nengo.Connection(net.gate, net.diff.neuron_input,
                         transform=np.ones((n_total_neurons, 1)) * -10,
                         synapse=None)

        # reset input (if reset=1, remove all values, and set to 0)
        net.reset = nengo.Node(size_in=1)
        nengo.Connection(net.reset, net.mem.add_neuron_input(),
                         transform=np.ones((n_total_neurons, 1)) * -3,
                         synapse=None)

    net.input = net.diff.input
    net.output = net.mem.output

    return net
Example #9
def delayed_synapse():
    a = 0.1  # desired delay
    b = 0.01  # synapse delay
    tau = 0.01  # recurrent tau
    hz = 15  # input frequency
    t = 1.0  # simulation time
    dt = 0.00001  # simulation timestep
    order = 6  # order of pade approximation
    tau_probe = 0.02

    dexp_synapse = DoubleExp(tau, tau / 5)

    sys_lambert = lambert_delay(a, b, tau, order - 1, order)
    synapse = (cont2discrete(Lowpass(tau), dt=dt) *
               DiscreteDelay(int(b / dt)))

    n_neurons = 2000
    neuron_type = PerfectLIF()

    A, B, C, D = sys_lambert.observable.transform(5*np.eye(order)).ss

    sys_normal = PadeDelay(a, order)
    assert len(sys_normal) == order

    with Network(seed=0) as model:
        stim = Node(output=WhiteSignal(t, high=hz, y0=0))

        x = EnsembleArray(n_neurons // order, len(A), neuron_type=neuron_type)
        output = Node(size_in=1)

        Connection(x.output, x.input, transform=A, synapse=synapse)
        Connection(stim, x.input, transform=B, synapse=synapse)
        Connection(x.output, output, transform=C, synapse=None)
        Connection(stim, output, transform=D, synapse=None)

        lowpass_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=tau, input_synapse=tau,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, lowpass_delay.input, synapse=None)

        dexp_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=dexp_synapse, input_synapse=dexp_synapse,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, dexp_delay.input, synapse=None)

        p_stim = Probe(stim, synapse=tau_probe)
        p_output_delayed = Probe(output, synapse=tau_probe)
        p_output_lowpass = Probe(lowpass_delay.output, synapse=tau_probe)
        p_output_dexp = Probe(dexp_delay.output, synapse=tau_probe)

    with Simulator(model, dt=dt, seed=0) as sim:
        sim.run(t)

    return (a, dt, sim.trange(), sim.data[p_stim],
            sim.data[p_output_delayed], sim.data[p_output_lowpass],
            sim.data[p_output_dexp])
Example #10
    def make_ens_array(self, **args):
        ens_args = dict(args)
        ens_args['radius'] = args.get('radius', 1)
        ens_args['ens_dimensions'] = args.get('ens_dimensions',
                                              self.ens_array_subdim)
        n_ensembles = (ens_args.pop('dimensions', vocab.sp_dim) //
                       ens_args['ens_dimensions'])
        ens_args['n_neurons'] = (args.get('n_neurons', self.n_neurons_ens) *
                                 ens_args['ens_dimensions'])
        ens_args['n_ensembles'] = args.get('n_ensembles', n_ensembles)
        return EnsembleArray(**ens_args)
Example #11
def IntermediateDeriv(n_neurons, dimensions, tau=0.1, net=None):
    if net is None:
        net = nengo.Network(label="Derivative (intermediate ensemble)")

    with net:
        net.ea_in = EnsembleArray(n_neurons, n_ensembles=dimensions)
        net.ea_interm = EnsembleArray(n_neurons, n_ensembles=dimensions)
        net.ea_out = EnsembleArray(n_neurons, n_ensembles=dimensions)
        nengo.Connection(net.ea_in.output, net.ea_interm.input, synapse=tau)
        nengo.Connection(net.ea_in.output,
                         net.ea_out.input,
                         synapse=tau,
                         transform=1. / tau)
        nengo.Connection(net.ea_interm.output,
                         net.ea_out.input,
                         synapse=tau,
                         transform=-1. / tau)
        net.input = net.ea_in.input
        net.output = net.ea_out.output
    return net
Example #12
def Product(n_neurons, dimensions, input_magnitude=1):
    encoders = nengo.dists.Choice([[1, 1], [1, -1], [-1, 1], [-1, -1]])

    product = EnsembleArray(n_neurons,
                            n_ensembles=dimensions,
                            ens_dimensions=2,
                            encoders=encoders,
                            radius=input_magnitude * np.sqrt(2))
    with product:
        product.A = nengo.Node(size_in=dimensions, label="A")
        product.B = nengo.Node(size_in=dimensions, label="B")
        nengo.Connection(product.A, product.input[::2], synapse=None)
        nengo.Connection(product.B, product.input[1::2], synapse=None)

        # Remove default output
        for conn in list(product.connections):
            if conn.post is product.output:
                product.connections.remove(conn)
        product.nodes.remove(product.output)

        # Add product output
        product.output = product.add_output('product',
                                            lambda x: x[0] * x[1],
                                            synapse=None)

    return product
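A short usage sketch for this Product network (assuming standard `nengo`/`numpy` imports and the `Product` function defined above; the input values are illustrative), showing an element-wise product of two 3-D vectors:

with nengo.Network() as model:
    prod = Product(n_neurons=200, dimensions=3)
    a = nengo.Node([0.5, -0.3, 0.8])
    b = nengo.Node([0.6, 0.4, -0.5])
    nengo.Connection(a, prod.A, synapse=None)
    nengo.Connection(b, prod.B, synapse=None)
    p_prod = nengo.Probe(prod.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

# The steady-state probe values should approximate the element-wise product
# [0.3, -0.12, -0.4].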
Example #13
    def __init__(self, n_neurons, dimensions, mem_synapse=0.1, fdbk_scale=1.0,
                 difference_gain=1.0, gate_gain=10, reset_gain=3,
                 **mem_args):

        self.input = nengo.Node(size_in=dimensions)
        self.output = nengo.Node(size_in=dimensions)

        # integrator to store value
        self.mem = EnsembleArray(n_neurons, dimensions, label="mem",
                                 **mem_args)
        nengo.Connection(self.mem.output, self.mem.input, synapse=mem_synapse,
                         transform=np.eye(dimensions) * fdbk_scale)

        # calculate difference between stored value and input
        self.diff = EnsembleArray(n_neurons, dimensions, label="diff")
        nengo.Connection(self.input, self.diff.input, synapse=None)
        nengo.Connection(self.mem.output, self.diff.input,
                         transform=np.eye(dimensions) * -1)

        # feed difference into integrator
        nengo.Connection(self.diff.output, self.mem.input,
                         transform=np.eye(dimensions) * difference_gain,
                         synapse=mem_synapse)

        # gate difference (if gate==0, update stored value,
        # otherwise retain stored value)
        self.gate = nengo.Node(size_in=1)
        for e in self.diff.ensembles:
            nengo.Connection(self.gate, e.neurons,
                             transform=[[-gate_gain]] * e.n_neurons)

        # reset input (if reset=1, remove all values stored, and set values
        # to 0)
        self.reset_node = nengo.Node(size_in=1)
        for e in self.mem.ensembles:
            nengo.Connection(self.reset_node, e.neurons,
                             transform=[[-reset_gain]] * e.n_neurons)

        nengo.Connection(self.mem.output, self.output, synapse=None)
Example #14
    def build_output(self, model):
        with model:

            self.output = EnsembleArray(n_neurons=self.neurons_per_dim,
                                        n_ensembles=self.dimension,
                                        label="output",
                                        radius=self.radius)

            output_output = self.output.output

            self.output_probe = nengo.Probe(output_output,
                                            'output',
                                            synapse=0.02)
Example #15
def Cepstra(n_neurons, n_freqs, n_cepstra, net=None):
    if net is None:
        net = nengo.Network("Cepstra")

    with net:
        net.input = nengo.Node(size_in=n_freqs, label="input")
        net.out_ea = EnsembleArray(n_neurons, n_ensembles=n_cepstra)
        nengo.Connection(net.input,
                         net.out_ea.input,
                         synapse=None,
                         transform=idct(n_freqs, n_cepstra))
        net.output = net.out_ea.output
    return net
Example #16
def Product(n_neurons, dimensions, input_magnitude=1):
    encoders = nengo.dists.Choice([[1, 1], [1, -1], [-1, 1], [-1, -1]])

    product = EnsembleArray(n_neurons, n_ensembles=dimensions,
                            ens_dimensions=2,
                            encoders=encoders,
                            radius=input_magnitude * np.sqrt(2))
    with product:
        product.A = nengo.Node(size_in=dimensions, label="A")
        product.B = nengo.Node(size_in=dimensions, label="B")
        nengo.Connection(product.A, product.input[::2], synapse=None)
        nengo.Connection(product.B, product.input[1::2], synapse=None)

        # Remove default output
        for conn in list(product.connections):
            if conn.post is product.output:
                product.connections.remove(conn)
        product.nodes.remove(product.output)

        # Add product output
        product.output = product.add_output(
            'product', lambda x: x[0] * x[1], synapse=None)

    return product
Example #17
    def build_syllables(self, net):
        assert len(self.syllables) > 0, "No syllables added"

        # Make a readout for the production info coming from the DMPs
        intercepts = Exponential(0.15, self.production_info.threshold, 1)
        net.production_info = EnsembleArray(self.production_info.n_per_d,
                                            n_ensembles=48,
                                            encoders=Choice([[1]]),
                                            intercepts=intercepts,
                                            radius=1.1)

        net.syllables = []
        dt = self.trial.dt
        for syllable in self.syllables:
            forcing_f, gesture_ix = traj2func(syllable.trajectory, dt=dt)
            forcing_f.__name__ = syllable.label
            dmp = RhythmicDMP(forcing_f=forcing_f,
                              freq=syllable.freq,
                              **self.syllable.kwargs())
            nengo.Connection(dmp.output, net.production_info.input[gesture_ix])
            net.syllables.append(dmp)
Example #18
def VectorNormalize(min_mag, max_mag, dimensions, radius_scale=1.0,
                    n_neurons_norm=50, n_neurons_prod=150,
                    norm_error_per_dimension=0.0003,
                    subtract_scale=1, net=None):
    if net is None:
        net = nengo.Network(label="Vector Normalize")

    max_radius_scale = max_mag * radius_scale

    norm_sub_in_low = min_mag ** 2
    norm_sub_in_high = max_mag ** 2
    norm_sub_in_trfm = scale_trfm(norm_sub_in_low, norm_sub_in_high)
    norm_sub_in_bias = scale_bias(norm_sub_in_low, norm_sub_in_high)

    norm_sub_in_bias_offset = -(norm_error_per_dimension * dimensions)

    prod_a_trfm = 1.0 / max_radius_scale
    prod_b_low = 1.0 - 1.0 / min_mag
    prod_b_high = 1.0 - 1.0 / max_mag
    prod_b_trfm = scale_trfm(prod_b_low, prod_b_high)
    prod_b_bias = scale_bias(prod_b_low, prod_b_high)

    def norm_sub_func(x, nsit=norm_sub_in_trfm, nsib=norm_sub_in_bias,
                      pbt=prod_b_trfm, pbb=prod_b_bias):
        return norm_subtract_func(x, nsit, nsib, pbt, pbb)

    def prod_func(x, y, pat=prod_a_trfm, pbt=prod_b_trfm, pbb=prod_b_bias):
        return prod_out_func(x, y, pat, 0, pbt, pbb)

    with net:
        net.input = nengo.Node(size_in=dimensions)
        net.output = nengo.Node(size_in=dimensions)
        bias_node = nengo.Node(1)

        # Ensemble array to represent input vector and to compute vector
        # norm
        norm_array = EnsembleArray(n_neurons_norm, dimensions,
                                   radius=max_radius_scale)
        norm_array.add_output('squared', lambda x: x ** 2)
        nengo.Connection(net.input, norm_array.input)

        # Ensemble to calculate amount of magnitude to be subtracted
        # i.e. (1 - 1 / np.linalg.norm(input))
        norm_subtract_ens = nengo.Ensemble(n_neurons_norm, 1,
                                           n_eval_points=5000)
        nengo.Connection(norm_array.squared, norm_subtract_ens,
                         transform=np.ones((1, dimensions)) * norm_sub_in_trfm)
        nengo.Connection(bias_node, norm_subtract_ens,
                         transform=norm_sub_in_bias + norm_sub_in_bias_offset)

        # Product network to compute product between input vector and
        # magnitude to be subtracted
        prod_array = Product(n_neurons_prod, dimensions)
        for e in prod_array.product.ensembles:
            e.n_eval_points = 5000
        prod_array.product.add_output('prod2', lambda x: prod_func(x[0], x[1]))
        prod_array.prod2 = prod_array.product.prod2

        nengo.Connection(norm_array.output, prod_array.A,
                         transform=prod_a_trfm)
        nengo.Connection(norm_subtract_ens, prod_array.B,
                         function=norm_sub_func,
                         transform=np.ones((dimensions, 1)))

        # Output connections
        nengo.Connection(norm_array.output, net.output)
        nengo.Connection(prod_array.prod2, net.output,
                         transform=-subtract_scale)
    return net
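A hedged usage sketch for the VectorNormalize network above (it assumes the helper functions it references, namely scale_trfm, scale_bias, norm_subtract_func, prod_out_func, and the Product network, are defined, plus the usual `nengo`/`numpy` imports; the input values are illustrative):

with nengo.Network() as model:
    stim = nengo.Node([0.6, 0.3, -0.2, 0.4])  # vector with magnitude ~0.81
    norm_net = VectorNormalize(min_mag=0.5, max_mag=1.5, dimensions=4)
    nengo.Connection(stim, norm_net.input, synapse=None)
    p_norm = nengo.Probe(norm_net.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

# Provided the input magnitude lies within [min_mag, max_mag], the probed
# output vector should end up with magnitude close to 1.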
Example #19
def InputGatedMemory(n_neurons, dimensions, feedback=1.0,
                     difference_gain=1.0, recurrent_synapse=0.1,
                     difference_synapse=None, net=None):
    """Stores a given vector in memory, with input controlled by a gate.

    Parameters
    ----------
    n_neurons : int
        Number of neurons per dimension in the vector.
    dimensions : int
        Dimensionality of the vector.

    feedback : float, optional (Default: 1.0)
        Strength of the recurrent connection from the memory to itself.
    difference_gain : float, optional (Default: 1.0)
        Strength of the connection from the difference ensembles to the
        memory ensembles.
    recurrent_synapse : float, optional (Default: 0.1)
        Synapse on the recurrent connection from ``mem`` to itself.
    difference_synapse : Synapse, optional (Default: None)
        Synapse on the connection from ``diff`` to ``mem``.
        If None, ``recurrent_synapse`` is used.
    net : Network, optional (Default: None)
        A network in which the network components will be built.
        This is typically used to provide a custom set of Nengo object
        defaults through modifying ``net.config``.

    Returns
    -------
    net : Network
        The newly built memory network, or the provided ``net``.

    Attributes
    ----------
    net.diff : EnsembleArray
        Represents the difference between the desired vector and
        the current vector represented by ``mem``.
    net.gate : Node
        With input of 0, the network is not gated, and ``mem`` will be updated
        to minimize ``diff``. With input greater than 0, the network will be
        increasingly gated such that ``mem`` will retain its current value,
        and ``diff`` will be inhibited.
    net.input : Node
        The desired vector.
    net.mem : EnsembleArray
        Integrative population that stores the vector.
    net.output : Node
        The vector currently represented by ``mem``.
    net.reset : Node
        With positive input, the ``mem`` population will be inhibited,
        effectively wiping out the vector currently being remembered.

    """
    if net is None:
        net = nengo.Network(label="Input Gated Memory")

    if difference_synapse is None:
        difference_synapse = recurrent_synapse

    n_total_neurons = n_neurons * dimensions

    with net:
        # integrator to store value
        net.mem = EnsembleArray(n_neurons, dimensions, label="mem")
        nengo.Connection(net.mem.output, net.mem.input,
                         transform=feedback,
                         synapse=recurrent_synapse)

        # calculate difference between stored value and input
        net.diff = EnsembleArray(n_neurons, dimensions, label="diff")
        nengo.Connection(net.mem.output, net.diff.input, transform=-1)

        # feed difference into integrator
        nengo.Connection(net.diff.output, net.mem.input,
                         transform=difference_gain,
                         synapse=difference_synapse)

        # gate difference (if gate==0, update stored value,
        # otherwise retain stored value)
        net.gate = nengo.Node(size_in=1)
        net.diff.add_neuron_input()
        nengo.Connection(net.gate, net.diff.neuron_input,
                         transform=np.ones((n_total_neurons, 1)) * -10,
                         synapse=None)

        # reset input (if reset=1, remove all values, and set to 0)
        net.reset = nengo.Node(size_in=1)
        nengo.Connection(net.reset, net.mem.add_neuron_input(),
                         transform=np.ones((n_total_neurons, 1)) * -3,
                         synapse=None)

    net.input = net.diff.input
    net.output = net.mem.output

    return net
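The docstring above describes how the gate and reset inputs behave; the sketch below (assuming standard `nengo`/`numpy` imports and the `InputGatedMemory` function defined in this example; stimulus values and timings are illustrative) loads a 2-D vector while the gate is open, then holds it once the gate closes.

with nengo.Network() as model:
    mem = InputGatedMemory(n_neurons=100, dimensions=2)
    stim = nengo.Node(lambda t: [0.8, -0.5] if t < 0.5 else [0.0, 0.0])
    gate = nengo.Node(lambda t: 0.0 if t < 0.5 else 1.0)  # close gate at 0.5 s
    nengo.Connection(stim, mem.input, synapse=None)
    nengo.Connection(gate, mem.gate, synapse=None)
    p_mem = nengo.Probe(mem.output, synapse=0.02)

with nengo.Simulator(model) as sim:
    sim.run(1.0)

# While gate == 0 the memory tracks the input; after 0.5 s the difference
# ensembles are inhibited and sim.data[p_mem] should hold roughly [0.8, -0.5].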
Example #20
def Visual_Transform_Network(vis_vocab,
                             vis_am_threshold,
                             vis_am_input_scale,
                             copy_draw_trfms_x,
                             copy_draw_trfms_y,
                             mtr_vocab,
                             mtr_sp_scale_factor,
                             net=None,
                             net_label='VIS TRFM'):
    if net is None:
        net = nengo.Network(label=net_label)

    with net:
        # ----------------------- Inputs and Outputs --------------------------
        net.input = nengo.Node(size_in=vis_vocab.dimensions)
        net.output = nengo.Node(size_in=mtr_vocab.dimensions)

        # ------------------ Digit (Answer) Classification --------------------
        # Takes digit semantic pointer from visual wm and identifies
        # appropriate digit classification
        # - Generates: Digit class (I - 1) used for inhibition.
        # -            Default output vector inhibits all.
        # Note: threshold is halved to compensate (sort of) for drift in the
        #       visual WM system
        digit_classify = \
            cfg.make_assoc_mem(vis_vocab.vectors[:len(mtr_vocab.keys), :],
                               np.ones((len(mtr_vocab.keys),
                                        len(mtr_vocab.keys))) -
                               np.eye(len(mtr_vocab.keys)),
                               threshold=vis_am_threshold,
                               label='DIGIT CLASSIFY')
        digit_classify.add_default_output_vector(np.ones(len(mtr_vocab.keys)))
        nengo.Connection(net.input,
                         digit_classify.input,
                         transform=vis_am_input_scale,
                         synapse=None)

        # --------------------- Motor SP Transformation -----------------------
        if len(mtr_vocab.keys) != copy_draw_trfms_x.shape[0]:
            raise ValueError('Transform System - Number of motor pointers '
                             'does not match number of given copydraw '
                             'transforms.')

        # ------------------ Motor SP Transform ensembles ---------------------
        for n in range(len(mtr_vocab.keys)):
            mtr_path_dim = mtr_vocab.dimensions // 2
            # Motor SP contains both X and Y information, so motor path dim is
            # half that of the SP dim

            # trfm_x = convert_func_2_diff_func(copy_draw_trfms_x[n])
            # trfm_y = convert_func_2_diff_func(copy_draw_trfms_y[n])
            trfm_x = np.array(copy_draw_trfms_x[n])
            trfm_y = np.array(copy_draw_trfms_y[n])

            trfm_ea = EnsembleArray(n_neurons=cfg.n_neurons_ens,
                                    n_ensembles=mtr_vocab.dimensions,
                                    radius=mtr_sp_scale_factor)
            cfg.make_inhibitable(trfm_ea)

            nengo.Connection(net.input,
                             trfm_ea.input[:mtr_path_dim],
                             transform=trfm_x.T,
                             synapse=None)
            nengo.Connection(net.input,
                             trfm_ea.input[mtr_path_dim:],
                             transform=trfm_y.T,
                             synapse=None)

            # Class output is inverted (i.e. if class is 3, it's [1, 1, 0, 1])
            # So transform here is just the identity
            inhib_trfm = np.zeros((1, len(mtr_vocab.keys)))
            inhib_trfm[0, n] = 1
            nengo.Connection(digit_classify.output,
                             trfm_ea.inhibit,
                             transform=inhib_trfm)
            nengo.Connection(trfm_ea.output, net.output, synapse=None)

    return net
Example #21
    def __init__(self, n_neurons, dimensions, mem_synapse=0.1,
                 fdbk_transform=1.0, input_transform=1.0, difference_gain=1.0,
                 gate_gain=3, reset_value=None, cleanup_values=None,
                 wta_output=False, wta_inhibit_scale=1, 
                 label=None, seed=None, add_to_container=None, **mem_args):

        super(InputGatedCleanupMemory, self).__init__(label, seed,
                                                      add_to_container)

        if cleanup_values is None:
            raise ValueError('InputGatedCleanupMemory - cleanup_values must' +
                             ' be defined.')
        else:
            cleanup_values = np.matrix(cleanup_values)

        # Keep copy of network parameters
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.gate_gain = gate_gain
        self.input_transform = np.dot(input_transform, cleanup_values)
        self.mem_synapse = mem_synapse
        self.mem_args = copy(mem_args)

        cu_args = copy(mem_args)
        cu_args['radius'] = 1
        cu_args['encoders'] = Choice([[1]])
        cu_args['intercepts'] = Uniform(0.5, 1)
        cu_args['eval_points'] = Uniform(0.6, 1.3)
        cu_args['n_eval_points'] = 5000

        with self:
            self.input = nengo.Node(size_in=dimensions)
            self.output = nengo.Node(size_in=dimensions)

            self.mem = EnsembleArray(n_neurons, cleanup_values.shape[0],
                                     label="mem", **cu_args)
            self.mem.add_output('thresh', function=lambda x: 1)
            nengo.Connection(self.mem.thresh, self.mem.input,
                             synapse=mem_synapse)

            if wta_output:
                nengo.Connection(self.mem.output, self.mem.input,
                                 transform=(np.eye(cleanup_values.shape[0]) 
                                            - 1) * wta_inhibit_scale)

            # calculate difference between stored value and input
            diff_args = copy(mem_args)
            diff_args['radius'] = 1
            self.diff = EnsembleArray(n_neurons, cleanup_values.shape[0],
                                      label="diff", **diff_args)
            self.diff_input = self.diff.input

            nengo.Connection(self.input, self.diff.input, synapse=None,
                             transform=self.input_transform)
            nengo.Connection(self.mem.output, self.diff.input, transform=-1)

            # feed difference into integrator
            nengo.Connection(self.diff.output, self.mem.input,
                             transform=difference_gain, synapse=mem_synapse)

            # connect cleanup to output
            nengo.Connection(self.mem.thresh, self.output,
                             transform=cleanup_values.T, synapse=None)

            # gate difference (if gate==0, update stored value,
            # otherwise retain stored value)
            self.gate = nengo.Ensemble(n_neurons, 1, encoders=Choice([[1]]),
                                       intercepts=Uniform(0.5, 1))
            for e in self.diff.ensembles:
                nengo.Connection(self.gate, e.neurons,
                                 transform=[[-gate_gain]] * e.n_neurons)

        # No indent! Not supposed to be within the network context
        if reset_value is not None:
            make_resettable(self, reset_value)
Example #22
class InputGatedMemory(nengo.Network):
    """Stores a given vector in memory, with input controlled by a gate.

    Parameters
    ----------
    n_neurons : int
        Number of neurons per dimension in the vector.
    dimensions : int
        Dimensionality of the vector.

    feedback : float, optional
        Strength of the recurrent connection from the memory to itself.
    difference_gain : float, optional
        Strength of the connection from the difference ensembles to the
        memory ensembles.
    recurrent_synapse : float, optional
        Synapse on the recurrent connection from ``mem`` to itself.
    difference_synapse : Synapse, optional
        Synapse on the connection from ``diff`` to ``mem``.
        If None, ``recurrent_synapse`` is used.
    **kwargs
        Keyword arguments passed through to ``nengo.Network``
        like 'label' and 'seed'.

    Attributes
    ----------
    diff : EnsembleArray
        Represents the difference between the desired vector and
        the current vector represented by ``mem``.
    gate : Node
        With input of 0, the network is not gated, and ``mem`` will be updated
        to minimize ``diff``. With input greater than 0, the network will be
        increasingly gated such that ``mem`` will retain its current value,
        and ``diff`` will be inhibited.
    input : Node
        The desired vector.
    mem : EnsembleArray
        Integrative population that stores the vector.
    output : Node
        The vector currently represented by ``mem``.
    reset : Node
        With positive input, the ``mem`` population will be inhibited,
        effectively wiping out the vector currently being remembered.
    """

    def __init__(self,
                 n_neurons,
                 dimensions,
                 feedback=1.0,
                 difference_gain=1.0,
                 recurrent_synapse=0.1,
                 difference_synapse=None,
                 **kwargs):

        if 'net' in kwargs:
            raise ObsoleteError("The 'net' argument is no longer supported.")
        kwargs.setdefault('label', "Input gated memory")
        super().__init__(**kwargs)

        if difference_synapse is None:
            difference_synapse = recurrent_synapse

        n_total_neurons = n_neurons * dimensions

        with self:
            # integrator to store value
            self.mem = EnsembleArray(n_neurons, dimensions, label="mem")
            nengo.Connection(self.mem.output, self.mem.input,
                             transform=feedback,
                             synapse=recurrent_synapse)

            # calculate difference between stored value and input
            self.diff = EnsembleArray(n_neurons, dimensions, label="diff")
            nengo.Connection(self.mem.output, self.diff.input, transform=-1)

            # feed difference into integrator
            nengo.Connection(self.diff.output, self.mem.input,
                             transform=difference_gain,
                             synapse=difference_synapse)

            # gate difference (if gate==0, update stored value,
            # otherwise retain stored value)
            self.gate = nengo.Node(size_in=1)
            self.diff.add_neuron_input()
            nengo.Connection(self.gate, self.diff.neuron_input,
                             transform=np.ones((n_total_neurons, 1)) * -10,
                             synapse=None)

            # reset input (if reset=1, remove all values, and set to 0)
            self.reset = nengo.Node(size_in=1)
            nengo.Connection(self.reset, self.mem.add_neuron_input(),
                             transform=np.ones((n_total_neurons, 1)) * -3,
                             synapse=None)

        self.input = self.diff.input
        self.output = self.mem.output
Example #23
def InputGatedMemory(n_neurons,
                     n_neurons_diff,
                     dimensions,
                     feedback=1.0,
                     difference_gain=1.0,
                     recurrent_synapse=0.1,
                     difference_synapse=None,
                     net=None,
                     **kwargs):
    """Stores a given vector in memory, with input controlled by a gate.
    Parameters
    ----------
    n_neurons : int
        Number of neurons per dimension in the vector.
    dimensions : int
        Dimensionality of the vector.
    feedback : float, optional (Default: 1.0)
        Strength of the recurrent connection from the memory to itself.
    difference_gain : float, optional (Default: 1.0)
        Strength of the connection from the difference ensembles to the
        memory ensembles.
    recurrent_synapse : float, optional (Default: 0.1)
    difference_synapse : Synapse (Default: None)
        If None, ...
    kwargs
        Keyword arguments passed through to ``nengo.Network``.
    Returns
    -------
    net : Network
        The newly built memory network, or the provided ``net``.
    Attributes
    ----------
    net.diff : EnsembleArray
        Represents the difference between the desired vector and
        the current vector represented by ``mem``.
    net.gate : Node
        With input of 0, the network is not gated, and ``mem`` will be updated
        to minimize ``diff``. With input greater than 0, the network will be
        increasingly gated such that ``mem`` will retain its current value,
        and ``diff`` will be inhibited.
    net.input : Node
        The desired vector.
    net.mem : EnsembleArray
        Integrative population that stores the vector.
    net.output : Node
        The vector currently represented by ``mem``.
    net.reset : Node
        With positive input, the ``mem`` population will be inhibited,
        effectively wiping out the vector currently being remembered.
    """
    if net is None:
        kwargs.setdefault('label', "Input gated memory")
        net = nengo.Network(**kwargs)
    else:
        warnings.warn("The 'net' argument is deprecated.", DeprecationWarning)

    if difference_synapse is None:
        difference_synapse = recurrent_synapse

    n_total_neurons = n_neurons * dimensions
    n_total_neurons_diff = n_neurons_diff * dimensions

    with net:
        # integrator to store value

        mem_net = nengo.Network()
        mem_net.config[nengo.Ensemble].encoders = nengo.dists.Choice([[-1.]])
        mem_net.config[nengo.Ensemble].radius = 1
        mem_net.config[nengo.Ensemble].eval_points = nengo.dists.Uniform(
            -1, 0.0)
        mem_net.config[nengo.Ensemble].intercepts = nengo.dists.Uniform(
            -0.6, 1.)
        with mem_net:
            net.mem = EnsembleArray(n_neurons, dimensions, label="mem")
        nengo.Connection(net.mem.output,
                         net.mem.input,
                         transform=feedback,
                         synapse=recurrent_synapse)

        diff_net = nengo.Network()
        diff_net.config[nengo.Ensemble].radius = 0.5
        diff_net.config[nengo.Ensemble].eval_points = nengo.dists.Uniform(
            -0.5, 0.5)
        with diff_net:
            # calculate difference between stored value and input
            net.diff = EnsembleArray(n_neurons_diff, dimensions, label="diff")
        nengo.Connection(net.mem.output, net.diff.input, transform=-1)

        # feed difference into integrator
        nengo.Connection(net.diff.output,
                         net.mem.input,
                         transform=difference_gain,
                         synapse=difference_synapse)

        # gate difference (if gate==0, update stored value,
        # otherwise retain stored value)
        net.gate = nengo.Node(size_in=1)
        net.diff.add_neuron_input()
        nengo.Connection(net.gate,
                         net.diff.neuron_input,
                         transform=np.ones((n_total_neurons_diff, 1)) * -10,
                         synapse=None)

        # reset input (if reset=1, remove all values, and set to 0)
        net.reset = nengo.Node(size_in=1)
        nengo.Connection(net.reset,
                         net.mem.add_neuron_input(),
                         transform=np.ones((n_total_neurons, 1)) * -3,
                         synapse=None)

    net.input = net.diff.input
    net.output = net.mem.output

    return net
Example #24
    def init_module(self):
        bias_node = nengo.Node(1)

        # Number of actions in this spaun setup
        num_actions = experiment.num_learn_actions

        # ------------------- Action detection network ------------------------
        # Translates
        self.action_input = nengo.Node(size_in=num_actions)
        self.bg_utilities_input = nengo.Node(size_in=num_actions)
        self.vis_sp_input = nengo.Node(size_in=vocab.sp_dim)

        # ------------------- Reward detection network ------------------------
        # Translates visual input into reward yes/no signals
        # Note: Output of reward_detect is inverted
        num_reward_sps = len(vocab.reward.keys)
        self.reward_detect = cfg.make_thresh_ens_net(num_ens=num_reward_sps)
        nengo.Connection(bias_node,
                         self.reward_detect.input,
                         transform=np.ones(num_reward_sps)[:, None])
        nengo.Connection(self.vis_sp_input,
                         self.reward_detect.input,
                         transform=-vocab.reward.vectors,
                         synapse=None)

        # Calculate positive reward values
        self.pos_reward_vals = \
            cfg.make_ens_array(n_ensembles=num_actions, radius=1)
        nengo.Connection(self.action_input,
                         self.pos_reward_vals.input,
                         transform=np.eye(num_actions),
                         synapse=None)

        # Calculate negative reward values
        self.neg_reward_vals = \
            cfg.make_ens_array(n_ensembles=num_actions, radius=1)
        nengo.Connection(self.action_input,
                         self.neg_reward_vals.input,
                         transform=np.ones(num_actions) - np.eye(num_actions),
                         synapse=None)

        # Do the appropriate reward cross linking
        for i in range(num_actions):
            # No reward detect --> disinhibit neg_reward_vals
            nengo.Connection(self.reward_detect.output[0],
                             self.neg_reward_vals.ensembles[i].neurons,
                             transform=[[-3]] *
                             self.neg_reward_vals.ensembles[i].n_neurons)
            # Yes reward detect --> disinhibit pos_reward_vals
            nengo.Connection(self.reward_detect.output[1],
                             self.pos_reward_vals.ensembles[i].neurons,
                             transform=[[-3]] *
                             self.pos_reward_vals.ensembles[i].n_neurons)

        # Calculate the utility bias needed (so that the rewards don't send
        # the utilities to +inf, -inf)
        self.util_vals = EnsembleArray(100,
                                       num_actions,
                                       encoders=Choice([[1]]),
                                       intercepts=Exponential(0.15, 0.3, 1))
        nengo.Connection(self.reward_detect.output,
                         self.util_vals.input,
                         transform=-np.ones((num_actions, 2)))
        nengo.Connection(self.action_input,
                         self.util_vals.input,
                         transform=np.ones((num_actions, num_actions)),
                         synapse=None)
        nengo.Connection(self.bg_utilities_input,
                         self.util_vals.input,
                         transform=1,
                         synapse=None)
Example #25
def test_circularconv(Simulator, nl, dims=4, neurons_per_product=128):
    rng = np.random.RandomState(4238)

    n_neurons = neurons_per_product
    n_neurons_d = 2 * neurons_per_product
    radius = 1

    a = rng.normal(scale=np.sqrt(1./dims), size=dims)
    b = rng.normal(scale=np.sqrt(1./dims), size=dims)
    result = circconv(a, b)
    assert np.abs(a).max() < radius
    assert np.abs(b).max() < radius
    assert np.abs(result).max() < radius

    # --- model
    model = nengo.Network(label="circular convolution")
    with model:
        model.config[nengo.Ensemble].neuron_type = nl()
        inputA = nengo.Node(a)
        inputB = nengo.Node(b)
        A = EnsembleArray(n_neurons, dims, radius=radius)
        B = EnsembleArray(n_neurons, dims, radius=radius)
        cconv = nengo.networks.CircularConvolution(
            n_neurons_d, dimensions=dims)
        res = EnsembleArray(n_neurons, dims, radius=radius)

        nengo.Connection(inputA, A.input)
        nengo.Connection(inputB, B.input)
        nengo.Connection(A.output, cconv.A)
        nengo.Connection(B.output, cconv.B)
        nengo.Connection(cconv.output, res.input)

        A_p = nengo.Probe(A.output, synapse=0.03)
        B_p = nengo.Probe(B.output, synapse=0.03)
        res_p = nengo.Probe(res.output, synapse=0.03)

    # --- simulation
    sim = Simulator(model)
    sim.run(1.0)

    t = sim.trange()

    with Plotter(Simulator, nl) as plt:
        def plot(actual, probe, title=""):
            ref_y = np.tile(actual, (len(t), 1))
            sim_y = sim.data[probe]
            colors = ['b', 'g', 'r', 'c', 'm', 'y']
            for i in range(min(dims, len(colors))):
                plt.plot(t, ref_y[:, i], '--', color=colors[i])
                plt.plot(t, sim_y[:, i], '-', color=colors[i])
                plt.title(title)

        plt.subplot(311)
        plot(a, A_p, title="A")
        plt.subplot(312)
        plot(b, B_p, title="B")
        plt.subplot(313)
        plot(result, res_p, title="Result")
        plt.tight_layout()
        plt.savefig('test_circularconv.test_circularconv_%d.pdf' % dims)
        plt.close()

    # --- results
    tmask = t > (0.5 + sim.dt/2)
    assert sim.data[A_p][tmask].shape == (499, dims)
    a_sim = sim.data[A_p][tmask].mean(axis=0)
    b_sim = sim.data[B_p][tmask].mean(axis=0)
    res_sim = sim.data[res_p][tmask].mean(axis=0)

    rtol, atol = 0.1, 0.05
    assert np.allclose(a, a_sim, rtol=rtol, atol=atol)
    assert np.allclose(b, b_sim, rtol=rtol, atol=atol)
    assert rmse(result, res_sim) < 0.075
Example #26
    def __init__(self,
                 num_func_points,
                 func_value_range=1.0,
                 func_output_dimensions=1,
                 n_neurons=500,
                 label=None,
                 seed=None,
                 add_to_container=None):
        super(DifferenceFunctionEvaluator,
              self).__init__(label, seed, add_to_container)

        intercept_interval = 2.0 / (num_func_points - 1)

        self.func_output_dimensions = func_output_dimensions
        self.n_neurons = n_neurons

        with self:
            bias_node = nengo.Node(1)

            self.func_input = nengo.Node(size_in=1)
            self.func_output = nengo.Node(size_in=func_output_dimensions)

            self.diff_func_pts = []
            self.diff_func_outputs = []
            self.func_gate_eas = []

            func_domain_inhib_ea = EA(25,
                                      num_func_points - 1,
                                      encoders=Choice([[-1]]),
                                      intercepts=Uniform(
                                          0, intercept_interval))

            # Generate inhibit signal based of the function domain input value
            func_domain_inhib_ea.add_output('const', lambda x: 1)
            inhib_trfm = np.array([
                np.linspace(-1, 1, num_func_points)[:-1] +
                intercept_interval / 2.0
            ])
            nengo.Connection(bias_node,
                             func_domain_inhib_ea.input,
                             transform=-1 - inhib_trfm.T)
            nengo.Connection(self.func_input,
                             func_domain_inhib_ea.input,
                             transform=2 * np.ones((num_func_points - 1, 1)),
                             synapse=None)

            for n in range(func_output_dimensions):
                func_gate_ea = EA(n_neurons,
                                  num_func_points,
                                  radius=func_value_range)

                for i, gate in enumerate(func_gate_ea.all_ensembles[1:]):
                    nengo.Connection(func_domain_inhib_ea.const[i],
                                     gate.neurons,
                                     transform=[[-5]] * n_neurons)

                self.func_gate_eas.append(func_gate_ea)
                self.diff_func_pts.append(func_gate_ea.input)
                self.diff_func_outputs.append(func_gate_ea.output)

                nengo.Connection(func_gate_ea.output,
                                 self.func_output[n],
                                 transform=np.ones((1, num_func_points)),
                                 synapse=None)
Example #27
    def add_cleanup_output(self,
                           output_name='output',
                           n_neurons=50,
                           inhibit_scale=3.5,
                           replace_output=False):
        """Adds a cleaned outputs to the associative memory network.

        Creates a doubled-inhibited ensemble structure to the desired assoc
        memory output to perform a cleanup operation on it. Note that using the
        'filtered_step_func' utility mapping performs a similar cleanup
        operation, but does not do a very good cleanup approximation for
        utility values near (+/- 0.2) the threshold value. This function
        adds the infrastructure needed to perform the cleanup operation across
        the entire range of output values, at the cost of two synaptic delays
        and adding (n_neurons * 2 * n_items) additional neurons to the network.

        Note: This function creates 2 nodes:
            - A node named 'cleaned_<OUTPUT_NAME>' that outputs the cleaned
              version of the output vectors.
            - A node named 'cleaned_<OUTPUT_NAME>_utilities' that outputs the
              utilities of the cleaned output vectors.

        Parameters
        ----------
        output_name: string, optional
            The name of the assoc memory output to which the cleanup
            operation should be applied.

        n_neurons: int, optional
            Number of neurons to use for the ensembles used in the double-
            inhibited cleanup network.
        inhibit_scale: float, optional
            Scaling factor applied to the inhibitory connections between
            the ensembles. It is recommended that this value be at
            least 1.0 / minimum(assoc memory activation thresholds), and that
            the minimum assoc memory activation threshold be at most 0.1.

        replace_output: boolean, optional
            Flag to indicate whether or not to replace the output object
            (e.g. am.output) with the cleaned output node.
        """
        cleanup_output_name = '_'.join(
            [self.cleanup_output_prefix, output_name])
        cleanup_utilities_name = '_'.join([
            self.cleanup_output_prefix, output_name, self.utility_output_suffix
        ])
        output_utilities_name = '_'.join(
            [output_name, self.utility_output_suffix])

        # --- Check if cleanup network has already been created for
        #     the desired output node
        if hasattr(self, cleanup_output_name):
            raise ValidationError("Cleanup output already exists for "
                                  "output: '%s'." % output_name,
                                  attr='output_name')

        with self.cleanup_ens_config:
            # --- Set up the double inhibited ensembles, and make the
            #     appropriate connections.
            self.bias_ens1 = EnsembleArray(n_neurons,
                                           self.n_items,
                                           label=output_name + '_bias_ens1')
            self.bias_ens2 = EnsembleArray(n_neurons,
                                           self.n_items,
                                           label=output_name + '_bias_ens2')

            utility = getattr(self, output_utilities_name)

            nengo.Connection(self.bias_node,
                             self.bias_ens1.input,
                             transform=np.ones((self.n_items, 1)),
                             synapse=None)
            nengo.Connection(self.bias_node,
                             self.bias_ens2.input,
                             transform=np.ones((self.n_items, 1)),
                             synapse=None)
            nengo.Connection(utility,
                             self.bias_ens1.input,
                             transform=-inhibit_scale)
            nengo.Connection(self.bias_ens1.output,
                             self.bias_ens2.input,
                             transform=-1.0)

            # --- Make the output node and connect it
            output_vectors = self._output_vectors[output_name]
            cleanup_output_node = nengo.Node(size_in=output_vectors.shape[1],
                                             label=cleanup_output_name)
            nengo.Connection(self.bias_ens2.output,
                             cleanup_output_node,
                             transform=output_vectors.T,
                             synapse=None)

            setattr(self, cleanup_output_name, cleanup_output_node)
            setattr(self, cleanup_utilities_name, self.bias_ens2.output)

            # --- Replace the original output node (pointer) if required
            if replace_output:
                setattr(self, output_name, cleanup_output_node)

            # --- Make inhibitory connection if inhibit option is set
            if self.inhibit is not None:
                for e in self.bias_ens2.ensembles:
                    nengo.Connection(self.inhibit,
                                     e,
                                     transform=-self._inhib_scale,
                                     synapse=None)

            # --- Connect default output vector to cleaned outputs
            #     (if available)
            default_vector_ens = getattr(
                self, '_'.join([output_name, self.default_ens_suffix]), None)
            if default_vector_ens:
                default_output_vectors = \
                    self._default_output_vectors[output_name]
                nengo.Connection(default_vector_ens,
                                 cleanup_output_node,
                                 transform=default_output_vectors.T,
                                 synapse=None)
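
    # Illustrative usage sketch (not part of the original source). It assumes
    # an associative-memory class that exposes this method -- hypothetically
    # called AssocMemNetwork here -- constructed from a matrix of item
    # vectors:
    #
    #     with nengo.Network() as model:
    #         am = AssocMemNetwork(input_vectors=item_vecs)
    #         am.add_cleanup_output(output_name='output', n_neurons=50,
    #                               inhibit_scale=3.5, replace_output=True)
    #
    # Afterwards the cleaned output and its utilities are available as
    # am.cleaned_output and am.cleaned_output_utilities (see the Note in the
    # docstring above).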
Ejemplo n.º 28
0
def VectorNormalize(min_mag,
                    max_mag,
                    dimensions,
                    radius_scale=1.0,
                    n_neurons_norm=50,
                    n_neurons_prod=150,
                    norm_error_per_dimension=0.0003,
                    subtract_scale=1,
                    net=None):
    if net is None:
        net = nengo.Network(label="Vector Normalize")

    max_radius_scale = max_mag * radius_scale

    norm_sub_in_low = min_mag**2
    norm_sub_in_high = max_mag**2
    norm_sub_in_trfm = scale_trfm(norm_sub_in_low, norm_sub_in_high)
    norm_sub_in_bias = scale_bias(norm_sub_in_low, norm_sub_in_high)

    norm_sub_in_bias_offset = -(norm_error_per_dimension * dimensions)

    prod_a_trfm = 1.0 / max_radius_scale
    prod_b_low = 1.0 - 1.0 / min_mag
    prod_b_high = 1.0 - 1.0 / max_mag
    prod_b_trfm = scale_trfm(prod_b_low, prod_b_high)
    prod_b_bias = scale_bias(prod_b_low, prod_b_high)

    def norm_sub_func(x,
                      nsit=norm_sub_in_trfm,
                      nsib=norm_sub_in_bias,
                      pbt=prod_b_trfm,
                      pbb=prod_b_bias):
        return norm_subtract_func(x, nsit, nsib, pbt, pbb)

    def prod_func(x, y, pat=prod_a_trfm, pbt=prod_b_trfm, pbb=prod_b_bias):
        return prod_out_func(x, y, pat, 0, pbt, pbb)

    with net:
        net.input = nengo.Node(size_in=dimensions)
        net.output = nengo.Node(size_in=dimensions)
        bias_node = nengo.Node(1)

        # Ensemble array to represent input vector and to compute vector
        # norm
        norm_array = EnsembleArray(n_neurons_norm,
                                   dimensions,
                                   radius=max_radius_scale)
        norm_array.add_output('squared', lambda x: x**2)
        nengo.Connection(net.input, norm_array.input)

        # Ensemble to calculate amount of magnitude to be subtracted
        # i.e. (1 - 1 / np.linalg.norm(input))
        norm_subtract_ens = nengo.Ensemble(n_neurons_norm,
                                           1,
                                           n_eval_points=5000)
        nengo.Connection(norm_array.squared,
                         norm_subtract_ens,
                         transform=np.ones((1, dimensions)) * norm_sub_in_trfm)
        nengo.Connection(bias_node,
                         norm_subtract_ens,
                         transform=norm_sub_in_bias + norm_sub_in_bias_offset)

        # Product network to compute product between input vector and
        # magnitude to be subtracted
        prod_array = Product(n_neurons_prod, dimensions)
        for e in prod_array.product.ensembles:
            e.n_eval_points = 5000
        prod_array.product.add_output('prod2', lambda x: prod_func(x[0], x[1]))
        prod_array.prod2 = prod_array.product.prod2

        nengo.Connection(norm_array.output,
                         prod_array.A,
                         transform=prod_a_trfm)
        nengo.Connection(norm_subtract_ens,
                         prod_array.B,
                         function=norm_sub_func,
                         transform=np.ones((dimensions, 1)))

        # Output connections
        nengo.Connection(norm_array.output, net.output)
        nengo.Connection(prod_array.prod2,
                         net.output,
                         transform=-subtract_scale)
    return net
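
# Illustrative usage (not part of the original source); assumes nengo and
# numpy are imported and that VectorNormalize, together with its helper
# functions (scale_trfm, scale_bias, norm_subtract_func, prod_out_func),
# is available in the surrounding module:
#
#     with nengo.Network() as model:
#         stim = nengo.Node([1.2, 0.0, 1.6])   # input magnitude 2.0
#         vn = VectorNormalize(min_mag=0.5, max_mag=2.0, dimensions=3)
#         nengo.Connection(stim, vn.input)
#         p_out = nengo.Probe(vn.output, synapse=0.03)
#
# After simulation, p_out should settle near [0.6, 0.0, 0.8], i.e. the input
# rescaled to unit length, for magnitudes within [min_mag, max_mag].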
Ejemplo n.º 29
0
import nengo
import numpy as np
from nengo.networks import CircularConvolution, EnsembleArray

model = nengo.Network(label="Neural Extraction")

D = 10
num_items = 20

threshold_func = lambda x: 1.0 if x > 0.3 else 0.0

with model:
    extract = nengo.Network(label="Extract")
    with extract:
        a = EnsembleArray(n_neurons=80, n_ensembles=D)
        b = EnsembleArray(n_neurons=80, n_ensembles=D)
        c = CircularConvolution(n_neurons=80, dimensions=D)
        d = EnsembleArray(n_neurons=80, n_ensembles=D)

        nengo.Connection(a.output, c.A)
        nengo.Connection(b.output, c.B)
        nengo.Connection(c.output, d.input)

    assoc = nengo.Network(label="Associate")
    assoc_nodes = []
    with assoc:
        assoc_input = nengo.Node(size_in=D)
        assoc_output = nengo.Node(size_in=D)

        for item in range(num_items):
            assoc_nodes.append(nengo.Ensemble(n_neurons=20, dimensions=1))
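
        # NOTE: the original snippet is truncated here. The wiring below is a
        # hypothetical completion (an assumption, not part of the source):
        # give each item ensemble a random unit "item vector", compare the
        # convolved result against it, threshold the similarity with
        # threshold_func, and reconstruct the winning vector on assoc_output.
        item_vectors = nengo.dists.UniformHypersphere(surface=True).sample(
            num_items, D)
        for item, ens in enumerate(assoc_nodes):
            nengo.Connection(assoc_input, ens,
                             transform=item_vectors[item][np.newaxis, :])
            nengo.Connection(ens, assoc_output,
                             function=threshold_func,
                             transform=item_vectors[item][:, np.newaxis])

    # Hypothetical link between the two stages (also not in the original):
    nengo.Connection(d.output, assoc_input)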
Ejemplo n.º 30
0
    def test_matrix_mul(self):
        N = 100

        Amat = np.asarray([[.5, -.5]])
        Bmat = np.asarray([[0, -1.], [.7, 0]])
        radius = 1

        model = nengo.Model('Matrix Multiplication', seed=123)
        with model:
            A = EnsembleArray(nengo.LIF(N * Amat.size),
                              Amat.size, radius=radius)
            B = EnsembleArray(nengo.LIF(N * Bmat.size),
                              Bmat.size, radius=radius)

            inputA = nengo.Node(output=Amat.ravel())
            inputB = nengo.Node(output=Bmat.ravel())
            nengo.Connection(inputA, A.input)
            nengo.Connection(inputB, B.input)
            A_p = nengo.Probe(
                A.output, 'output', sample_every=0.01, filter=0.01)
            B_p = nengo.Probe(
                B.output, 'output', sample_every=0.01, filter=0.01)

            C = EnsembleArray(nengo.LIF(N * Amat.size * Bmat.shape[1] * 2),
                              Amat.size * Bmat.shape[1],
                              dimensions_per_ensemble=2,
                              radius=1.5 * radius)

            for ens in C.ensembles:
                ens.encoders = np.tile([[1, 1], [-1, 1], [1, -1], [-1, -1]],
                                       (ens.n_neurons // 4, 1))

            transformA = np.zeros((C.dimensions, Amat.size))
            transformB = np.zeros((C.dimensions, Bmat.size))

            for i in range(Amat.shape[0]):
                for j in range(Amat.shape[1]):
                    for k in range(Bmat.shape[1]):
                        tmp = (j + k * Amat.shape[1] + i * Bmat.size)
                        transformA[tmp * 2][j + i * Amat.shape[1]] = 1
                        transformB[tmp * 2 + 1][k + j * Bmat.shape[1]] = 1

            nengo.Connection(A.output, C.input, transform=transformA)
            nengo.Connection(B.output, C.input, transform=transformB)
            C_p = nengo.Probe(
                C.output, 'output', sample_every=0.01, filter=0.01)

            D = EnsembleArray(nengo.LIF(N * Amat.shape[0] * Bmat.shape[1]),
                              Amat.shape[0] * Bmat.shape[1], radius=radius)

            def product(x):
                return x[0]*x[1]

            transformC = np.zeros((D.dimensions, Bmat.size))
            for i in range(Bmat.size):
                transformC[i // Bmat.shape[0]][i] = 1

            prod = C.add_output("product", product)

            nengo.Connection(prod, D.input, transform=transformC)
            D_p = nengo.Probe(
                D.output, 'output', sample_every=0.01, filter=0.01)

        sim = self.Simulator(model)
        sim.run(1)

        with Plotter(self.Simulator) as plt:
            t = sim.trange(dt=0.01)
            plt.plot(t, sim.data(D_p))
            for d in np.dot(Amat, Bmat).flatten():
                plt.axhline(d, color='k')
            plt.savefig('test_ensemble_array.test_matrix_mul.pdf')
            plt.close()

        self.assertTrue(np.allclose(sim.data(A_p)[50:, 0], 0.5,
                                    atol=.1, rtol=.01))
        self.assertTrue(np.allclose(sim.data(A_p)[50:, 1], -0.5,
                                    atol=.1, rtol=.01))

        self.assertTrue(np.allclose(sim.data(B_p)[50:, 0], 0,
                                    atol=.1, rtol=.01))
        self.assertTrue(np.allclose(sim.data(B_p)[50:, 1], -1,
                                    atol=.1, rtol=.01))
        self.assertTrue(np.allclose(sim.data(B_p)[50:, 2], .7,
                                    atol=.1, rtol=.01))
        self.assertTrue(np.allclose(sim.data(B_p)[50:, 3], 0,
                                    atol=.1, rtol=.01))

        Dmat = np.dot(Amat, Bmat)
        for i in range(Amat.shape[0]):
            for k in range(Bmat.shape[1]):
                self.assertTrue(np.allclose(
                    sim.data(D_p)[-10:, i * Bmat.shape[1] + k],
                    Dmat[i, k],
                    atol=0.1, rtol=0.1),
                    (sim.data(D_p)[-10:, i * Bmat.shape[1] + k], Dmat[i, k]))
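
    # For reference, the transforms above route each required factor pair
    # (A[i, j], B[j, k]) into one two-dimensional ensemble of C, and
    # transformC then sums the per-ensemble products over j, so that
    # D[i, k] = sum_j A[i, j] * B[j, k]. For the matrices used in this test,
    #     np.dot(Amat, Bmat) == [[-0.35, -0.5]]
    # which is exactly the value Dmat that the final assertions compare
    # the decoded output of D against.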
Ejemplo n.º 31
0
def InputGatedMemory(n_neurons,
                     dimensions,
                     fdbk_scale=1.0,
                     gate_gain=10,
                     difference_gain=1.0,
                     reset_gain=3,
                     mem_config=None,
                     net=None):
    """Stores a given vector in memory, with input controlled by a gate."""
    if net is None:
        net = nengo.Network(label="Input Gated Memory")

    if mem_config is None:
        mem_config = nengo.Config(nengo.Connection)
        mem_config[nengo.Connection].synapse = nengo.Lowpass(0.1)

    n_total_neurons = n_neurons * dimensions

    with net:
        # integrator to store value
        with mem_config:
            net.mem = EnsembleArray(n_neurons,
                                    dimensions,
                                    neuron_nodes=True,
                                    label="mem")
            nengo.Connection(net.mem.output,
                             net.mem.input,
                             transform=fdbk_scale)

        # calculate difference between stored value and input
        net.diff = EnsembleArray(n_neurons,
                                 dimensions,
                                 neuron_nodes=True,
                                 label="diff")
        nengo.Connection(net.mem.output, net.diff.input, transform=-1)

        # feed difference into integrator
        with mem_config:
            nengo.Connection(net.diff.output,
                             net.mem.input,
                             transform=difference_gain)

        # gate difference (if gate==0, update stored value,
        # otherwise retain stored value)
        net.gate = nengo.Node(size_in=1)
        nengo.Connection(net.gate,
                         net.diff.neuron_input,
                         transform=np.ones((n_total_neurons, 1)) * -gate_gain,
                         synapse=None)

        # reset input (if reset=1, remove all values, and set to 0)
        net.reset = nengo.Node(size_in=1)
        nengo.Connection(net.reset,
                         net.mem.neuron_input,
                         transform=np.ones((n_total_neurons, 1)) * -reset_gain,
                         synapse=None)

    net.input = net.diff.input
    net.output = net.mem.output

    return net
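
# Illustrative usage (not part of the original source); assumes nengo is
# imported and InputGatedMemory is defined as above:
with nengo.Network() as model:
    stim = nengo.Node(lambda t: [0.8, -0.4] if t < 0.5 else [0, 0])
    gate = nengo.Node(lambda t: 0 if t < 0.5 else 1)
    wm = InputGatedMemory(n_neurons=100, dimensions=2)
    nengo.Connection(stim, wm.input)
    nengo.Connection(gate, wm.gate)
    p_mem = nengo.Probe(wm.output, synapse=0.03)
# While gate is 0 the memory tracks the stimulus; once gate switches to 1,
# the stored value (roughly [0.8, -0.4]) is retained after stim goes to 0.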
Ejemplo n.º 32
0
def make_ensarray_func(n_neurons, dimensions, **ens_args):
    # Pop 'n_ensembles' from ens_args so it is not passed to EnsembleArray
    # twice (once positionally and once as a keyword argument).
    n_ensembles = ens_args.pop('n_ensembles', dimensions)
    return EnsembleArray(n_neurons, n_ensembles, **ens_args)
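
# Illustrative usage (not part of the original source); with pop() above, an
# explicit 'n_ensembles' keyword no longer collides with the positional
# argument passed to EnsembleArray:
ea_default = make_ensarray_func(50, dimensions=16)                # 16 x 1-D
ea_grouped = make_ensarray_func(50, dimensions=32,
                                n_ensembles=8, ens_dimensions=4)  # 8 x 4-D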
Ejemplo n.º 33
0
    def build(self, input_manager, olfactor=False):

        with self:
            if olfactor:
                N = input_manager.Nodors
                odors = Node(input_manager.get_odor_concentrations, size_in=N)

                odor_memory = EnsembleArray(n_neurons=100, n_ensembles=N, ens_dimensions=2)
                odor_change = EnsembleArray(n_neurons=200, n_ensembles=N, ens_dimensions=1, radius=0.01,
                                            intercepts=dists.Uniform(0, 0.1))
                # odor_change_Node = Node(size_in=N)
                # print(odor_change.neuron_output[0])
                for i in range(N):
                    Connection(odors[i], odor_memory.ensembles[i][0], transform=[[1]], synapse=0.01)
                    Connection(odor_memory.ensembles[i][0], odor_memory.ensembles[i][1], transform=1, synapse=1.0)
                    Connection(odor_memory.ensembles[i][0], odor_change.input[i], transform=1, synapse=0.1)
                    Connection(odor_memory.ensembles[i][1], odor_change.input[i], transform=-1, synapse=0.1)

                # Collect data for plotting
                self.p_odor = Probe(odors)
                # self.ens_probe = nengo.Probe(ens.output, synapse=0.01)
                self.p_change = Probe(odor_change.output)
                # self.p_change = odor_change.probes


            x = Ensemble(n_neurons=200, dimensions=3, neuron_type=Direct())
            y = Ensemble(n_neurons=200, dimensions=3, neuron_type=Direct())
            z = Ensemble(n_neurons=200, dimensions=3, neuron_type=Direct())
            synapse = 1.0
            # synapse=0.1

            def linear_oscillator(x):
                dr = 1 - x[0] ** 2 - x[1] ** 2
                s = 2 * np.pi * x[2] / 2
                if s > 0.1:
                    v = synapse * -x[1] * s + x[0] * dr + x[0]
                    n = synapse * x[0] * s + x[1] * dr + x[1]
                    return [v, n]
                else:
                    return [0, 1]

            def angular_oscillator(x):
                dr = 1 - x[0] ** 2 - x[1] ** 2
                s = 2 * np.pi * x[2]
                if s > 0.1:
                    return [synapse * -x[1] * s + x[0] * dr + x[0],
                            synapse * x[0] * s + x[1] * dr + x[1]]
                else:
                    return [0, 1]

            def feeding_oscillator(x):
                dr = 1 - x[0] ** 2 - x[1] ** 2
                s = 2 * np.pi * x[2]
                if s > 0.1:
                    return [synapse * -x[1] * s + x[0] * dr + x[0],
                            synapse * x[0] * s + x[1] * dr + x[1]]
                else:
                    return [0, 1]

            def oscillator_interference(x):
                coup = input_manager.osc_coupling
                c0 = coup.crawler_interference_start
                cr = 1 - coup.feeder_interference_free_window / np.pi
                f0 = coup.crawler_interference_start
                fr = 1 - coup.feeder_interference_free_window / np.pi
                r = coup.interference_ratio
                if x[0] > 0 or x[2] > 0:
                    v = [x[0], 0, x[2]]
                else:
                    v = x
                return v

            def crawler(x):
                return np.abs(x)*2 * input_manager.scaled_stride_step

            def turner(x):
                return x * input_manager.turner.get_amp(0)

            def feeder(x):
                if x > 0.999:
                    return 1
                else:
                    return 0

            Connection(x, x[:2], synapse=synapse, function=linear_oscillator)
            Connection(y, y[:2], synapse=synapse, function=angular_oscillator)
            Connection(z, z[:2], synapse=synapse, function=feeding_oscillator)
            linear_freq_node = Node(input_manager.crawler.get_freq, size_out=1)
            angular_freq_node = Node(input_manager.turner.get_freq, size_out=1)
            feeding_freq_node = Node(input_manager.feeder.get_freq, size_out=1)

            linear_freq = Ensemble(n_neurons=50, dimensions=1, neuron_type=Direct())
            angular_freq = Ensemble(n_neurons=50, dimensions=1, neuron_type=Direct())
            feeding_freq = Ensemble(n_neurons=50, dimensions=1, neuron_type=Direct())

            Connection(linear_freq_node, linear_freq)
            Connection(angular_freq_node, angular_freq)
            Connection(feeding_freq_node, feeding_freq)

            Connection(linear_freq, x[2])
            Connection(angular_freq, y[2])
            Connection(feeding_freq, z[2])

            interference = Ensemble(n_neurons=200, dimensions=3, neuron_type=Direct())
            Connection(x[0], interference[0], synapse=0)
            Connection(y[0], interference[1], synapse=0)
            Connection(z[0], interference[2], synapse=0)

            speeds = Ensemble(n_neurons=200, dimensions=3, neuron_type=Direct())
            Connection(interference, speeds, synapse=0.01, function=oscillator_interference)

            linear_s = Node(size_in=1)
            angular_s = Node(size_in=1)
            feeding_s = Node(size_in=1)

            Connection(speeds[0], linear_s, synapse=0, function=crawler)
            Connection(speeds[1], angular_s, synapse=0, function=turner)
            Connection(speeds[2], feeding_s, synapse=0, function=feeder)

            # Collect data for plotting
            self.p_speeds = Probe(speeds)
            self.p_linear_s = Probe(linear_s)
            self.p_angular_s = Probe(angular_s)
            self.p_feeding_s = Probe(feeding_s)
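
# The recurrent Connection(x, x[:2], ...) pattern above is the standard NEF
# way of implementing a dynamical system dx/dt = f(x): the feedback function
# returns (approximately) tau * f(x) + x and is passed through a lowpass
# synapse with time constant tau (the `synapse` variable above). A minimal,
# self-contained sketch of a harmonic oscillator built this way
# (illustrative only, not part of the original source):
import nengo
import numpy as np

tau = 0.1            # recurrent lowpass synapse time constant
omega = 2 * np.pi    # target frequency: 1 Hz


def oscillator_feedback(x):
    # dx/dt = [[0, -omega], [omega, 0]] x  ->  feed back tau * dx/dt + x
    return [tau * (-omega * x[1]) + x[0],
            tau * (omega * x[0]) + x[1]]


with nengo.Network(label="NEF oscillator demo") as demo:
    osc = nengo.Ensemble(n_neurons=200, dimensions=2)
    nengo.Connection(osc, osc, function=oscillator_feedback, synapse=tau)
    # brief kick to push the state off the fixed point at the origin
    kick = nengo.Node(lambda t: [1, 0] if t < 0.1 else [0, 0])
    nengo.Connection(kick, osc)
    probe = nengo.Probe(osc, synapse=0.01)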
Ejemplo n.º 34
0
    def build_input(self, net):
        assert self.trial.trajectory is not None, "Must define trajectory"
        net.trajectory = EnsembleArray(80, n_ensembles=48)
        # Sneaky: override the net.trajectory.input node output
        net.trajectory.input.output = ArrayProcess(self.trial.trajectory)
        net.trajectory.input.size_in = 0
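
    # An equivalent, more conventional wiring (a sketch; it assumes that
    # ArrayProcess is a nengo.Process that plays the trajectory array back
    # sample by sample): instead of overriding the passthrough node's output
    # in place, create a dedicated input node and connect it in.
    #
    #     def build_input(self, net):
    #         assert self.trial.trajectory is not None, "Must define trajectory"
    #         net.trajectory = EnsembleArray(80, n_ensembles=48)
    #         with net:
    #             traj_in = nengo.Node(ArrayProcess(self.trial.trajectory))
    #             nengo.Connection(traj_in, net.trajectory.input, synapse=None)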