Example #1
def build_dense(model,
                transform,
                sig_in,
                decoders=None,
                encoders=None,
                rng=np.random):
    """Build a `.Dense` transform object."""

    weights = transform.sample(rng=rng).astype(rc.float_dtype)

    if decoders is not None:
        weights = multiply(weights, decoders.astype(rc.float_dtype))
    if encoders is not None:
        weights = multiply(encoders.astype(rc.float_dtype).T, weights)

    # Add operator for applying weights
    weight_sig = Signal(weights, readonly=True, name="%s.weights" % transform)
    weighted = Signal(
        shape=transform.size_out if encoders is None else weights.shape[0],
        name="%s.weighted" % transform,
    )
    model.add_op(Reset(weighted))

    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform))

    return weighted, weight_sig
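The `op` choice above mirrors NumPy semantics: a weight array with fewer than two dimensions scales the input elementwise, while a 2-D matrix is applied as a matrix-vector product. A rough NumPy sketch of the two increments (illustrative only):

import numpy as np

x = np.array([1.0, 2.0])
y = np.zeros(2)

w_elem = np.array([0.5, 3.0])       # ndim < 2 -> ElementwiseInc: y += w * x
y += w_elem * x

w_mat = np.array([[0.0, 1.0],
                  [1.0, 0.0]])      # ndim == 2 -> DotInc: y += w @ x
y += w_mat @ x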
Example #2
def build_convolution(model,
                      transform,
                      sig_in,
                      decoders=None,
                      encoders=None,
                      rng=np.random):
    if decoders is not None:
        raise BuildError("Applying a convolution transform to a decoded "
                         "connection is not supported")
    if encoders is not None:
        raise BuildError(
            "Applying encoders to a convolution transform is not supported")

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    weighted = Signal(np.zeros(transform.size_out),
                      name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(weight_sig,
                sig_in,
                weighted,
                transform,
                tag="%s.apply_weights" % transform))

    return weighted, weight_sig
Example #3
def test_sparsedotinc_builderror():
    A = Signal(np.ones(2))
    X = Signal(np.ones(2))
    Y = Signal(np.ones(2))

    with pytest.raises(BuildError, match="must be a sparse Signal"):
        SparseDotInc(A, X, Y)
Example #4
def build_rls(model, rls, rule):
    conn = rule.connection
    pre_activities = model.sig[conn.pre_obj]['out']

    pre_filtered = (pre_activities if rls.pre_synapse is None else model.build(
        rls.pre_synapse, pre_activities))

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="RLS:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    # Create signal for running estimate of inverse correlation matrix
    assert pre_filtered.ndim == 1
    n_neurons = pre_filtered.shape[0]
    inv_gamma = Signal(np.eye(n_neurons) * rls.learning_rate,
                       name="RLS:inv_gamma")

    model.add_op(
        SimRLS(pre_filtered=pre_filtered,
               error=error,
               delta=model.sig[rule]['delta'],
               inv_gamma=inv_gamma))

    # expose these for probes
    model.sig[rule]['pre_filtered'] = pre_filtered
    model.sig[rule]['error'] = error
    model.sig[rule]['inv_gamma'] = inv_gamma
Example #5
def build_sparse(model, transform, sig_in, decoders=None, encoders=None, rng=np.random):
    """Build a `.Sparse` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a sparse transform to a decoded connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    # Add output signal
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    weights = transform.sample(rng=rng)
    assert weights.ndim == 2

    # Add operator for applying weights
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    model.add_op(
        SparseDotInc(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform)
    )

    return weighted, weight_sig
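SparseDotInc performs the same `Y += A @ X` increment as DotInc, but with a sparse weight matrix. A minimal SciPy sketch of that increment (assumes `scipy` is available; illustrative only):

import numpy as np
import scipy.sparse

weights = scipy.sparse.random(3, 5, density=0.2, format="csr")
sig_in = np.ones(5)
weighted = np.zeros(3)
weighted += weights @ sig_in   # the increment SparseDotInc applies each step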
Example #6
def test_encoder_decoder_with_views(RefSimulator):
    foo = Signal([1.0], name="foo")
    decoders = np.asarray([.2, .1])
    m = Model(dt=0.001)
    sig_in, sig_out = build_pyfunc(lambda t, x: x + 1, True, 2, 2, None, m)
    m.operators += [
        DotInc(Signal([[1.0], [2.0]]), foo[:], sig_in),
        ProdUpdate(Signal(decoders * 0.5), sig_out, Signal(0.2), foo[:])
    ]

    def check(sig, target):
        assert np.allclose(sim.signals[sig], target)

    sim = RefSimulator(None, model=m)

    sim.step()
    # DotInc to pop.input_signal (input=[1.0,2.0])
    # produpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2,3])

    check(foo, .2)
    check(sig_in, [1, 2])
    check(sig_out, [2, 3])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2,0.4])
    #  (note that pop resets its own input signal each timestep)
    # produpdate updates foo (foo=[0.39]) 0.2*0.5*2+0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2,1.4])

    check(foo, .39)
    check(sig_in, [0.2, 0.4])
    check(sig_out, [1.2, 1.4])
Example #7
    def test_signal_indexing_1(self):
        m = nengo.Model("test_signal_indexing_1")

        one = Signal(n=1, name='a')
        two = Signal(n=2, name='b')
        three = Signal(n=3, name='c')
        tmp = Signal(n=3, name='tmp')
        m.signals = [one, two, three, tmp]

        m.operators = [
            ProdUpdate(Constant(1), three[:1], Constant(0), one),
            ProdUpdate(Constant(2.0), three[1:], Constant(0), two),
            Reset(tmp),
            DotInc(Constant([[0, 0, 1], [0, 1, 0], [1, 0, 0]]), three, tmp),
            Copy(src=tmp, dst=three, as_update=True),
        ]

        sim = m.simulator(sim_class=self.Simulator, builder=testbuilder)
        sim.signals[three] = np.asarray([1, 2, 3])
        sim.step()
        self.assertTrue(np.all(sim.signals[one] == 1))
        self.assertTrue(np.all(sim.signals[two] == [4, 6]))
        self.assertTrue(np.all(sim.signals[three] == [3, 2, 1]))
        sim.step()
        self.assertTrue(np.all(sim.signals[one] == 3))
        self.assertTrue(np.all(sim.signals[two] == [4, 2]))
        self.assertTrue(np.all(sim.signals[three] == [1, 2, 3]))
Example #8
    def test_simple_direct_mode(self):
        dt = 0.001
        m = nengo.Model("test_simple_direct_mode")

        time = Signal(n=1, name='time')
        sig = Signal(n=1, name='sig')
        pop = Direct(n_in=1, n_out=1, fn=np.sin)
        m.signals = [sig, time]
        m.operators = []
        Builder().build_direct(pop, m, dt)
        m.operators += [
            ProdUpdate(Constant(dt), Constant(1), Constant(1), time),
            DotInc(Constant([[1.0]]), time, pop.input_signal),
            ProdUpdate(Constant([[1.0]]), pop.output_signal, Constant(0), sig)
        ]

        sim = m.simulator(sim_class=self.Simulator, dt=dt, builder=testbuilder)
        sim.step()
        for i in range(5):
            sim.step()

            t = (i + 2) * dt
            self.assertTrue(np.allclose(sim.signals[time], t),
                            msg='%s != %s' % (sim.signals[time], t))
            self.assertTrue(np.allclose(sim.signals[sig], np.sin(t - dt * 2)),
                            msg='%s != %s' %
                            (sim.signals[sig], np.sin(t - dt * 2)))
Example #9
def initialise_memristors(rule, in_size, out_size):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np.random.seed(rule.seed)
        r_min_noisy = get_truncated_normal(
            rule.r_min, rule.r_min * rule.noise_percentage[0], 0, np.inf,
            out_size, in_size)
        np.random.seed(rule.seed)
        r_max_noisy = get_truncated_normal(
            rule.r_max, rule.r_max * rule.noise_percentage[1],
            np.max(r_min_noisy), np.inf, out_size, in_size)

    np.random.seed(rule.seed)
    # from Eq. 7 in paper
    exponent = -0.093 - 0.53 * rule.voltage

    exponent_noisy = np.random.normal(
        exponent,
        np.abs(exponent) * rule.noise_percentage[2], (out_size, in_size))

    np.random.seed(rule.seed)
    pos_mem_initial = np.random.normal(1e8, 1e8 * rule.noise_percentage[3],
                                       (out_size, in_size))
    if rule.seed:
        np.random.seed(rule.seed + 1)
    else:
        np.random.seed(rule.seed)
    neg_mem_initial = np.random.normal(1e8, 1e8 * rule.noise_percentage[3],
                                       (out_size, in_size))

    pos_memristors = Signal(shape=(out_size, in_size),
                            name=f"{rule}:pos_memristors",
                            initial_value=pos_mem_initial)
    neg_memristors = Signal(shape=(out_size, in_size),
                            name=f"{rule}:neg_memristors",
                            initial_value=neg_mem_initial)

    return pos_memristors, neg_memristors, r_min_noisy, r_max_noisy, exponent_noisy
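The `get_truncated_normal` helper used above is not shown; one plausible implementation with `scipy.stats.truncnorm`, which takes its bounds in standard-deviation units (a sketch, assuming the helper's signature is (mean, std, low, high, out_size, in_size) and std > 0):

import numpy as np
from scipy.stats import truncnorm

def get_truncated_normal(mean, std, low, high, out_size, in_size):
    # Rescale absolute bounds to standard-deviation units, as truncnorm expects.
    # (Assumes std > 0; a zero-noise case would need special handling.)
    a, b = (low - mean) / std, (high - mean) / std
    return truncnorm.rvs(a, b, loc=mean, scale=std, size=(out_size, in_size))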
Example #10
def build_convolution(
    model, transform, sig_in, decoders=None, encoders=None, rng=np.random
):
    """Build a `.Convolution` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a convolution transform to a decoded "
            "connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, readonly=True, name="%s.weights" % transform)
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(
            weight_sig, sig_in, weighted, transform, tag="%s.apply_weights" % transform
        )
    )

    return weighted, weight_sig
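ConvInc applies the sampled filter weights to `sig_in` as a convolution rather than a dense matrix product. For intuition, a naive single-channel 2-D correlation with "valid" padding (a sketch of the kind of computation involved; nengo's `Convolution` transform additionally handles channels, strides, and padding options):

import numpy as np

def correlate2d_valid(x, kernel):
    kh, kw = kernel.shape
    out = np.empty((x.shape[0] - kh + 1, x.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            # Slide the kernel over the input and accumulate the overlap.
            out[i, j] = np.sum(x[i:i + kh, j:j + kw] * kernel)
    return out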
Example #11
def build_izhikevich(model, izhikevich, neurons):
    """Builds an `.Izhikevich` object into a model.

    In addition to adding a `.SimNeurons` operator, this build function sets up
    signals to track the voltage and recovery terms for each neuron.

    Parameters
    ----------
    model : Model
        The model to build into.
    izhikevich : Izhikevich
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.Izhikevich` instance.
    """

    model.sig[neurons]['voltage'] = Signal(
        np.ones(neurons.size_in) * izhikevich.reset_voltage,
        name="%s.voltage" % neurons)
    model.sig[neurons]['recovery'] = Signal(
        np.ones(neurons.size_in)
        * izhikevich.reset_voltage
        * izhikevich.coupling, name="%s.recovery" % neurons)
    model.add_op(SimNeurons(neurons=izhikevich,
                            J=model.sig[neurons]['in'],
                            output=model.sig[neurons]['out'],
                            states=[model.sig[neurons]['voltage'],
                                    model.sig[neurons]['recovery']]))
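For reference, the dynamics SimNeurons integrates for these voltage and recovery states follow Izhikevich (2003). A single Euler step, sketched in NumPy (illustrative; nengo's actual implementation differs in details such as time scaling and voltage clipping):

import numpy as np

def izhikevich_step(v, u, J, dt=0.001, a=0.02, b=0.2, c=-65.0, d=8.0):
    # Membrane and recovery dynamics, defined in the paper in millisecond units.
    dv = 0.04 * v ** 2 + 5 * v + 140 - u + J
    du = a * (b * v - u)
    v = v + 1000 * dt * dv
    u = u + 1000 * dt * du
    spiked = v >= 30.0
    v = np.where(spiked, c, v)       # reset voltage on spike
    u = np.where(spiked, u + d, u)   # bump recovery term on spike
    return v, u, spiked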
Example #12
def build_alif(model, alif, neurons):
    """Builds an `.AdaptiveLIF` object into a model.

    In addition to adding a `.SimNeurons` operator, this build function sets up
    signals to track the voltage, refractory time, and adaptation term
    for each neuron.

    Parameters
    ----------
    model : Model
        The model to build into.
    alif : AdaptiveLIF
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.AdaptiveLIF` instance.
    """

    model.sig[neurons]['voltage'] = Signal(
        np.zeros(neurons.size_in), name="%s.voltage" % neurons)
    model.sig[neurons]['refractory_time'] = Signal(
        np.zeros(neurons.size_in), name="%s.refractory_time" % neurons)
    model.sig[neurons]['adaptation'] = Signal(
        np.zeros(neurons.size_in), name="%s.adaptation" % neurons)
    model.add_op(SimNeurons(neurons=alif,
                            J=model.sig[neurons]['in'],
                            output=model.sig[neurons]['out'],
                            states=[model.sig[neurons]['voltage'],
                                    model.sig[neurons]['refractory_time'],
                                    model.sig[neurons]['adaptation']]))
Example #13
def build_dense(model,
                transform,
                sig_in,
                decoders=None,
                encoders=None,
                rng=np.random):
    weights = transform.sample(rng=rng)

    if decoders is not None:
        weights = multiply(weights, decoders)
    if encoders is not None:
        weights = multiply(encoders.T, weights)

    # Add operator for applying weights
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    weighted = Signal(
        np.zeros(transform.size_out if encoders is None else weights.shape[0]),
        name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform))

    return weighted, weight_sig
Example #14
def test_signal_indexing_1(RefSimulator):
    one = Signal(np.zeros(1), name="a")
    two = Signal(np.zeros(2), name="b")
    three = Signal(np.zeros(3), name="c")
    tmp = Signal(np.zeros(3), name="tmp")

    m = Model(dt=0.001)
    m.operators += [
        ProdUpdate(Signal(1, name="A1"), three[:1], Signal(0, name="Z0"), one),
        ProdUpdate(Signal(2.0, name="A2"), three[1:], Signal(0, name="Z1"),
                   two),
        Reset(tmp),
        DotInc(Signal([[0, 0, 1], [0, 1, 0], [1, 0, 0]], name="A3"), three,
               tmp),
        Copy(src=tmp, dst=three, as_update=True),
    ]

    sim = RefSimulator(None, model=m)
    sim.signals[three] = np.asarray([1, 2, 3])
    sim.step()
    assert np.all(sim.signals[one] == 1)
    assert np.all(sim.signals[two] == [4, 6])
    assert np.all(sim.signals[three] == [3, 2, 1])
    sim.step()
    assert np.all(sim.signals[one] == 3)
    assert np.all(sim.signals[two] == [4, 2])
    assert np.all(sim.signals[three] == [1, 2, 3])
Example #15
File: neurons.py Project: hunse/phd
def build_if(model, neuron_type, neurons):
    """Builds a `.IF` object into a model. """
    model.sig[neurons]['voltage'] = Signal(
        np.zeros(neurons.size_in), name="%s.voltage" % neurons)
    model.sig[neurons]['refractory_time'] = Signal(
        np.zeros(neurons.size_in), name="%s.refractory_time" % neurons)
    model.add_op(SimNeurons(
        neurons=neuron_type,
        J=model.sig[neurons]['in'],
        output=model.sig[neurons]['out'],
        states=[model.sig[neurons]['voltage'],
                model.sig[neurons]['refractory_time']]))
Example #16
def build_process(model, process, sig_in=None, sig_out=None, mode="set"):
    """Builds a `.Process` object into a model.

    Parameters
    ----------
    model : Model
        The model to build into.
    process : Process
        Process to build.
    sig_in : Signal, optional
        The input signal, or None if no input signal.
    sig_out : Signal, optional
        The output signal, or None if no output signal.
    mode : "set" or "inc" or "update", optional
        The ``mode`` of the built `.SimProcess`.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.Process` instance.
    """
    if sig_out is None and sig_in is not None:
        sig_out = Signal(shape=sig_in.shape, name="%s.%s" % (sig_in.name, process))

    shape_in = sig_in.shape if sig_in is not None else (0,)
    shape_out = sig_out.shape if sig_out is not None else (0,)
    dtype = (
        sig_out.dtype
        if sig_out is not None
        else sig_in.dtype
        if sig_in is not None
        else rc.float_dtype
    )
    state_init = process.make_state(shape_in, shape_out, model.dt, dtype=dtype)
    state = {}
    for name, value in state_init.items():
        state[name] = Signal(value)
        model.sig[process]["_state_" + name] = state[name]

    model.add_op(
        SimProcess(
            process,
            sig_in,
            sig_out,
            model.time,
            mode=mode,
            state=state,
            tag=str(process),
        )
    )

    return sig_out
Example #17
def build_lif(model, lif, neurons):
    model.sig[neurons]['voltage'] = Signal(np.zeros(neurons.size_in),
                                           name="%s.voltage" % neurons)
    model.sig[neurons]['refractory_time'] = Signal(np.zeros(neurons.size_in),
                                                   name="%s.refractory_time" %
                                                   neurons)
    model.add_op(
        SimNeurons(neurons=lif,
                   J=model.sig[neurons]['in'],
                   output=model.sig[neurons]['out'],
                   states=[
                       model.sig[neurons]['voltage'],
                       model.sig[neurons]['refractory_time']
                   ]))
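The voltage and refractory_time states are stepped by SimNeurons according to the LIF dynamics. A forward-Euler sketch of one step (illustrative; nengo's reference implementation uses a zero-order-hold discretization and refines spike times within the step):

import numpy as np

def lif_step(voltage, refractory_time, J, dt=0.001, tau_rc=0.02, tau_ref=0.002):
    refractory_time = np.maximum(0.0, refractory_time - dt)
    delta_t = np.clip(dt - refractory_time, 0.0, dt)  # integration time left this step
    voltage = voltage + (J - voltage) * delta_t / tau_rc
    spiked = voltage > 1.0
    output = spiked / dt                              # spike output is scaled by 1/dt
    voltage = np.where(spiked, 0.0, voltage)
    refractory_time = np.where(spiked, tau_ref, refractory_time)
    return voltage, refractory_time, output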
Example #18
def build_rls(model, rls, rule):
    """Builds an `.RLS` (Recursive Least Squares) object into a model.

    Calls synapse build functions to filter the pre activities,
    and adds a `.SimRLS` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    rls : RLS
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.RLS` instance.
    """
    conn = rule.connection
    pre_activities = model.sig[conn.pre_obj]["out"]

    pre_filtered = (pre_activities if rls.pre_synapse is None else model.build(
        rls.pre_synapse, pre_activities))

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="RLS:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error

    # Create signal for running estimate of inverse correlation matrix
    assert pre_filtered.ndim == 1
    n_neurons = pre_filtered.shape[0]
    learning_rate = rls.learning_rate * model.dt / n_neurons
    inv_gamma = Signal(np.eye(n_neurons) * learning_rate, name="RLS:inv_gamma")

    model.add_op(
        SimRLS(
            pre_filtered=pre_filtered,
            error=error,
            delta=model.sig[rule]["delta"],
            inv_gamma=inv_gamma,
        ))

    # expose these for probes
    model.sig[rule]["pre_filtered"] = pre_filtered
    model.sig[rule]["error"] = error
    model.sig[rule]["inv_gamma"] = inv_gamma
Example #19
def signal_probe(model, key, probe):
    """Build a "signal" probe type.

    Signal probes directly probe a target signal.
    """

    try:
        sig = model.sig[probe.obj][key]
    except (KeyError, IndexError):
        raise BuildError("Attribute %r is not probeable on %s." %
                         (key, probe.obj))

    if sig is None:
        raise BuildError("Attribute %r on %s is None, cannot be probed" %
                         (key, probe.obj))

    if probe.slice is not None:
        sig = sig[probe.slice]

    if probe.synapse is None:
        model.sig[probe]["in"] = sig
    else:
        model.sig[probe]["in"] = Signal(shape=sig.shape, name=str(probe))
        model.sig[probe]["filtered"] = model.build(probe.synapse,
                                                   sig,
                                                   mode="update")
        model.add_op(Copy(model.sig[probe]["filtered"],
                          model.sig[probe]["in"]))
Example #20
def build_alifrate(model, alifrate, neurons):
    """Builds an `.AdaptiveLIFRate` object into a model.

    In addition to adding a `.SimNeurons` operator, this build function sets up
    signals to track the adaptation term for each neuron.

    Parameters
    ----------
    model : Model
        The model to build into.
    alifrate : AdaptiveLIFRate
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.AdaptiveLIFRate` instance.
    """

    model.sig[neurons]["adaptation"] = Signal(shape=neurons.size_in,
                                              name="%s.adaptation" % neurons)
    model.add_op(
        SimNeurons(
            neurons=alifrate,
            J=model.sig[neurons]["in"],
            output=model.sig[neurons]["out"],
            states=[model.sig[neurons]["adaptation"]],
        ))
Example #21
def build_aml(model, aml, rule):
    conn = rule.connection
    rng = np.random.RandomState(model.seeds[conn])

    error = Signal(np.zeros(rule.size_in), name="aml:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    pre = model.sig[conn.pre_obj]['in']
    decoders = model.sig[conn]['weights']
    delta = model.sig[rule]['delta']

    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = eval_points

    x = np.dot(eval_points, encoders.T)

    wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
                      if model.seeded[conn] else solve_for_decoders)
    base_decoders, _ = wrapped_solver(conn, gain, bias, x, targets, rng=rng)

    model.add_op(SimAML(
        aml.learning_rate, base_decoders, pre, error, decoders, delta))
Example #22
def conn_probe(model, probe):
    """Build a "connection" probe type.

    Connection probes create a connection from the target, and probe
    the resulting signal (used when you want to probe the default
    output of an object, which may not have a predefined signal).
    """

    conn = Connection(
        probe.target,
        probe,
        synapse=probe.synapse,
        solver=probe.solver,
        add_to_container=False,
    )

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeded[conn] = model.seeded[probe]
    model.seeds[conn] = model.seeds[probe]

    # Make a sink signal for the connection
    model.sig[probe]["in"] = Signal(shape=conn.size_out, name=str(probe))
    model.add_op(Reset(model.sig[probe]["in"]))

    # Build the connection
    model.build(conn)
Example #23
def build_synapse(model, synapse, sig_in, sig_out=None):
    """Builds a `.Synapse` object into a model.

    Parameters
    ----------
    model : Model
        The model to build into.
    synapse : Synapse
        Synapse to build.
    sig_in : Signal
        The input signal.
    sig_out : Signal, optional (Default: None)
        The output signal. If None, a new output signal will be
        created and returned.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.Synapse` instance.
    """
    if sig_out is None:
        sig_out = Signal(np.zeros(sig_in.shape),
                         name="%s.%s" % (sig_in.name, synapse))

    model.add_op(
        SimProcess(synapse, sig_in, sig_out, model.time, mode='update'))
    return sig_out
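A hypothetical usage sketch (assumes a built `model` and an existing 1-D Signal named `raw`; both names are illustrative):

from nengo.synapses import Lowpass

filtered = build_synapse(model, Lowpass(0.005), raw)
# `filtered` is a new Signal holding the low-pass filtered value of `raw`;
# mode='update' defers the write to the end of each simulator step.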
Example #24
def build_spikingrectifiedlinear(model, spikingrectifiedlinear, neurons):
    """Builds a `.SpikingRectifiedLinear` object into a model.

    In addition to adding a `.SimNeurons` operator, this build function sets up
    signals to track the voltage for each neuron.

    Parameters
    ----------
    model : Model
        The model to build into.
    spikingrectifiedlinear : SpikingRectifiedLinear
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.SpikingRectifiedLinear` instance.
    """

    model.sig[neurons]['voltage'] = Signal(
        np.zeros(neurons.size_in), name="%s.voltage" % neurons)
    model.add_op(SimNeurons(
        neurons=spikingrectifiedlinear,
        J=model.sig[neurons]['in'],
        output=model.sig[neurons]['out'],
        states=[model.sig[neurons]['voltage']]))
Example #25
def build_bidirectionalpowerlaw(model, bidirectionalpowerlaw, memristors):
    """Builds a `.BidirectionalPowerlawMemristor` object into a model.

    In addition to adding a `.SimMemristors` operator, this build function
    sets up signals to track the resistance for each memristor.

    Parameters
    ----------
    model : Model
        The model to build into.
    bidirectionalpowerlaw : BidirectionalPowerlaw
        Memristor type to build.
    memristors : Memristors
        The memristor population object corresponding to the memristor type.

    Notes
    -----
    TBD
    """

    model.sig[memristors]["resistance"] = Signal(shape=memristors.size_in,
                                                 name="%s.resistance" %
                                                 memristors)
    model.add_op(
        SimMemristors(
            memristors=bidirectionalpowerlaw,
            output=model.sig[memristors]["out"],
            states=[model.sig[memristors]["resistance"]],
        ))
Example #26
def build_node(model, node):
    """Builds a `.Node` object into a model.

    The node build function is relatively simple. It involves creating input
    and output signals, and connecting them with an `.Operator` that depends
    on the type of ``node.output``.

    Parameters
    ----------
    model : Model
        The model to build into.
    node : Node
        The node to build.

    Notes
    -----
    Sets ``model.params[node]`` to ``None``.
    """

    # input signal
    if not is_array_like(node.output) and node.size_in > 0:
        sig_in = Signal(shape=node.size_in, name="%s.in" % node)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    # Provide output
    if node.output is None:
        sig_out = sig_in
    elif isinstance(node.output, Process):
        sig_out = Signal(shape=node.size_out, name="%s.out" % node)
        model.build(node.output, sig_in, sig_out, mode="set")
    elif callable(node.output):
        sig_out = (
            Signal(shape=node.size_out, name="%s.out" % node)
            if node.size_out > 0
            else None
        )
        model.add_op(SimPyFunc(output=sig_out, fn=node.output, t=model.time, x=sig_in))
    elif is_array_like(node.output):
        sig_out = Signal(node.output.astype(rc.float_dtype), name="%s.out" % node)
    else:
        raise BuildError("Invalid node output type %r" % type(node.output).__name__)

    model.sig[node]["in"] = sig_in
    model.sig[node]["out"] = sig_out
    model.params[node] = None
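Each branch above corresponds to a different frontend usage of `nengo.Node`. A hedged sketch of inputs that would exercise the constant, callable, and Process branches respectively:

import nengo

with nengo.Network() as net:
    n_const = nengo.Node([1.0, 2.0])                 # array-like -> constant output Signal
    n_func = nengo.Node(lambda t, x: -x, size_in=2)  # callable -> SimPyFunc operator
    n_proc = nengo.Node(nengo.processes.WhiteNoise(),
                        size_out=2)                  # Process -> SimProcess operator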
Example #27
def build_mpes(model, mpes, rule):
    gain = 1e5

    conn = rule.connection

    # NB "mpes" is the "mPES()" frontend class

    # Create input error signal
    error = Signal(shape=rule.size_in, name="mPES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    # Filter pre-synaptic activities with pre_synapse
    acts = build_or_passthrough(model, mpes.pre_synapse,
                                model.sig[conn.pre_obj]["out"])

    post = get_post_ens(conn)
    encoders = model.sig[post]["encoders"][:, conn.post_slice]

    out_size = encoders.shape[0]
    in_size = acts.shape[0]

    # initial_conductances_pos = 1 / mpes.initial_resistances( 1e8, 1e8, (out_size, in_size) )
    # initial_conductances_neg = 1 / mpes.initial_resistances( 1e8, 1e8, (out_size, in_size) )
    initial_conductances_pos = mpes.initial_conductances_pos
    initial_conductances_neg = mpes.initial_conductances_neg

    pos_memristors = Signal(shape=(out_size, in_size),
                            name="mPES:pos_memristors",
                            initial_value=initial_conductances_pos)
    neg_memristors = Signal(shape=(out_size, in_size),
                            name="mPES:neg_memristors",
                            initial_value=initial_conductances_neg)

    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors

    model.add_op(
        SimmPES(acts, error, model.sig[rule]["delta"], mpes.learning_rate,
                encoders, model.sig[rule]["pos_memristors"],
                model.sig[rule]["neg_memristors"]))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors
Example #28
def test_encoder_decoder_pathway(RefSimulator):
    """Verifies (like by hand) that the simulator does the right
    things in the right order."""
    foo = Signal([1.0], name="foo")
    decoders = np.asarray([.2, .1])
    decs = Signal(decoders * 0.5)
    m = Model(dt=0.001)
    sig_in, sig_out = build_pyfunc(lambda t, x: x + 1, True, 2, 2, None, m)
    m.operators += [
        DotInc(Signal([[1.0], [2.0]]), foo, sig_in),
        ProdUpdate(decs, sig_out, Signal(0.2), foo)
    ]

    def check(sig, target):
        assert np.allclose(sim.signals[sig], target)

    sim = RefSimulator(None, model=m)

    check(foo, 1.0)
    check(sig_in, 0)
    check(sig_out, 0)

    sim.step()
    # DotInc to pop.input_signal (input=[1.0,2.0])
    # produpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2,3])

    check(sig_in, [1, 2])
    check(sig_out, [2, 3])
    check(foo, .2)
    check(decs, [.1, .05])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2,0.4])
    #  (note that pop resets its own input signal each timestep)
    # produpdate updates foo (foo=[0.39]) 0.2*0.5*2+0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2,1.4])

    check(decs, [.1, .05])
    check(sig_in, [0.2, 0.4])
    check(sig_out, [1.2, 1.4])
    # -- foo is computed as a prodUpdate of the *previous* output signal
    #    foo <- .2 * foo + dot(decoders * .5, output_signal)
    #           .2 * .2  + dot([.2, .1] * .5, [2, 3])
    #           .04      + (.2 + .15)
    #        <- .39
    check(foo, .39)
Example #29
def build_mpes(model, mpes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(shape=(rule.size_in, ), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    acts = build_or_passthrough(model, mpes.pre_synapse,
                                model.sig[conn.pre_obj]["out"])

    post = get_post_ens(conn)
    encoders = model.sig[post]["encoders"]

    pos_memristors, neg_memristors, r_min_noisy, r_max_noisy, exponent_noisy = initialise_memristors(
        mpes, acts.shape[0], encoders.shape[0])

    model.sig[conn]["pos_memristors"] = pos_memristors
    model.sig[conn]["neg_memristors"] = neg_memristors

    if conn.post_obj is not conn.post:
        # in order to avoid slicing encoders along an axis > 0, we pad
        # `error` out to the full base dimensionality and then do the
        # dotinc with the full encoder matrix
        # comes into effect when slicing post connection
        padded_error = Signal(shape=(encoders.shape[1], ))
        model.add_op(Copy(error, padded_error, dst_slice=conn.post_slice))
    else:
        padded_error = error

    # error = dot(encoders, error)
    local_error = Signal(shape=(post.n_neurons, ))
    model.add_op(Reset(local_error))
    model.add_op(DotInc(encoders, padded_error, local_error, tag="PES:encode"))

    model.operators.append(
        SimmPES(acts, local_error, model.sig[conn]["pos_memristors"],
                model.sig[conn]["neg_memristors"], model.sig[conn]["weights"],
                mpes.noise_percentage, mpes.gain, r_min_noisy, r_max_noisy,
                exponent_noisy, mpes.initial_state))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors
Example #30
def slice_signal(model, signal, sl):
    assert signal.ndim == 1
    if isinstance(sl, slice) and (sl.step is None or sl.step == 1):
        return signal[sl]
    else:
        size = np.arange(signal.size)[sl].size
        sliced_signal = Signal(np.zeros(size), name="%s.sliced" % signal.name)
        model.add_op(Copy(signal, sliced_signal, src_slice=sl))
        return sliced_signal
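A hypothetical usage sketch (assumes a built `model` and a 1-D Signal `sig`; names are illustrative):

head = slice_signal(model, sig, slice(0, 3))         # unit-step slice: returned as a cheap Signal view
evens = slice_signal(model, sig, slice(0, None, 2))  # strided slice: new Signal plus a Copy operator
picks = slice_signal(model, sig, [0, 2])             # fancy index: new Signal plus a Copy operator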