Example #1
def test_signal_indexing_1(RefSimulator):
    one = Signal(np.zeros(1), name="a")
    two = Signal(np.zeros(2), name="b")
    three = Signal(np.zeros(3), name="c")
    tmp = Signal(np.zeros(3), name="tmp")

    m = Model(dt=0.001)
    m.operators += [
        Reset(one),
        Reset(two),
        Reset(tmp),
        DotInc(Signal(1, name="A1"), three[:1], one),
        DotInc(Signal(2.0, name="A2"), three[1:], two),
        DotInc(Signal([[0, 0, 1], [0, 1, 0], [1, 0, 0]], name="A3"), three,
               tmp),
        Copy(src=tmp, dst=three, as_update=True),
    ]

    sim = RefSimulator(None, model=m)
    sim.signals[three] = np.asarray([1, 2, 3])
    sim.step()
    assert np.all(sim.signals[one] == 1)
    assert np.all(sim.signals[two] == [4, 6])
    assert np.all(sim.signals[three] == [3, 2, 1])
    sim.step()
    assert np.all(sim.signals[one] == 3)
    assert np.all(sim.signals[two] == [4, 2])
    assert np.all(sim.signals[three] == [1, 2, 3])
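
The assertions for the first step can be verified by hand; a minimal NumPy sketch of the same arithmetic (simulator semantics such as the deferred as_update copy are only approximated):

import numpy as np

three = np.array([1.0, 2.0, 3.0])
A3 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])

one = 1.0 * three[:1]    # DotInc with scalar A1 -> [1]
two = 2.0 * three[1:]    # DotInc with scalar A2 -> [4, 6]
tmp = A3.dot(three)      # matrix DotInc -> [3, 2, 1]
three = tmp              # Copy(..., as_update=True) lands at end of step

assert np.all(one == 1) and np.all(two == [4, 6]) and np.all(three == [3, 2, 1])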
Example #2
def build_pes(model, pes, rule):
    # TODO: Filter activities
    conn = rule.connection
    activities = model.sig[conn.pre_obj]['out']
    error = model.sig[pes.error_connection]['out']

    scaled_error = Signal(np.zeros(error.shape),
                          name="PES:error * learning_rate")
    scaled_error_view = scaled_error.reshape((error.size, 1))
    activities_view = activities.reshape((1, activities.size))
    lr_sig = Signal(pes.learning_rate * model.dt, name="PES:learning_rate")

    model.add_op(Reset(scaled_error))
    model.add_op(DotInc(lr_sig, error, scaled_error, tag="PES:scale error"))

    if conn.solver.weights or (isinstance(conn.pre_obj, Neurons)
                               and isinstance(conn.post_obj, Neurons)):
        post = (conn.post_obj.ensemble
                if isinstance(conn.post_obj, Neurons) else conn.post_obj)
        transform = model.sig[conn]['transform']
        encoders = model.sig[post]['encoders']
        encoded_error = Signal(np.zeros(transform.shape[0]),
                               name="PES: encoded error")

        model.add_op(Reset(encoded_error))
        model.add_op(
            DotInc(encoders,
                   scaled_error,
                   encoded_error,
                   tag="PES:Encode error"))

        encoded_error_view = encoded_error.reshape((encoded_error.size, 1))
        model.add_op(
            ElementwiseInc(encoded_error_view,
                           activities_view,
                           transform,
                           tag="PES:Inc Transform"))
    elif isinstance(conn.pre_obj, Neurons):
        transform = model.sig[conn]['transform']
        model.add_op(
            ElementwiseInc(scaled_error_view,
                           activities_view,
                           transform,
                           tag="PES:Inc Transform"))
    else:
        assert isinstance(conn.pre_obj, Ensemble)
        decoders = model.sig[conn]['decoders']
        model.add_op(
            ElementwiseInc(scaled_error_view,
                           activities_view,
                           decoders,
                           tag="PES:Inc Decoder"))

    # expose these for probes
    model.sig[rule]['scaled_error'] = scaled_error
    model.sig[rule]['activities'] = activities

    model.params[rule] = None  # no build-time info to return
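
At its core the update is the outer product of the scaled error with the presynaptic activities; a minimal NumPy sketch of the decoder branch (values and names illustrative, not builder API):

import numpy as np

learning_rate, dt = 1e-4, 0.001
error = np.array([0.5, -0.2])             # error signal, shape (2,)
activities = np.array([10.0, 0.0, 3.0])   # pre activities, shape (3,)

scaled_error = learning_rate * dt * error
# the ElementwiseInc of the (2, 1) error view with the (1, 3) activity
# view broadcasts to this outer product, incrementing the (2, 3) decoders
decoder_inc = np.outer(scaled_error, activities)
assert decoder_inc.shape == (2, 3)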
Example #3
def build_delta_rule(model, delta_rule, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="DeltaRule:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    # Multiply by post_fn output if necessary
    post_fn = delta_rule.post_fn.function
    post_tau = delta_rule.post_tau
    post_target = delta_rule.post_target
    if post_fn is not None:
        post_sig = model.sig[conn.post_obj][post_target]
        post_synapse = Lowpass(post_tau) if post_tau is not None else None
        post_input = (post_sig if post_synapse is None
                      else model.build(post_synapse, post_sig))

        post = Signal(np.zeros(post_input.shape), name="DeltaRule:post")
        model.add_op(
            SimPyFunc(post,
                      post_fn,
                      t=None,
                      x=post_input,
                      tag="DeltaRule:post_fn"))
        model.sig[rule]["post"] = post

        error0 = error
        error = Signal(np.zeros(rule.size_in), name="DeltaRule:post_error")
        model.add_op(Reset(error))
        model.add_op(ElementwiseInc(error0, post, error))

    # Compute: correction = -learning_rate * dt * error
    correction = Signal(np.zeros(error.shape), name="DeltaRule:correction")
    model.add_op(Reset(correction))
    lr_sig = Signal(-delta_rule.learning_rate * model.dt,
                    name="DeltaRule:learning_rate")
    model.add_op(DotInc(lr_sig, error, correction, tag="DeltaRule:correct"))

    # delta_ij = correction_i * pre_j
    pre_synapse = Lowpass(delta_rule.pre_tau)
    pre = model.build(pre_synapse, model.sig[conn.pre_obj]["out"])

    model.add_op(Reset(model.sig[rule]["delta"]))
    model.add_op(
        ElementwiseInc(
            correction.reshape((-1, 1)),
            pre.reshape((1, -1)),
            model.sig[rule]["delta"],
            tag="DeltaRule:Inc Delta",
        ))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["correction"] = correction
    model.sig[rule]["pre"] = pre
Example #4
def test_operators():
    sig = Signal(np.array([0.0]), name="sig")
    assert fnmatch(repr(TimeUpdate(sig, sig)), "<TimeUpdate at 0x*>")
    assert fnmatch(repr(TimeUpdate(sig, sig, tag="tag")),
                   "<TimeUpdate 'tag' at 0x*>")
    assert fnmatch(repr(Reset(sig)), "<Reset at 0x*>")
    assert fnmatch(repr(Reset(sig, tag="tag")), "<Reset 'tag' at 0x*>")
    assert fnmatch(repr(Copy(sig, sig)), "<Copy at 0x*>")
    assert fnmatch(repr(Copy(sig, sig, tag="tag")), "<Copy 'tag' at 0x*>")
    assert fnmatch(repr(ElementwiseInc(sig, sig, sig)),
                   "<ElementwiseInc at 0x*>")
    assert fnmatch(repr(ElementwiseInc(sig, sig, sig, tag="tag")),
                   "<ElementwiseInc 'tag' at 0x*>")
    assert fnmatch(repr(DotInc(sig, sig, sig)), "<DotInc at 0x*>")
    assert fnmatch(repr(DotInc(sig, sig, sig, tag="tag")),
                   "<DotInc 'tag' at 0x*>")
    assert fnmatch(repr(SimPyFunc(sig, lambda x: 0.0, True, sig)),
                   "<SimPyFunc at 0x*>")
    assert fnmatch(
        repr(SimPyFunc(sig, lambda x: 0.0, True, sig, tag="tag")),
        "<SimPyFunc 'tag' at 0x*>",
    )
    assert fnmatch(repr(SimPES(sig, sig, sig, 0.1)), "<SimPES at 0x*>")
    assert fnmatch(repr(SimPES(sig, sig, sig, 0.1, tag="tag")),
                   "<SimPES 'tag' at 0x*>")
    assert fnmatch(repr(SimBCM(sig, sig, sig, sig, 0.1)), "<SimBCM at 0x*>")
    assert fnmatch(repr(SimBCM(sig, sig, sig, sig, 0.1, tag="tag")),
                   "<SimBCM 'tag' at 0x*>")
    assert fnmatch(repr(SimOja(sig, sig, sig, sig, 0.1, 1.0)),
                   "<SimOja at 0x*>")
    assert fnmatch(repr(SimOja(sig, sig, sig, sig, 0.1, 1.0, tag="tag")),
                   "<SimOja 'tag' at 0x*>")
    assert fnmatch(repr(SimVoja(sig, sig, sig, sig, 1.0, sig, 1.0)),
                   "<SimVoja at 0x*>")
    assert fnmatch(
        repr(SimVoja(sig, sig, sig, sig, 0.1, sig, 1.0, tag="tag")),
        "<SimVoja 'tag' at 0x*>",
    )
    assert fnmatch(repr(SimRLS(sig, sig, sig, sig)), "<SimRLS at 0x*>")
    assert fnmatch(
        repr(SimRLS(sig, sig, sig, sig, tag="tag")),
        "<SimRLS 'tag' at 0x*>",
    )
    assert fnmatch(repr(SimNeurons(LIF(), sig, {"sig": sig})),
                   "<SimNeurons at 0x*>")
    assert fnmatch(
        repr(SimNeurons(LIF(), sig, {"sig": sig}, tag="tag")),
        "<SimNeurons 'tag' at 0x*>",
    )
    assert fnmatch(repr(SimProcess(WhiteNoise(), sig, sig, sig)),
                   "<SimProcess at 0x*>")
    assert fnmatch(
        repr(SimProcess(WhiteNoise(), sig, sig, sig, tag="tag")),
        "<SimProcess 'tag' at 0x*>",
    )
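
These checks lean on fnmatch treating * as a shell-style wildcard, which absorbs the varying id()-based hex address in each repr; for instance:

from fnmatch import fnmatch

assert fnmatch("<Reset at 0x7f3c2a1b>", "<Reset at 0x*>")
assert not fnmatch("<Reset 'tag' at 0x7f3c2a1b>", "<Reset at 0x*>")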
Example #5
def build_pes(model, pes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error  # error connection will attach here

    acts = filtered_signal(model, pes, model.sig[conn.pre_obj]['out'],
                           pes.pre_tau)
    acts_view = acts.reshape((1, acts.size))

    # Compute the correction, i.e. the scaled negative error
    correction = Signal(np.zeros(error.shape), name="PES:correction")
    local_error = correction.reshape((error.size, 1))
    model.add_op(Reset(correction))

    # correction = -learning_rate * (dt / n_neurons) * error
    n_neurons = (conn.pre_obj.n_neurons if isinstance(conn.pre_obj, Ensemble)
                 else conn.pre_obj.size_in)
    lr_sig = Signal(-pes.learning_rate * model.dt / n_neurons,
                    name="PES:learning_rate")
    model.add_op(DotInc(lr_sig, error, correction, tag="PES:correct"))

    if conn.solver.weights or (isinstance(conn.pre_obj, Neurons)
                               and isinstance(conn.post_obj, Neurons)):
        post = get_post_ens(conn)
        transform = model.sig[conn]['transform']
        encoders = model.sig[post]['encoders']

        # encoded = dot(encoders, correction)
        encoded = Signal(np.zeros(transform.shape[0]), name="PES:encoded")
        model.add_op(Reset(encoded))
        model.add_op(DotInc(encoders, correction, encoded, tag="PES:encode"))
        local_error = encoded.reshape((encoded.size, 1))
    elif not isinstance(conn.pre_obj, (Ensemble, Neurons)):
        raise ValueError("'pre' object '%s' not suitable for PES learning" %
                         (conn.pre_obj))

    # delta = local_error * activities
    model.add_op(Reset(model.sig[rule]['delta']))
    model.add_op(
        ElementwiseInc(local_error,
                       acts_view,
                       model.sig[rule]['delta'],
                       tag="PES:Inc Delta"))

    # expose these for probes
    model.sig[rule]['error'] = error
    model.sig[rule]['correction'] = correction
    model.sig[rule]['activities'] = acts

    model.params[rule] = None  # no build-time info to return
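
For context, this builder runs whenever a model uses PES through the public API; a minimal sketch of such a model (standard Nengo usage, parameter values illustrative):

import nengo

with nengo.Network():
    stim = nengo.Node(0.5)
    pre = nengo.Ensemble(50, dimensions=1)
    post = nengo.Ensemble(50, dimensions=1)
    error = nengo.Ensemble(50, dimensions=1)

    nengo.Connection(stim, pre)
    conn = nengo.Connection(pre, post,
                            learning_rule_type=nengo.PES(learning_rate=1e-4))
    # error = post - stim; connecting into conn.learning_rule attaches to
    # the "PES:error" signal exposed via model.sig[rule]['in'] above
    nengo.Connection(post, error)
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)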
Example #6
def build_aml(model, aml, rule):
    conn = rule.connection
    rng = np.random.RandomState(model.seeds[conn])

    error = Signal(np.zeros(rule.size_in), name="aml:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    pre = model.sig[conn.pre_obj]['in']
    decoders = model.sig[conn]['weights']
    delta = model.sig[rule]['delta']

    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = eval_points

    x = np.dot(eval_points, encoders.T)

    wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
                      if model.seeded[conn] else solve_for_decoders)
    base_decoders, _ = wrapped_solver(conn, gain, bias, x, targets, rng=rng)

    model.add_op(SimAML(
        aml.learning_rate, base_decoders, pre, error, decoders, delta))
Example #7
def build_rls(model, rls, rule):
    conn = rule.connection
    pre_activities = model.sig[conn.pre_obj]['out']

    pre_filtered = (pre_activities if rls.pre_synapse is None
                    else model.build(rls.pre_synapse, pre_activities))

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="RLS:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    # Create signal for running estimate of inverse correlation matrix
    assert pre_filtered.ndim == 1
    n_neurons = pre_filtered.shape[0]
    inv_gamma = Signal(np.eye(n_neurons) * rls.learning_rate,
                       name="RLS:inv_gamma")

    model.add_op(
        SimRLS(pre_filtered=pre_filtered,
               error=error,
               delta=model.sig[rule]['delta'],
               inv_gamma=inv_gamma))

    # expose these for probes
    model.sig[rule]['pre_filtered'] = pre_filtered
    model.sig[rule]['error'] = error
    model.sig[rule]['inv_gamma'] = inv_gamma
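
Assuming a Nengo version where the RLS rule type is exposed (3.1+), reaching this builder from user code looks like any other learning rule; a sketch (sizes illustrative):

import numpy as np
import nengo
from nengo.learning_rules import RLS

with nengo.Network():
    pre = nengo.Ensemble(60, dimensions=1)
    post = nengo.Node(size_in=1)
    error = nengo.Node(size_in=1)
    conn = nengo.Connection(pre.neurons, post,
                            transform=np.zeros((1, 60)),
                            learning_rule_type=RLS())
    nengo.Connection(error, conn.learning_rule)  # feeds "RLS:error" above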
Example #8
def build_convolution(model,
                      transform,
                      sig_in,
                      decoders=None,
                      encoders=None,
                      rng=np.random):
    if decoders is not None:
        raise BuildError("Applying a convolution transform to a decoded "
                         "connection is not supported")
    if encoders is not None:
        raise BuildError(
            "Applying encoders to a convolution transform is not supported")

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    weighted = Signal(np.zeros(transform.size_out),
                      name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(weight_sig,
                sig_in,
                weighted,
                transform,
                tag="%s.apply_weights" % transform))

    return weighted, weight_sig
Example #9
def build_node(model, node):
    # Get input
    if node.output is None or callable(node.output):
        if node.size_in > 0:
            model.sig[node]['in'] = model.Signal(
                npext.castDecimal(np.zeros(node.size_in)),
                name="%s.signal" % node)
            # Reset input signal to 0 each timestep
            model.add_op(Reset(model.sig[node]['in']))

    # Provide output
    if node.output is None:
        model.sig[node]['out'] = model.sig[node]['in']
    elif not callable(node.output):
        model.sig[node]['out'] = model.Signal(node.output, name=str(node))
    else:
        sig_in, sig_out = build_pyfunc(model=model,
                                       fn=node.output,
                                       t_in=True,
                                       n_in=node.size_in,
                                       n_out=node.size_out,
                                       label="%s.pyfn" % node)
        if sig_in is not None:
            model.add_op(
                DotInc(model.sig[node]['in'],
                       model.sig['common'][1],
                       sig_in,
                       tag="%s input" % node))
        if sig_out is not None:
            model.sig[node]['out'] = sig_out

    model.params[node] = None
Example #10
def build_convolution(
    model, transform, sig_in, decoders=None, encoders=None, rng=np.random
):
    """Build a `.Convolution` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a convolution transform to a decoded "
            "connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, readonly=True, name="%s.weights" % transform)
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(
            weight_sig, sig_in, weighted, transform, tag="%s.apply_weights" % transform
        )
    )

    return weighted, weight_sig
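
A Convolution transform reaches this builder via a neuron-to-neuron connection, which is why the decoder/encoder branches above can never trigger on valid models; a sketch using the public transform API (shapes illustrative):

import nengo

with nengo.Network():
    a = nengo.Ensemble(50, dimensions=1)  # 50 neurons = 5 * 5 * 2 inputs
    b = nengo.Node(size_in=64)            # 64 = 4 * 4 * 4 "valid" outputs
    conv = nengo.Convolution(n_filters=4, input_shape=(5, 5, 2),
                             kernel_size=(2, 2), strides=(1, 1))
    nengo.Connection(a.neurons, b, transform=conv)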
Example #11
def test_multidotinc_compress(monkeypatch):
    if nengo.version.version_info < (2, 3, 1):  # LEGACY
        # Nengo versions <= 2.3.0 have more stringent op validation which
        # required PreserveValue. That's been removed, so the strict
        # validation causes this test to fail despite it working.
        monkeypatch.setattr(nengo.utils.simulator, "validate_ops", lambda *args: None)

    a = Signal([0, 0])
    b = Signal([0, 0])
    A = Signal([[1, 2], [0, 1]])
    B = Signal([[2, 1], [-1, 1]])
    x = Signal([1, 1])
    y = Signal([1, -1])

    m = Model(dt=0)
    m.operators += [Reset(a), DotInc(A, x, a), DotInc(B, y, a)]
    m.operators += [DotInc(A, y, b), DotInc(B, x, b)]

    with nengo_ocl.Simulator(None, model=m) as sim:
        sim.step()
        assert np.allclose(sim.signals[a], [4, -1])
        assert np.allclose(sim.signals[b], [2, -1])
        sim.step()
        assert np.allclose(sim.signals[a], [4, -1])
        assert np.allclose(sim.signals[b], [4, -2])
Example #12
def remove_zero_incs(operators):
    """
    Remove any operators where we know the input (and therefore output) is
    zero.

    If the input to a DotInc/ElementwiseInc/Copy is zero then we know
    that the output of the op will be zero, so we can just get rid of it.

    Parameters
    ----------
    operators : list of `~nengo.builder.Operator`
        Operators in the model

    Returns
    -------
    new_operators : list of `~nengo.builder.Operator`
        Modified list of operators
    """

    logger.debug("REMOVE_ZERO_INCS")
    logger.debug("input ops")
    logger.debug(operators)

    sets, incs, _, updates = signal_io_dicts(operators)

    new_operators = []
    for op in operators:
        if isinstance(op, (DotInc, ElementwiseInc, Copy)):
            for src in op.reads:
                # check if the input is the output of a Node (in which case the
                # value might change, so we should never get rid of this op).
                # checking the name of the signal seems a bit fragile, but I
                # can't think of a better solution
                if src.name.startswith("<Node"):
                    continue

                # find any ops that modify src
                pred = sets[src.base] + incs[src.base]

                # the input (and therefore output) will be zero if the only
                # input is a Reset(0) op, or the only input is a constant
                # signal (not set/inc/updated) that is all zero
                zero_input = (
                    (len(pred) == 1 and type(pred[0]) == Reset and
                     np.all(pred[0].value == 0)) or
                    (len(pred) == 0 and np.all(src.initial_value == 0) and
                     len(updates[src.base]) == 0) and not src.trainable)
                if zero_input:
                    if len(op.sets) > 0:
                        new_operators.append(Reset(op.sets[0]))
                    break
            else:
                new_operators.append(op)
        else:
            new_operators.append(op)

    logger.debug("new ops")
    logger.debug(new_operators)

    return new_operators
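
The for ... else at the end of the loop is easy to misread: the else body runs only if no break fired, i.e. only when none of the op's inputs was provably zero. A minimal illustration of that control flow:

def keep_op(reads):
    for src in reads:
        if src == 0:     # stand-in for the zero-input check above
            break        # drop the op (or replace it with a Reset)
    else:
        return True      # no break: keep the op unchanged
    return False

assert keep_op([1, 2]) is True
assert keep_op([1, 0]) is False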
Example #13
def build_tensor_node(model, node):
    """This is the Nengo build function, so that Nengo knows what to do with
    TensorNodes."""

    # time signal
    if node.pass_time:
        time_in = model.time
    else:
        time_in = None

    # input signal
    if node.shape_in is not None:
        sig_in = builder.Signal(shape=(node.size_in,), name="%s.in" % node)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    sig_out = builder.Signal(shape=(node.size_out,), name="%s.out" % node)

    model.sig[node]["in"] = sig_in
    model.sig[node]["out"] = sig_out
    model.params[node] = None

    model.operators.append(
        SimTensorNode(node.tensor_func, time_in, sig_in, sig_out, node.shape_in)
    )
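
For reference, a node that exercises this build function might be declared as follows (nengo-dl 3.x style; exact constructor arguments depend on the installed version):

import numpy as np
import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network():
    stim = nengo.Node(np.zeros(4))
    # tensor_func receives (t, x); shape_in/shape_out determine the
    # sig_in/sig_out signals created by the builder above
    tnode = nengo_dl.TensorNode(lambda t, x: tf.tanh(x),
                                shape_in=(4,), shape_out=(4,))
    nengo.Connection(stim, tnode)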
Example #14
def build_aml(model, aml, rule):
    if aml.seed is None:
        rng = np.random
    else:
        rng = np.random.RandomState(aml.seed)

    conn = rule.connection

    error = Signal(np.zeros(rule.size_in), name="aml:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    pre = model.sig[conn.pre_obj]['in']
    decoders = model.sig[conn]['weights']

    # TODO caching
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = eval_points

    x = np.dot(eval_points, encoders.T)

    base_decoders, _ = solve_for_decoders(conn,
                                          gain,
                                          bias,
                                          x,
                                          targets,
                                          rng=rng)

    model.add_op(SimAML(aml.learning_rate, base_decoders, pre, error,
                        decoders))
Example #15
def build_weight_symmetry_learning(model, weight_symmetry_learning, rule):
    if weight_symmetry_learning.seed is None:
        rng = np.random
    else:
        rng = np.random.RandomState(weight_symmetry_learning.seed)

    conn = rule.connection

    pre = model.sig[conn.pre_obj.neurons]['out']

    decoders = model.sig[conn]['weights']

    scale = Signal(np.zeros(rule.size_in), name="WeightSymmetryLearn:scale")
    model.add_op(Reset(scale))
    model.sig[rule]['in'] = scale

    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = eval_points

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)

    base_decoders, _ = solve_for_decoders(conn,
                                          gain,
                                          bias,
                                          x,
                                          targets,
                                          rng=rng)

    model.add_op(
        SimWeightSymmetryLearning(weight_symmetry_learning.learning_rate,
                                  base_decoders, pre, decoders, scale))
Example #16
def build_node(model, node):
    # input signal
    if not is_array_like(node.output) and node.size_in > 0:
        sig_in = Signal(np.zeros(node.size_in), name="%s.in" % node)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    # Provide output
    if node.output is None:
        sig_out = sig_in
    elif isinstance(node.output, Process):
        sig_out = Signal(np.zeros(node.size_out), name="%s.out" % node)
        model.build(node.output, sig_in, sig_out)
    elif callable(node.output):
        sig_out = (Signal(np.zeros(node.size_out), name="%s.out" %
                          node) if node.size_out > 0 else None)
        model.add_op(
            SimPyFunc(output=sig_out, fn=node.output, t=model.time, x=sig_in))
    elif is_array_like(node.output):
        sig_out = Signal(node.output, name="%s.out" % node)
    else:
        raise BuildError("Invalid node output type %r" %
                         node.output.__class__.__name__)

    model.sig[node]['in'] = sig_in
    model.sig[node]['out'] = sig_out
    model.params[node] = None
Example #17
def build_dense(model,
                transform,
                sig_in,
                decoders=None,
                encoders=None,
                rng=np.random):
    """Build a `.Dense` transform object."""

    weights = transform.sample(rng=rng).astype(rc.float_dtype)

    if decoders is not None:
        weights = multiply(weights, decoders.astype(rc.float_dtype))
    if encoders is not None:
        weights = multiply(encoders.astype(rc.float_dtype).T, weights)

    # Add operator for applying weights
    weight_sig = Signal(weights, readonly=True, name="%s.weights" % transform)
    weighted = Signal(
        shape=transform.size_out if encoders is None else weights.shape[0],
        name="%s.weighted" % transform,
    )
    model.add_op(Reset(weighted))

    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform))

    return weighted, weight_sig
Example #18
def build_dense(model,
                transform,
                sig_in,
                decoders=None,
                encoders=None,
                rng=np.random):
    weights = transform.sample(rng=rng)

    if decoders is not None:
        weights = multiply(weights, decoders)
    if encoders is not None:
        weights = multiply(encoders.T, weights)

    # Add operator for applying weights
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    weighted = Signal(
        np.zeros(transform.size_out if encoders is None else weights.shape[0]),
        name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform))

    return weighted, weight_sig
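
The ElementwiseInc if weights.ndim < 2 else DotInc switch works because scalar or vector weights act as a (broadcast) diagonal matrix; a quick NumPy check of the equivalence:

import numpy as np

w = np.array([2.0, -1.0, 0.5])   # 1-D weights: elementwise path
x = np.array([1.0, 3.0, 4.0])
assert np.allclose(w * x, np.diag(w).dot(x))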
Example #19
def conn_probe(model, probe):
    """Build a "connection" probe type.

    Connection probes create a connection from the target, and probe
    the resulting signal (used when you want to probe the default
    output of an object, which may not have a predefined signal).
    """

    conn = Connection(
        probe.target,
        probe,
        synapse=probe.synapse,
        solver=probe.solver,
        add_to_container=False,
    )

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeded[conn] = model.seeded[probe]
    model.seeds[conn] = model.seeds[probe]

    # Make a sink signal for the connection
    model.sig[probe]["in"] = Signal(shape=conn.size_out, name=str(probe))
    model.add_op(Reset(model.sig[probe]["in"]))

    # Build the connection
    model.build(conn)
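
Connection probes are what nengo.Probe builds when targeting a decoded output; a minimal sketch of the user-facing side:

import nengo

with nengo.Network() as net:
    ens = nengo.Ensemble(40, dimensions=1)
    # probing the decoded output creates the Connection and sink Signal above
    probe = nengo.Probe(ens, synapse=0.01)

with nengo.Simulator(net) as sim:
    sim.run(0.1)
    data = sim.data[probe]  # shape (n_steps, 1)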
Example #20
def test_create_signals_partition():
    # check that signals are partitioned based on plan
    sigs = [DummySignal(), DummySignal(),
            DummySignal(), DummySignal()]
    plan = [tuple(DummyOp(reads=[x]) for x in sigs[:2]),
            tuple(DummyOp(reads=[x]) for x in sigs[2:])]
    bases, sig_map = create_signals(sigs, plan, np.float32, 10)
    assert sig_map[sigs[0]].key == sig_map[sigs[1]].key
    assert sig_map[sigs[1]].key != sig_map[sigs[2]].key
    assert sig_map[sigs[2]].key == sig_map[sigs[3]].key

    # check that signals are partitioned for different read blocks
    plan = [tuple(DummyOp(reads=[sigs[i], sigs[2 + i]]) for i in range(2))]
    bases, sig_map = create_signals(sigs, plan, np.float32, 10)
    assert sig_map[sigs[0]].key == sig_map[sigs[1]].key
    assert sig_map[sigs[1]].key != sig_map[sigs[2]].key
    assert sig_map[sigs[2]].key == sig_map[sigs[3]].key

    # check that signals are partitioned for different sig types
    plan = [tuple(DummyOp(reads=[sigs[i]], sets=[sigs[2 + i]])
                  for i in range(2))]
    bases, sig_map = create_signals(sigs, plan, np.float32, 10)
    assert sig_map[sigs[0]].key == sig_map[sigs[1]].key
    assert sig_map[sigs[1]].key != sig_map[sigs[2]].key
    assert sig_map[sigs[2]].key == sig_map[sigs[3]].key

    # check that resets are ignored
    sigs = [DummySignal(), DummySignal(), DummySignal(), DummySignal()]
    plan = [tuple(Reset(x) for x in sigs)]
    bases, sig_map = create_signals(sigs, plan, np.float32, 10)
    assert len(bases) == 4
Example #21
def test_noise(RefSimulator, seed):
    """Make sure that we can generate noise properly."""

    n = 1000
    mean, std = 0.1, 0.8
    noise = Signal(np.zeros(n), name="noise")
    process = nengo.processes.StochasticProcess(
        nengo.dists.Gaussian(mean, std))

    m = Model(dt=0.001)
    m.operators += [Reset(noise), SimNoise(noise, process)]

    sim = RefSimulator(None, model=m, seed=seed)
    samples = np.zeros((100, n))
    for i in range(100):
        sim.step()
        samples[i] = sim.signals[noise]

    h, xedges = np.histogram(samples.flat, bins=51)
    x = 0.5 * (xedges[:-1] + xedges[1:])
    dx = np.diff(xedges)
    z = (1. / np.sqrt(2 * np.pi * std**2)
         * np.exp(-0.5 * (x - mean)**2 / std**2))
    y = h / float(h.sum()) / dx
    assert np.allclose(y, z, atol=0.02)
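
The manual density normalization (h / h.sum() / dx) is equivalent to asking np.histogram for a density directly; a short self-contained check:

import numpy as np

samples = np.random.normal(0.1, 0.8, size=100000)
h, xedges = np.histogram(samples, bins=51)
y_manual = h / float(h.sum()) / np.diff(xedges)
y_density, _ = np.histogram(samples, bins=xedges, density=True)
assert np.allclose(y_manual, y_density)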
Example #22
def build_sparse(model, transform, sig_in, decoders=None, encoders=None, rng=np.random):
    """Build a `.Sparse` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a sparse transform to a decoded connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    # Add output signal
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    weights = transform.sample(rng=rng)
    assert weights.ndim == 2

    # Add operator for applying weights
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    model.add_op(
        SparseDotInc(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform)
    )

    return weighted, weight_sig
Example #23
def build_mpes(model, mpes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(shape=(rule.size_in, ), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    acts = build_or_passthrough(model, mpes.pre_synapse,
                                model.sig[conn.pre_obj]["out"])

    post = get_post_ens(conn)
    encoders = model.sig[post]["encoders"]

    (pos_memristors, neg_memristors, r_min_noisy, r_max_noisy,
     exponent_noisy) = initialise_memristors(mpes, acts.shape[0],
                                             encoders.shape[0])

    model.sig[conn]["pos_memristors"] = pos_memristors
    model.sig[conn]["neg_memristors"] = neg_memristors

    if conn.post_obj is not conn.post:
        # in order to avoid slicing encoders along an axis > 0, we pad
        # `error` out to the full base dimensionality and then do the
        # dotinc with the full encoder matrix
        # comes into effect when slicing post connection
        padded_error = Signal(shape=(encoders.shape[1], ))
        model.add_op(Copy(error, padded_error, dst_slice=conn.post_slice))
    else:
        padded_error = error

    # error = dot(encoders, error)
    local_error = Signal(shape=(post.n_neurons, ))
    model.add_op(Reset(local_error))
    model.add_op(DotInc(encoders, padded_error, local_error, tag="PES:encode"))

    model.operators.append(
        SimmPES(acts, local_error, model.sig[conn]["pos_memristors"],
                model.sig[conn]["neg_memristors"], model.sig[conn]["weights"],
                mpes.noise_percentage, mpes.gain, r_min_noisy, r_max_noisy,
                exponent_noisy, mpes.initial_state))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors
Example #24
def build_voja(model, voja, rule):
    """Builds a `.Voja` object into a model.

    Calls synapse build functions to filter the post activities,
    and adds a `.SimVoja` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    voja : Voja
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.Voja` instance.
    """

    conn = rule.connection

    # Filtered post activity
    post = conn.post_obj
    post_filtered = build_or_passthrough(
        model, voja.post_synapse, model.sig[post]["out"]
    )

    # Learning signal, defaults to 1 in case no connection is made
    # and multiplied by the learning_rate * dt
    learning = Signal(shape=rule.size_in, name="Voja:learning")
    assert rule.size_in == 1
    model.add_op(Reset(learning, value=1.0))
    model.sig[rule]["in"] = learning  # optional connection will attach here

    scaled_encoders = model.sig[post]["encoders"]
    # The gain and radius are folded into the encoders during the ensemble
    # build process, so we need to make sure that the deltas are proportional
    # to this scaling factor
    encoder_scale = model.params[post].gain / post.radius
    assert post_filtered.shape == encoder_scale.shape

    model.add_op(
        SimVoja(
            pre_decoded=model.sig[conn]["out"],
            post_filtered=post_filtered,
            scaled_encoders=scaled_encoders,
            delta=model.sig[rule]["delta"],
            scale=encoder_scale,
            learning_signal=learning,
            learning_rate=voja.learning_rate,
        )
    )

    model.sig[rule]["scaled_encoders"] = scaled_encoders
    model.sig[rule]["post_filtered"] = post_filtered
Example #25
def remove_bias_current(model, ens):
    if 'bias' not in model.sig[ens.neurons]:
        return

    sig_post_bias = model.sig[ens.neurons]['bias']
    sig_post_in = model.sig[ens.neurons]['in']
    for i, op in enumerate(model.operators):
        if isinstance(op, Copy):
            if (op.src is sig_post_bias) and (op.dst is sig_post_in):
                # Delete the copy operator and add a reset operator instead;
                # break so we stop iterating over the list we just mutated
                del model.operators[i]
                model.add_op(Reset(sig_post_in))
                break
Example #26
def build_rls(model, rls, rule):
    """Builds an `.RLS` (Recursive Least Squares) object into a model.

    Calls synapse build functions to filter the pre activities,
    and adds a `.SimRLS` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    rls : RLS
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.RLS` instance.
    """
    conn = rule.connection
    pre_activities = model.sig[conn.pre_obj]["out"]

    pre_filtered = (
        pre_activities
        if rls.pre_synapse is None
        else model.build(rls.pre_synapse, pre_activities)
    )

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="RLS:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error

    # Create signal for running estimate of inverse correlation matrix
    assert pre_filtered.ndim == 1
    n_neurons = pre_filtered.shape[0]
    learning_rate = rls.learning_rate * model.dt / n_neurons
    inv_gamma = Signal(np.eye(n_neurons) * learning_rate, name="RLS:inv_gamma")

    model.add_op(
        SimRLS(
            pre_filtered=pre_filtered,
            error=error,
            delta=model.sig[rule]["delta"],
            inv_gamma=inv_gamma,
        )
    )

    # expose these for probes
    model.sig[rule]["pre_filtered"] = pre_filtered
    model.sig[rule]["error"] = error
    model.sig[rule]["inv_gamma"] = inv_gamma
Example #27
def conn_probe(model, probe):
    conn = Connection(probe.target, probe, synapse=probe.synapse,
                      solver=probe.solver, add_to_container=False)

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeds[conn] = model.seeds[probe]

    # Make a sink signal for the connection
    model.sig[probe]['in'] = Signal(np.zeros(conn.size_out), name=str(probe))
    model.add_op(Reset(model.sig[probe]['in']))

    # Build the connection
    model.build(conn)
Example #28
def build_rmsp(model, rmsp, rule):
    """Builds a `.RMSP` object into a model.
    Calls synapse build functions to filter the pre and post activities,
    and adds a `.SimRMSP` operator to the model to calculate the delta.
    Parameters
    ----------
    model : Model
        The model to build into.
    rmsp : RMSP
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.
    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.RMSP` instance.
    """

    conn = rule.connection
    pre_activities = model.sig[conn.pre_obj]["out"]
    if conn.pre_slice is not None:
        pre_activities = pre_activities[conn.pre_slice]
    post_activities = model.sig[get_post_ens(conn).neurons]["out"]
    if conn.post_slice is not None:
        post_activities = post_activities[conn.post_slice]
    pre_filtered = build_or_passthrough(model, rmsp.pre_synapse, pre_activities)
    post_filtered = build_or_passthrough(model, rmsp.post_synapse, post_activities)
    weights = model.sig[conn]["weights"]
    delta = model.sig[rule]["delta"]

    # Create input reward signal
    reward = Signal(shape=rule.size_in, name="RMSP:reward")
    model.add_op(Reset(reward))
    model.sig[rule]["in"] = reward  # reward connection will attach here

    model.add_op(
        SimRMSP(
            pre_filtered,
            post_filtered,
            weights,
            reward,
            delta,
            learning_rate=rmsp.learning_rate,
            jit=rmsp.jit,
        )
    )

    # expose these for probes
    model.sig[rule]["pre_filtered"] = pre_filtered
    model.sig[rule]["post_filtered"] = post_filtered
Example #29
def build_pyfunc(model, fn, t_in, n_in, n_out, label):
    if n_in:
        sig_in = Signal(np.zeros(n_in), name="%s.input" % label)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    if n_out > 0:
        sig_out = Signal(np.zeros(n_out), name="%s.output" % label)
    else:
        sig_out = None

    model.add_op(SimPyFunc(output=sig_out, fn=fn, t_in=t_in, x=sig_in))

    return sig_in, sig_out
Example #30
def test_remove_unmodified_resets():
    a = Signal([1])

    # check that unmodified reset gets removed
    operators = [Reset(a, 2)]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == []
    assert np.all(a.initial_value == 2)

    # check that reset + inc doesn't get removed
    operators = [Reset(a, 2), dummies.Op(incs=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators

    # check that reset + update doesn't get removed
    operators = [Reset(a, 2), dummies.Op(updates=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators

    # check that reset + read does get removed
    operators = [Reset(a, 3), dummies.Op(reads=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators[1:]
    assert np.all(a.initial_value == 3)


@pytest.mark.parametrize("Op", (DotInc, ElementwiseInc))
def test_remove_identity_muls(Op):
    # check that identity input signals get removed
    As = [1.0, np.diag(np.ones(3)) if Op == DotInc else np.ones(3)]
    for A in As:
        x = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        y = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        a = Signal(A)
        a.trainable = False
        operators = [Op(a, x, y)]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 1
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is x
        assert new_op.dst is y
        assert new_op.inc

    # check that identity x gets removed for elementwiseinc
    if Op == ElementwiseInc:
        a = dummies.Signal()
        x = dummies.Signal(initial_value=1)
        y = dummies.Signal()
        operators = [Op(a, x, y)]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 1
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is a
        assert new_op.dst is y
        assert new_op.inc

    # check that reset inputs get removed
    for A in As:
        x = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        y = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        a = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape)
        r = Reset(a)
        r.value = A
        operators = [Op(a, x, y), r]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 2
        assert new_operators[1:] == operators[1:]
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is x
        assert new_op.dst is y
        assert new_op.inc

    # check that non-identity inputs don't get removed
    a = Signal(np.ones((3, 3)))
    a.trainable = False
    operators = [Op(a, dummies.Signal(shape=(3,)),
                    dummies.Signal(shape=(3,)))]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that node inputs don't get removed
    x = dummies.Signal(label="<Node lorem ipsum")
    operators = [Op(x, dummies.Signal(), dummies.Signal())]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that identity inputs + trainable don't get removed
    x = Signal(1.0)
    x.trainable = True
    operators = [Op(x, dummies.Signal(), dummies.Signal())]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that updated input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(updates=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that inc'd input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(incs=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that set'd input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(sets=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators