Example #1
def test_signal_values():
    """Make sure Signal.initial_value works."""
    two_d = Signal([[1.], [1.]])
    assert np.allclose(two_d.initial_value, np.array([[1], [1]]))
    two_d_view = two_d[0, :]
    assert np.allclose(two_d_view.initial_value, np.array([1]))

    # cannot change signal value after creation
    with pytest.raises(SignalError):
        two_d.initial_value = np.array([[0.5], [-0.5]])
    with pytest.raises((ValueError, RuntimeError)):
        two_d.initial_value[...] = np.array([[0.5], [-0.5]])
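
A numpy-only aside on the second `raises` block above: writing into a non-writeable
ndarray raises ValueError, analogous to how a read-only Signal protects its initial
value. A minimal sketch of that failure mode:

import numpy as np

v = np.array([[1.0], [1.0]])
v.flags.writeable = False  # analogous to a read-only Signal's array
try:
    v[...] = np.array([[0.5], [-0.5]])
except ValueError as err:
    assert "read-only" in str(err)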
Example #2
def build_pes(model, pes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error  # error connection will attach here

    acts = filtered_signal(
        model, pes, model.sig[conn.pre_obj]['out'], pes.pre_tau)
    acts_view = acts.reshape((1, acts.size))

    # Compute the correction, i.e. the scaled negative error
    correction = Signal(np.zeros(error.shape), name="PES:correction")
    local_error = correction.reshape((error.size, 1))
    model.add_op(Reset(correction))

    # correction = -learning_rate * (dt / n_neurons) * error
    n_neurons = (conn.pre_obj.n_neurons if isinstance(conn.pre_obj, Ensemble)
                 else conn.pre_obj.size_in)
    lr_sig = Signal(-pes.learning_rate * model.dt / n_neurons,
                    name="PES:learning_rate")
    model.add_op(DotInc(lr_sig, error, correction, tag="PES:correct"))

    if conn.solver.weights or (
            isinstance(conn.pre_obj, Neurons) and
            isinstance(conn.post_obj, Neurons)):
        post = get_post_ens(conn)
        weights = model.sig[conn]['weights']
        encoders = model.sig[post]['encoders']

        # encoded = dot(encoders, correction)
        encoded = Signal(np.zeros(weights.shape[0]), name="PES:encoded")
        model.add_op(Reset(encoded))
        model.add_op(DotInc(encoders, correction, encoded, tag="PES:encode"))
        local_error = encoded.reshape((encoded.size, 1))
    elif not isinstance(conn.pre_obj, (Ensemble, Neurons)):
        raise ValueError("'pre' object '%s' not suitable for PES learning"
                         % (conn.pre_obj))

    # delta = local_error * activities
    model.add_op(Reset(model.sig[rule]['delta']))
    model.add_op(ElementwiseInc(
        local_error, acts_view, model.sig[rule]['delta'], tag="PES:Inc Delta"))

    # expose these for probes
    model.sig[rule]['error'] = error
    model.sig[rule]['correction'] = correction
    model.sig[rule]['activities'] = acts

    model.params[rule] = None  # no build-time info to return
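
To make the arithmetic concrete, here is a small numpy sketch of the dataflow above,
with made-up sizes; the scalar DotInc is just a multiply-accumulate, and the final
ElementwiseInc realizes an outer product through (d, 1) * (1, n) broadcasting.

import numpy as np

learning_rate, dt, n_neurons = 1e-4, 0.001, 50
error = np.ones(2)                # "PES:error"
activities = np.ones(n_neurons)   # filtered pre-synaptic activities

# correction = -learning_rate * (dt / n_neurons) * error
correction = -learning_rate * dt / n_neurons * error

# delta = local_error * activities
delta = correction.reshape(-1, 1) * activities.reshape(1, -1)
assert delta.shape == (2, n_neurons)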
Example #3
def build_pes(model, pes, rule):
    # TODO: Filter activities
    conn = rule.connection
    activities = model.sig[conn.pre_obj]['out']
    error = model.sig[pes.error_connection]['out']

    scaled_error = Signal(np.zeros(error.shape),
                          name="PES:error * learning_rate")
    scaled_error_view = scaled_error.reshape((error.size, 1))
    activities_view = activities.reshape((1, activities.size))
    lr_sig = Signal(pes.learning_rate * model.dt, name="PES:learning_rate")

    model.add_op(Reset(scaled_error))
    model.add_op(DotInc(lr_sig, error, scaled_error, tag="PES:scale error"))

    if conn.solver.weights or (
            isinstance(conn.pre_obj, Neurons) and
            isinstance(conn.post_obj, Neurons)):
        post = (conn.post_obj.ensemble if isinstance(conn.post_obj, Neurons)
                else conn.post_obj)
        transform = model.sig[conn]['transform']
        encoders = model.sig[post]['encoders']
        encoded_error = Signal(np.zeros(transform.shape[0]),
                               name="PES: encoded error")

        model.add_op(Reset(encoded_error))
        model.add_op(DotInc(
            encoders, scaled_error, encoded_error, tag="PES:Encode error"))

        encoded_error_view = encoded_error.reshape((encoded_error.size, 1))
        model.add_op(ElementwiseInc(
            encoded_error_view, activities_view, transform,
            tag="PES:Inc Transform"))
    elif isinstance(conn.pre_obj, Neurons):
        transform = model.sig[conn]['transform']
        model.add_op(ElementwiseInc(
            scaled_error_view, activities_view, transform,
            tag="PES:Inc Transform"))
    else:
        assert isinstance(conn.pre_obj, Ensemble)
        decoders = model.sig[conn]['decoders']
        model.add_op(ElementwiseInc(
            scaled_error_view, activities_view, decoders,
            tag="PES:Inc Decoder"))

    # expose these for probes
    model.sig[rule]['scaled_error'] = scaled_error
    model.sig[rule]['activities'] = activities

    model.params[rule] = None  # no build-time info to return
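
The reshape pairing above, (size, 1) against (1, size), is the standard broadcasting
trick for an outer product; a quick numpy check of that equivalence:

import numpy as np

scaled_error = np.array([0.1, -0.2])
activities = np.array([1.0, 2.0, 3.0])
outer = scaled_error.reshape(-1, 1) * activities.reshape(1, -1)
assert np.array_equal(outer, np.outer(scaled_error, activities))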
Example #4
def test_signal_reshape():
    """Tests Signal.reshape"""
    three_d = Signal(np.ones((2, 2, 2)))
    assert three_d.reshape((8,)).shape == (8,)
    assert three_d.reshape((4, 2)).shape == (4, 2)
    assert three_d.reshape((2, 4)).shape == (2, 4)
    assert three_d.reshape(-1).shape == (8,)
    assert three_d.reshape((4, -1)).shape == (4, 2)
    assert three_d.reshape((-1, 4)).shape == (2, 4)
    assert three_d.reshape((2, -1, 2)).shape == (2, 2, 2)
    assert three_d.reshape((1, 2, 1, 2, 2, 1)).shape == (1, 2, 1, 2, 2, 1)
Example #5
def test_signal_reshape():
    """Tests Signal.reshape"""
    # check proper shape after reshape
    three_d = Signal(np.ones((2, 2, 2)))
    assert three_d.reshape((8,)).shape == (8,)
    assert three_d.reshape((4, 2)).shape == (4, 2)
    assert three_d.reshape((2, 4)).shape == (2, 4)
    assert three_d.reshape(-1).shape == (8,)
    assert three_d.reshape((4, -1)).shape == (4, 2)
    assert three_d.reshape((-1, 4)).shape == (2, 4)
    assert three_d.reshape((2, -1, 2)).shape == (2, 2, 2)
    assert three_d.reshape((1, 2, 1, 2, 2, 1)).shape == (1, 2, 1, 2, 2, 1)

    # check with non-contiguous arrays (and with offset)
    value = np.arange(20).reshape(5, 4)
    s = Signal(np.array(value), name='s')

    s0slice = slice(0, 3), slice(None, None, 2)
    s0shape = 2, 3
    s0 = s[s0slice].reshape(*s0shape)
    assert s0.offset == 0
    assert np.array_equal(s0.initial_value, value[s0slice].reshape(*s0shape))

    s1slice = slice(1, None), slice(None, None, 2)
    s1shape = 2, 4
    s1 = s[s1slice].reshape(s1shape)
    assert s1.offset == 4 * s1.dtype.itemsize
    assert np.array_equal(s1.initial_value, value[s1slice].reshape(s1shape))

    # check error if non-contiguous array cannot be reshaped without copy
    s2slice = slice(None, None, 2), slice(None, None, 2)
    s2shape = 2, 3
    s2 = s[s2slice]
    with pytest.raises(SignalError):
        s2.reshape(s2shape)

    # check that views are working properly (incrementing `s` affects views)
    values = SignalDict()
    values.init(s)
    values.init(s0)
    values.init(s1)

    values[s] += 1
    assert np.array_equal(values[s0], value[s0slice].reshape(s0shape) + 1)
    assert np.array_equal(values[s1], value[s1slice].reshape(s1shape) + 1)
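
The `s1.offset` assertion is plain ndarray bookkeeping: dropping the first row of a
C-contiguous (5, 4) array skips 4 elements, so the view starts 4 * itemsize bytes
into the base buffer. The same arithmetic in raw numpy:

import numpy as np

value = np.arange(20.0).reshape(5, 4)
view = value[1:, ::2]  # same slice as `s1slice`
start_of_view = view.__array_interface__["data"][0]
start_of_base = value.__array_interface__["data"][0]
assert start_of_view - start_of_base == 4 * value.itemsize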
Example #6
def test_remove_unmodified_resets():
    a = Signal([1])

    # check that unmodified reset gets removed
    operators = [Reset(a, 2)]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == []
    assert np.all(a.initial_value == 2)

    # check that reset + inc doesn't get removed
    operators = [Reset(a, 2), DummyOp(incs=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators

    # check that reset + update doesn't get removed
    operators = [Reset(a, 2), DummyOp(updates=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators

    # check that reset + read does get removed
    operators = [Reset(a, 3), DummyOp(reads=[a])]
    new_ops = remove_unmodified_resets(operators)
    assert new_ops == operators[1:]
    assert np.all(a.initial_value == 3)
Example #7
def test_simple_pyfunc(RefSimulator):
    dt = 0.001
    time = Signal(np.zeros(1), name="time")
    sig = Signal(np.zeros(1), name="sig")
    m = Model(dt=dt)
    sig_in, sig_out = build_pyfunc(m, lambda t, x: np.sin(x), True, 1, 1, None)
    m.operators += [
        Reset(sig),
        DotInc(Signal([[1.0]]), time, sig_in),
        DotInc(Signal([[1.0]]), sig_out, sig),
        DotInc(Signal(dt), Signal(1), time, as_update=True),
    ]

    sim = RefSimulator(None, model=m)
    for i in range(5):
        sim.step()
        t = i * dt
        assert np.allclose(sim.signals[sig], np.sin(t))
        assert np.allclose(sim.signals[time], t + dt)
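
The operator list above is a tiny hand-built simulation loop: `sig` is reset and
recomputed from `time` each step, and `time` advances by `dt` as an end-of-step
update. The equivalent plain-Python loop:

import numpy as np

dt, time = 0.001, 0.0
for i in range(5):
    sig = np.sin(time)  # Reset(sig), then sig_out incremented into sig
    time += dt          # DotInc(Signal(dt), Signal(1), time, as_update=True)
    assert np.allclose(sig, np.sin(i * dt))
    assert np.allclose(time, (i + 1) * dt)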
Example #8
def test_remove_identity_muls(Op):
    # check that identity input signals get removed
    As = [1.0, np.diag(np.ones(3)) if Op == DotInc else np.ones(3)]
    for A in As:
        x = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        y = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        a = Signal(A)
        a.trainable = False
        operators = [Op(a, x, y)]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 1
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is x
        assert new_op.dst is y
        assert new_op.inc

    # check that identity x gets removed for elementwiseinc
    if Op == ElementwiseInc:
        a = dummies.Signal()
        x = dummies.Signal(initial_value=1)
        y = dummies.Signal()
        operators = [Op(a, x, y)]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 1
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is a
        assert new_op.dst is y
        assert new_op.inc

    # check that reset inputs get removed
    for A in As:
        x = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        y = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape[:1])
        a = dummies.Signal(shape=(1,) if isinstance(A, float) else A.shape)
        r = Reset(a)
        r.value = A
        operators = [Op(a, x, y), r]
        new_operators = remove_identity_muls(operators)
        assert len(new_operators) == 2
        assert new_operators[1:] == operators[1:]
        new_op = new_operators[0]
        assert isinstance(new_op, Copy)
        assert new_op.src is x
        assert new_op.dst is y
        assert new_op.inc

    # check that non-identity inputs don't get removed
    a = Signal(np.ones((3, 3)))
    a.trainable = False
    operators = [Op(a, dummies.Signal(shape=(3,)),
                    dummies.Signal(shape=(3,)))]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that node inputs don't get removed
    x = dummies.Signal(label="<Node lorem ipsum")
    operators = [Op(x, dummies.Signal(), dummies.Signal())]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that identity inputs + trainable don't get removed
    x = Signal(1.0)
    x.trainable = True
    operators = [Op(x, dummies.Signal(), dummies.Signal())]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that updated input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(updates=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that inc'd input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(incs=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators

    # check that set'd input doesn't get removed
    x = dummies.Signal()
    operators = [Op(x, dummies.Signal(), dummies.Signal()),
                 dummies.Op(sets=[x])]
    new_operators = remove_identity_muls(operators)
    assert new_operators == operators
Example #9
def test_sigmerger_check():
    # 0-d signals
    assert SigMerger.check([Signal(0), Signal(0)])
    assert not SigMerger.check([Signal(0), Signal(1)])

    # compatible along first axis
    assert SigMerger.check(
        [Signal(np.empty((1, 2))),
         Signal(np.empty((2, 2)))])

    # compatible along second axis
    assert SigMerger.check(
        [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=1)
    assert not SigMerger.check(
        [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=0)

    # shape mismatch
    assert not SigMerger.check(
        [Signal(np.empty((2,))), Signal(np.empty((2, 2)))])

    # mixed dtype
    assert not SigMerger.check(
        [Signal(np.empty(2, dtype=int)),
         Signal(np.empty(2, dtype=float))])

    s1 = Signal(np.empty(5))
    s2 = Signal(np.empty(5))

    # mixed signal and view
    assert not SigMerger.check([s1, s1[:3]])

    # mixed bases
    assert not SigMerger.check([s1[:2], s2[2:]])

    # compatible views
    assert SigMerger.check([s1[:2], s1[2:]])

    # sparse signals not mergeable
    assert not SigMerger.check([
        Signal(SparseMatrix([[0, 0]], 1.0, (1, 1))),
        Signal(SparseMatrix([[0, 0]], 1.0, (1, 1))),
    ])

    # same signal cannot appear twice
    sig = Signal(0)
    assert not SigMerger.check([sig, sig])
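
A rough numpy picture of what a successful merge means, assuming SigMerger ultimately
concatenates initial values along `axis` (which is why every other dimension, and the
dtype, must agree):

import numpy as np

a, b = np.empty((1, 2)), np.empty((2, 2))
merged = np.concatenate([a, b], axis=0)  # legal: trailing dims and dtypes match
assert merged.shape == (3, 2)
# np.concatenate([np.empty(2), b]) would raise ValueError, mirroring the
# shape-mismatch case above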
Example #10
def build_connection(model, conn):
    """Builds a `.Connection` object into a model.

    A brief summary of what happens in the connection build process,
    in order:

    1. Solve for decoders.
    2. Combine transform matrix with decoders to get weights.
    3. Add operators for computing the function
       or multiplying neural activity by weights.
    4. Call build function for the synapse.
    5. Call build function for the learning rule.
    6. Add operator for applying learning rule delta to weights.

    Some of these steps may be altered or omitted depending on the parameters
    of the connection, in particular the pre and post types.

    Parameters
    ----------
    model : Model
        The model to build into.
    conn : Connection
        The connection to build.

    Notes
    -----
    Sets ``model.params[conn]`` to a `.BuiltConnection` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = "out" if is_pre else "in"

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero." %
                             (conn, "pre" if is_pre else "post", target))
        signal = model.sig[target].get(key, None)
        if signal is None or signal.size == 0:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero." %
                (conn, "pre" if is_pre else "post", target, key))

        return signal

    model.sig[conn]["in"] = get_prepost_signal(is_pre=True)
    model.sig[conn]["out"] = get_prepost_signal(is_pre=False)

    decoders = None
    encoders = None
    eval_points = None
    solver_info = None
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]["in"]
    if isinstance(conn.pre_obj, Node) or (
            isinstance(conn.pre_obj, Ensemble)
            and isinstance(conn.pre_obj.neuron_type, Direct)):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)
        if conn.function is None:
            in_signal = sliced_in
        elif isinstance(conn.function, np.ndarray):
            raise BuildError("Cannot use function points in direct connection")
        else:
            in_signal = Signal(shape=conn.size_mid, name="%s.func" % conn)
            model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in))
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng)
        if isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
            model.sig[conn]["out"] = model.sig[conn.post_obj.neurons]["in"]

            encoders = model.params[conn.post_obj].scaled_encoders.T
            encoders = encoders[conn.post_slice]

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Build transform
    if conn.solver.weights and not conn.solver.compositional:
        # special case for non-compositional weight solvers, where
        # the solver is solving for the full weight matrix. so we don't
        # need to combine decoders/transform/encoders.
        weighted, weights = model.build(Dense(decoders.shape, init=decoders),
                                        in_signal,
                                        rng=rng)
    else:
        weighted, weights = model.build(conn.transform,
                                        in_signal,
                                        decoders=decoders,
                                        encoders=encoders,
                                        rng=rng)

    model.sig[conn]["weights"] = weights

    # Build synapse
    if conn.synapse is not None:
        weighted = model.build(conn.synapse, weighted, mode="update")

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]["weighted"] = weighted

    if isinstance(conn.post_obj, Neurons):
        # Apply neuron gains (we don't need to do this if we're connecting to
        # an Ensemble, because the gains are rolled into the encoders)
        gains = Signal(
            model.params[conn.post_obj.ensemble].gain[post_slice],
            name="%s.gains" % conn,
        )

        if is_integer(post_slice) or isinstance(post_slice, slice):
            sliced_out = model.sig[conn]["out"][post_slice]
        else:
            # advanced indexing not supported on Signals, so we need to set up an
            # intermediate signal and use a Copy op to perform the indexing
            sliced_out = Signal(shape=gains.shape, name="%s.sliced_out" % conn)
            model.add_op(Reset(sliced_out))
            model.add_op(
                Copy(sliced_out,
                     model.sig[conn]["out"],
                     dst_slice=post_slice,
                     inc=True))

        model.add_op(
            ElementwiseInc(gains,
                           weighted,
                           sliced_out,
                           tag="%s.gains_elementwiseinc" % conn))
    else:
        # Copy to the proper slice
        model.add_op(
            Copy(
                weighted,
                model.sig[conn]["out"],
                dst_slice=post_slice,
                inc=True,
                tag="%s" % conn,
            ))

    # Build learning rules
    if conn.learning_rule is not None:
        # TODO: provide a general way for transforms to expose learnable params
        if not isinstance(conn.transform, (Dense, NoTransform)):
            raise NotImplementedError(
                "Learning on connections with %s transforms is not supported" %
                (type(conn.transform).__name__, ))

        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in rule.values() if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if "encoders" in targets:
            encoder_sig = model.sig[conn.post_obj]["encoders"]
            encoder_sig.readonly = False
        if "decoders" in targets or "weights" in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]["weights"].readonly = False

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=conn.transform,
        weights=getattr(weights, "initial_value", None),
    )
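
For the compositional branch, the decoders, transform, and encoders combine into one
weight matrix. A hedged sketch with made-up shapes and a dense transform (the real
combination happens inside the transform build functions, not in this form):

import numpy as np

n_pre, dims, n_post = 40, 2, 30
decoders = np.ones((dims, n_pre))        # solver output: activities -> value
transform = np.eye(dims)                 # conn.transform as a dense matrix
scaled_encoders = np.ones((n_post, dims))

weights = scaled_encoders.dot(transform).dot(decoders)  # hypothetical combination
assert weights.shape == (n_post, n_pre)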
Example #11
def test_sigmerger_check():
    # 0-d signals
    assert SigMerger.check([Signal(0), Signal(0)])
    assert not SigMerger.check([Signal(0), Signal(1)])

    # compatible along first axis
    assert SigMerger.check(
        [Signal(np.empty((1, 2))),
         Signal(np.empty((2, 2)))])

    # compatible along second axis
    assert SigMerger.check(
        [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=1)
    assert not SigMerger.check(
        [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=0)

    # shape mismatch
    assert not SigMerger.check(
        [Signal(np.empty((2,))), Signal(np.empty((2, 2)))])

    # mixed dtype
    assert not SigMerger.check(
        [Signal(np.empty(2, dtype=int)),
         Signal(np.empty(2, dtype=float))])

    s1 = Signal(np.empty(5))
    s2 = Signal(np.empty(5))

    # mixed signal and view
    assert not SigMerger.check([s1, s1[:3]])

    # mixed bases
    assert not SigMerger.check([s1[:2], s2[2:]])

    # compatible views
    assert SigMerger.check([s1[:2], s1[2:]])
Example #12
def test_signals():
    assert repr(Signal(np.array([0.0]))) == "Signal(name=None, shape=(1,))"
    assert (repr(Signal(np.array([1.0, 1.0]),
                        name="one")) == "Signal(name=one, shape=(2,))")
Example #13
def test_validate_ops():
    """tests validate_ops, including may_share_memory"""

    base1 = Signal(initial_value=np.ones((10, 4)))
    base2 = Signal(initial_value=np.ones((10, 4)))
    view1_a = base1[:5, :]
    view1_b = base1[5:, :]
    view1_ab = base1[:6, :]

    ops = [Operator() for _ in range(3)]

    # non-overlapping sets is OK
    validate_ops(sets={base1: [ops[1]], base2: [ops[2]]}, ups=[], incs=[])

    # missing set is bad
    with pytest.raises(AssertionError):
        validate_ops(sets={base1: [ops[1]], base2: []}, ups=[], incs=[])

    # multiple sets is bad
    with pytest.raises(AssertionError):
        validate_ops(sets={base1: [ops[1]], base2: ops}, ups=[], incs=[])

    # set base and view is bad
    with pytest.raises(AssertionError):
        validate_ops(sets={base1: [ops[1]], view1_a: [ops[2]]},
                     ups=[], incs=[])

    # set non-overlapping views is OK
    validate_ops(sets={view1_a: [ops[1]], view1_b: [ops[2]]}, ups=[], incs=[])

    # set overlapping views is bad
    with pytest.raises(AssertionError):
        validate_ops(sets={view1_ab: [ops[1]], view1_b: [ops[2]]},
                     ups=[], incs=[])

    # non-overlapping updates is OK
    validate_ops(ups={base1: [ops[1]], base2: [ops[2]]}, sets=[], incs=[])

    # missing update is bad
    with pytest.raises(AssertionError):
        validate_ops(ups={base1: [ops[1]], base2: []}, sets=[], incs=[])

    # multiple updates is bad
    with pytest.raises(AssertionError):
        validate_ops(ups={base1: [ops[1]], base2: ops}, sets=[], incs=[])

    # update base and view is bad
    with pytest.raises(AssertionError):
        validate_ops(ups={base1: [ops[1]], view1_a: [ops[2]]},
                     sets=[], incs=[])

    # update non-overlapping views is OK
    validate_ops(ups={view1_a: [ops[1]], view1_b: [ops[2]]}, sets=[], incs=[])

    # update overlapping views is bad
    with pytest.raises(AssertionError):
        validate_ops(ups={view1_ab: [ops[1]], view1_b: [ops[2]]},
                     sets=[], incs=[])
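
The overlap cases in this test come down to `np.may_share_memory` on the underlying
views; that check alone reproduces the OK/bad split above:

import numpy as np

base1 = np.ones((10, 4))
view_a, view_b, view_ab = base1[:5, :], base1[5:, :], base1[:6, :]

assert not np.may_share_memory(view_a, view_b)  # disjoint: setting both is OK
assert np.may_share_memory(view_ab, view_b)     # overlapping: setting both is bad
assert np.may_share_memory(base1, view_a)       # base and view: also bad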
Example #14
    def __init__(self,
                 network,
                 dt=0.001,
                 seed=None,
                 model=None,
                 planner=greedy_planner):

        with Timer() as nengo_timer:
            if model is None:
                self.model = Model(dt=float(dt),
                                   label="%s, dt=%f" % (network, dt),
                                   decoder_cache=get_default_decoder_cache())
            else:
                self.model = model

            if network is not None:
                # Build the network into the model
                self.model.build(network)

        logger.info("Nengo build in %0.3f s" % nengo_timer.duration)

        # --- set seed
        seed = np.random.randint(npext.maxint) if seed is None else seed
        self.seed = seed
        self.rng = np.random.RandomState(self.seed)

        self._step = Signal(np.array(0.0, dtype=np.float64), name='step')
        self._time = Signal(np.array(0.0, dtype=np.float64), name='time')

        # --- operators
        with Timer() as planner_timer:
            operators = list(self.model.operators)

            # convert DotInc, Reset, Copy, and ProdUpdate to MultiProdUpdate
            operators = list(map(MultiProdUpdate.convert_to, operators))
            operators = MultiProdUpdate.compress(operators)

            # plan the order of operations, combining where appropriate
            op_groups = planner(operators)
            assert len([typ for typ, _ in op_groups if typ is Reset]) < 2, (
                "All resets not planned together")

            # add time operator after planning, to ensure it goes first
            time_op = TimeUpdate(self._step, self._time)
            operators.insert(0, time_op)
            op_groups.insert(0, (type(time_op), [time_op]))

            self.operators = operators
            self.op_groups = op_groups

        logger.info("Planning in %0.3f s" % planner_timer.duration)

        with Timer() as signals_timer:
            # Initialize signals
            all_signals = signals_from_operators(operators)
            all_bases = stable_unique([sig.base for sig in all_signals])

            sigdict = SignalDict()  # map from Signal.base -> ndarray
            for op in operators:
                op.init_signals(sigdict)

            # Add built states to the probe dictionary
            self._probe_outputs = self.model.params

            # Provide a nicer interface to probe outputs
            self.data = ProbeDict(self._probe_outputs)

            self.all_data = RaggedArray(
                [sigdict[sb] for sb in all_bases],
                [getattr(sb, 'name', '') for sb in all_bases],
                dtype=np.float32)

            builder = ViewBuilder(all_bases, self.all_data)
            self._AX_views = {}
            self._YYB_views = {}
            for op_type, op_list in op_groups:
                self.setup_views(builder, op_type, op_list)
            for probe in self.model.probes:
                builder.append_view(self.model.sig[probe]['in'])
            builder.add_views_to(self.all_data)

            self.all_bases = all_bases
            self.sidx = builder.sidx

            self._prep_all_data()

        logger.info("Signals in %0.3f s" % signals_timer.duration)

        # --- create list of plans
        with Timer() as plans_timer:
            self._plan = []
            for op_type, op_list in op_groups:
                self._plan.extend(self.plan_op_group(op_type, op_list))
            self._plan.extend(self.plan_probes())

        logger.info("Plans in %0.3f s" % plans_timer.duration)

        self.n_steps = 0
Example #15
def test_convtransposeinc_2d(channels_last, strides, kernel_size, padding, rng,
                             allclose, plt):
    """Test ConvTransposeInc by ensuring it is the transpose of ConvInc.

    Since convolution is a linear operator, it can be expressed as a matrix ``A``.
    We can therefore state that ``C.dot(A.dot(x)) == (A.T.dot(C.T)).T.dot(x)``,
    for an arbitrary vector ``x`` and arbitrary matrix ``C``.

    This test asserts this identity, and thereby tests the ``ConvTransposeInc`` operator
    against the ``ConvInc`` operator.
    """
    spatial_shape = (16, 17)
    in_channels = 32
    out_channels = 64

    x_shape = ChannelShape.from_space_and_channels(spatial_shape,
                                                   in_channels,
                                                   channels_last=channels_last)
    conv = Convolution(
        out_channels,
        x_shape,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        channels_last=channels_last,
    )

    nk = 10  # number of vectors we test ConvTransposeInc with
    C = rng.randn(nk, conv.output_shape.size)

    # compute ``conv_output = C.dot(A.dot(x))``, where ``A`` is the convolution operator
    x = Signal(rng.randn(*x_shape.shape))
    w = Signal(
        rng.randn(kernel_size[0], kernel_size[1], in_channels, out_channels))
    y = Signal(np.zeros(conv.output_shape.shape))

    signals = {sig: np.array(sig.initial_value) for sig in (x, w, y)}
    step_conv = ConvInc(w, x, y, conv).make_step(signals, None, None)
    step_conv()

    x_flat = signals[x].ravel()
    y_flat = signals[y].ravel()
    conv_output = C.dot(y_flat)

    # compute ``conv_transpose_output = (A.T.dot(C.T)).T.dot(x)``, where ``A.T`` is the
    # transpose convolution operator (applied to one column of ``C.T`` at a time)
    conv_transpose = ConvolutionTranspose(
        in_channels,
        conv.output_shape,
        output_shape=x_shape,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        channels_last=channels_last,
    )
    xt = Signal(np.zeros(conv_transpose.input_shape.shape))
    wt = Signal(np.transpose(w.initial_value, (0, 1, 3, 2)))
    yt = Signal(np.zeros(conv_transpose.output_shape.shape))

    signals_tr = {sig: np.array(sig.initial_value) for sig in (xt, wt, yt)}
    step_trans = ConvTransposeInc(wt, xt, yt, conv_transpose).make_step(
        signals_tr, None, None)

    AtCt = []
    for k in range(nk):
        signals_tr[xt][:] = C[k].reshape(conv_transpose.input_shape.shape)
        signals_tr[yt][:] = 0
        step_trans()
        AtCt.append(signals_tr[yt].copy().ravel())

    AtCt = np.array(AtCt)
    conv_transpose_output = AtCt.dot(x_flat)
    assert conv_transpose_output.shape == conv_output.shape

    success = allclose(conv_transpose_output, conv_output)
    if success:
        plt.saveas = None
    else:
        debug_convtransposeinc_2d(w, wt, x, xt, y, yt, conv, conv_transpose,
                                  plt)
    assert success
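
The docstring identity is easy to confirm with a dense stand-in for the convolution
operator, since only linearity matters:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(6, 4)   # dense stand-in for the convolution operator
C = rng.randn(10, 6)  # arbitrary matrix, one test vector per row
x = rng.randn(4)

lhs = C.dot(A.dot(x))         # the ConvInc path
rhs = A.T.dot(C.T).T.dot(x)   # the ConvTransposeInc path
assert np.allclose(lhs, rhs)  # (A.T C.T).T == C A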
Example #16
def _test_convinc_2d(
    channels_last,
    stride0,
    stride1,
    kernel0,
    kernel1,
    padding,
    groups,
    out_channels,
    rng,
    allclose,
):
    shape0, shape1 = 16, 17
    in_channels = 32
    x_shape = ((shape0, shape1, in_channels) if channels_last else
               (in_channels, shape0, shape1))
    x = Signal(rng.randn(*x_shape))
    w = Signal(rng.randn(kernel0, kernel1, in_channels // groups,
                         out_channels))

    conv = Convolution(
        out_channels,
        x_shape,
        kernel_size=(kernel0, kernel1),
        strides=(stride0, stride1),
        padding=padding,
        channels_last=channels_last,
        groups=groups,
    )

    y = Signal(np.zeros(conv.output_shape.shape))

    signals = {sig: np.array(sig.initial_value) for sig in (x, w, y)}
    step = ConvInc(w, x, y, conv).make_step(signals, None, None)

    step()

    x0 = x.initial_value

    if not channels_last:
        x0 = np.moveaxis(x0, 0, -1)

    if padding == "same":
        strides = np.asarray([stride0, stride1])
        padding = np.ceil(np.asarray([shape0, shape1]) / strides)
        padding = np.maximum(
            (padding - 1) * strides + (kernel0, kernel1) - (shape0, shape1),
            0).astype(np.int64)
        x0 = np.pad(
            x0,
            [
                (padding[0] // 2, padding[0] - padding[0] // 2),
                (padding[1] // 2, padding[1] - padding[1] // 2),
                (0, 0),
            ],
            "constant",
        )

    y0 = scipy_conv2d(x0,
                      w.initial_value,
                      in_channels,
                      out_channels,
                      groups=groups)
    y0 = y0[::stride0, ::stride1, :]
    if not channels_last:
        y0 = np.moveaxis(y0, -1, 0)

    assert allclose(signals[y], y0)
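
The "same" padding arithmetic above, worked for one axis with shape 16, stride 2,
kernel 3: the output covers ceil(16 / 2) = 8 positions, so the total padding is
(8 - 1) * 2 + 3 - 16 = 1, split as 0 before and 1 after:

import numpy as np

shape, stride, kernel = 16, 2, 3
out = int(np.ceil(shape / stride))
pad = max((out - 1) * stride + kernel - shape, 0)
before, after = pad // 2, pad - pad // 2
assert (out, pad, before, after) == (8, 1, 0, 1)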
Example #17
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    decoders = None
    eval_points = None
    solver_info = None
    transform = full_transform(conn, slice_pre=False)

    # Figure out the signal going across this connection
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        if (conn.function is None and isinstance(conn.pre_slice, slice)
                and (conn.pre_slice.step is None or conn.pre_slice.step == 1)):
            signal = model.sig[conn]['in'][conn.pre_slice]
        else:
            signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            fn = ((lambda x: x[conn.pre_slice]) if conn.function is None else
                  (lambda x: conn.function(x[conn.pre_slice])))
            model.add_op(
                SimPyFunc(output=signal,
                          fn=fn,
                          t_in=False,
                          x=model.sig[conn]['in']))
    elif isinstance(conn.pre_obj, Ensemble):
        # Normal decoded connection
        eval_points, activities, targets = build_linear_system(
            model, conn, rng)

        # Use cached solver, if configured
        solver = model.decoder_cache.wrap_solver(conn.solver)

        if conn.solver.weights:
            # include transform in solved weights
            targets = np.dot(targets, transform.T)
            transform = np.array(1., dtype=np.float64)

            decoders, solver_info = solver(
                activities,
                targets,
                rng=rng,
                E=model.params[conn.post_obj].scaled_encoders.T)
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = model.sig[conn]['out'].size
        else:
            decoders, solver_info = solver(activities, targets, rng=rng)
            signal_size = conn.size_mid

        # Add operator for decoders
        decoders = decoders.T

        model.sig[conn]['decoders'] = Signal(decoders,
                                             name="%s.decoders" % conn)
        signal = Signal(np.zeros(signal_size), name=str(conn))
        model.add_op(Reset(signal))
        model.add_op(
            DotInc(model.sig[conn]['decoders'],
                   model.sig[conn]['in'],
                   signal,
                   tag="%s decoding" % conn))
    else:
        # Direct connection
        signal = model.sig[conn]['in']

    # Add operator for filtering
    if conn.synapse is not None:
        signal = filtered_signal(model, conn, signal, conn.synapse)

    # Add operator for transform
    if isinstance(conn.post_obj, Neurons):
        if not model.has_built(conn.post_obj.ensemble):
            # Since it hasn't been built, it wasn't added to the Network,
            # which is most likely because the Neurons weren't associated
            # with an Ensemble.
            raise RuntimeError("Connection '%s' refers to Neurons '%s' "
                               "that are not a part of any Ensemble." %
                               (conn, conn.post_obj))

        if conn.post_slice != slice(None):
            raise NotImplementedError(
                "Post-slices on connections to neurons are not implemented")

        gain = model.params[conn.post_obj.ensemble].gain[conn.post_slice]
        if transform.ndim < 2:
            transform = transform * gain
        else:
            transform *= gain[:, np.newaxis]

    model.sig[conn]['transform'] = Signal(transform,
                                          name="%s.transform" % conn)
    if transform.ndim < 2:
        model.add_op(
            ElementwiseInc(model.sig[conn]['transform'],
                           signal,
                           model.sig[conn]['out'],
                           tag=str(conn)))
    else:
        model.add_op(
            DotInc(model.sig[conn]['transform'],
                   signal,
                   model.sig[conn]['out'],
                   tag=str(conn)))

    # Build learning rules
    if conn.learning_rule:
        if isinstance(conn.pre_obj, Ensemble):
            model.add_op(PreserveValue(model.sig[conn]['decoders']))
        else:
            model.add_op(PreserveValue(model.sig[conn]['transform']))

        if isinstance(conn.pre_obj, Ensemble) and conn.solver.weights:
            # TODO: make less hacky.
            # Have to do this because when a weight_solver
            # is provided, then learning rules should operate on
            # "decoders" which is really the weight matrix.
            model.sig[conn]['transform'] = model.sig[conn]['decoders']

        rule = conn.learning_rule
        if is_iterable(rule):
            for r in itervalues(rule) if isinstance(rule, dict) else rule:
                model.build(r)
        elif rule is not None:
            model.build(rule)

    model.params[conn] = BuiltConnection(decoders=decoders,
                                         eval_points=eval_points,
                                         transform=transform,
                                         solver_info=solver_info)
Example #18
    def Signal(self, value, name=None, dtype=None):
        dtype = self.dtype if dtype is None else dtype
        return Signal(value, name=name, dtype=dtype)
Example #19
def test_remove_reset_incs():
    # elementwiseinc converted to elementwiseset
    x = dummies.Signal()
    operators = [
        Reset(x),
        ElementwiseInc(dummies.Signal(), dummies.Signal(), x)
    ]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], op_builders.ElementwiseSet)
    assert new_operators[0].Y is x
    assert new_operators[0].incs == []
    assert new_operators[0].sets == [x]

    # dotinc converted to dotset
    x = dummies.Signal()
    operators = [Reset(x), DotInc(dummies.Signal(), dummies.Signal(), x)]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], op_builders.DotSet)
    assert new_operators[0].Y is x

    # copy inc converted to copy set
    x = dummies.Signal()
    operators = [Reset(x), Copy(dummies.Signal(), x, inc=True)]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert not new_operators[0].inc
    assert new_operators[0].dst is x

    # simprocess inc converted to simprocess set
    x = dummies.Signal()
    operators = [
        Reset(x),
        SimProcess(None, dummies.Signal(), x, dummies.Signal(), mode="inc"),
    ]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert new_operators[0].mode == "set"
    assert new_operators[0].output is x

    # convinc converted to convset
    x = dummies.Signal()
    operators = [
        Reset(x),
        ConvInc(dummies.Signal(), dummies.Signal(), x, None)
    ]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], transform_builders.ConvSet)
    assert new_operators[0].Y is x

    # sparsedotinc converted to sparsedotset
    x = dummies.Signal()
    operators = [
        Reset(x),
        SparseDotInc(dummies.Signal(sparse=True), dummies.Signal(), x, None),
    ]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], op_builders.SparseDotSet)
    assert new_operators[0].Y is x

    # resetinc converted to reset
    x = dummies.Signal()
    operators = [Reset(x), op_builders.ResetInc(x)]
    operators[1].value = np.ones((2, 3))
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 1
    assert type(new_operators[0]) == Reset
    assert np.allclose(new_operators[0].value, 1)
    assert new_operators[0].dst is x

    # multiple incs
    x = dummies.Signal()
    operators = [
        Reset(x),
        ElementwiseInc(dummies.Signal(), dummies.Signal(), x),
        ElementwiseInc(dummies.Signal(), dummies.Signal(), x),
    ]
    new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 2
    assert isinstance(new_operators[0], op_builders.ElementwiseSet)
    assert isinstance(new_operators[1], ElementwiseInc)

    # nonzero reset doesn't get converted
    x = dummies.Signal()
    operators = [
        Reset(x, value=1),
        ElementwiseInc(dummies.Signal(), dummies.Signal(), x),
    ]
    new_operators = remove_reset_incs(operators)
    assert operators == new_operators

    # reset without inc
    x = dummies.Signal()
    operators = [
        Reset(x),
        Copy(dummies.Signal(), x, inc=False),
    ]
    new_operators = remove_reset_incs(operators)
    assert operators == new_operators

    # reset with partial inc
    x = Signal(shape=(10, ))
    operators = [
        Reset(x),
        Copy(dummies.Signal(), x[:5], inc=True),
    ]
    new_operators = remove_reset_incs(operators)
    assert operators == new_operators

    # unknown inc type
    class NewCopy(Copy):
        pass

    x = dummies.Signal()
    operators = [
        Reset(x),
        NewCopy(dummies.Signal(), x, inc=True),
        ElementwiseInc(dummies.Signal(), dummies.Signal(), x),
    ]
    with pytest.warns(UserWarning, match="Unknown incer type"):
        new_operators = remove_reset_incs(operators)
    assert len(new_operators) == 2
    # uses the known op (ElementwiseInc) instead of unknown one
    assert isinstance(new_operators[0], op_builders.ElementwiseSet)
    assert new_operators[1] is operators[1]

    operators = [
        Reset(x),
        NewCopy(dummies.Signal(), x, inc=True),
    ]
    # no optimization if only unknown incers
    with pytest.warns(UserWarning, match="Unknown incer type"):
        new_operators = remove_reset_incs(operators)
    assert new_operators == operators
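
The rewrite this test exercises rests on a simple identity: a zero Reset followed by
a single increment is a plain set. In numpy terms:

import numpy as np

y = np.full(3, 99.0)            # stale contents from the previous step
y[...] = 0.0                    # Reset(y)
y += np.array([1.0, 2.0, 3.0])  # ElementwiseInc into y
assert np.array_equal(y, [1.0, 2.0, 3.0])  # same as one ElementwiseSet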
Example #20
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    weights = None
    eval_points = None
    solver_info = None
    signal_size = conn.size_out
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)

        if conn.function is not None:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(
                SimPyFunc(output=in_signal,
                          fn=conn.function,
                          t_in=False,
                          x=sliced_in))
        else:
            in_signal = sliced_in

    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = build_decoders(model, conn, rng)

        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = conn.post_obj.neurons.size_in
            post_slice = Ellipsis  # don't apply slice later
            weights = decoders.T
        else:
            weights = multiply(conn.transform, decoders.T)
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Add operator for applying weights
    if weights is None:
        weights = np.array(conn.transform)

    if isinstance(conn.post_obj, Neurons):
        gain = model.params[conn.post_obj.ensemble].gain[post_slice]
        weights = multiply(gain, weights)

    if conn.learning_rule is not None and weights.ndim < 2:
        raise ValueError("Learning connection must have full transform matrix")

    model.sig[conn]['weights'] = Signal(weights,
                                        name="%s.weights" % conn,
                                        readonly=True)
    signal = Signal(np.zeros(signal_size), name="%s.weighted" % conn)
    model.add_op(Reset(signal))
    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(model.sig[conn]['weights'],
           in_signal,
           signal,
           tag="%s.weights_elementwiseinc" % conn))

    # Add operator for filtering
    if conn.synapse is not None:
        signal = model.build(conn.synapse, signal)

    # Copy to the proper slice
    model.add_op(
        SlicedCopy(signal,
                   model.sig[conn]['out'],
                   b_slice=post_slice,
                   inc=True,
                   tag="%s.gain" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        model.sig[conn]['weights'].readonly = False
        model.add_op(PreserveValue(model.sig[conn]['weights']))

        rule = conn.learning_rule
        if is_iterable(rule):
            for r in itervalues(rule) if isinstance(rule, dict) else rule:
                model.build(r)
        elif rule is not None:
            model.build(rule)

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         weights=weights)
Example #21
def build_ensemble(model, ens):
    """Builds an `.Ensemble` object into a model.

    A brief summary of what happens in the ensemble build process, in order:

    1. Generate evaluation points and encoders.
    2. Normalize encoders to unit length.
    3. Determine bias and gain.
    4. Create neuron input signal
    5. Add operator for injecting bias.
    6. Call build function for neuron type.
    7. Scale encoders by gain and radius.
    8. Add operators for multiplying decoded input signal by encoders and
       incrementing the result in the neuron input signal.
    9. Call build function for injected noise.

    Some of these steps may be altered or omitted depending on the parameters
    of the ensemble, in particular the neuron type. For example, most steps are
    omitted for the `.Direct` neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    ens : Ensemble
        The ensemble to build.

    Notes
    -----
    Sets ``model.params[ens]`` to a `.BuiltEnsemble` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens,
                                  ens.eval_points,
                                  rng=rng,
                                  dtype=rc.float_dtype)

    # Set up signal
    model.sig[ens]["in"] = Signal(shape=ens.dimensions, name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]["in"]))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions, dtype=rc.float_dtype)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders,
                               ens.n_neurons,
                               ens.dimensions,
                               rng=rng)
        encoders = np.asarray(encoders, dtype=rc.float_dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=rc.float_dtype)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)
    if np.any(np.isnan(encoders)):
        raise BuildError(
            "NaNs detected in %r encoders. This usually means that you had zero-length "
            "encoders that were normalized, resulting in NaNs. Ensure all encoders "
            "have non-zero length, or set `normalize_encoders=False`." % ens)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens,
                                                      rng,
                                                      dtype=rc.float_dtype)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]["in"] = Signal(shape=ens.dimensions,
                                              name="%s.neuron_in" % ens)
        model.sig[ens.neurons]["out"] = model.sig[ens.neurons]["in"]
        model.add_op(Reset(model.sig[ens.neurons]["in"]))
    else:
        model.sig[ens.neurons]["in"] = Signal(shape=ens.n_neurons,
                                              name="%s.neuron_in" % ens)
        model.sig[ens.neurons]["out"] = Signal(shape=ens.n_neurons,
                                               name="%s.neuron_out" % ens)
        model.sig[ens.neurons]["bias"] = Signal(bias,
                                                name="%s.bias" % ens,
                                                readonly=True)
        model.add_op(
            Copy(model.sig[ens.neurons]["bias"], model.sig[ens.neurons]["in"]))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]["encoders"] = Signal(scaled_encoders,
                                        name="%s.scaled_encoders" % ens,
                                        readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise,
                    sig_out=model.sig[ens.neurons]["in"],
                    mode="inc")

    # Create output signal, using built Neurons
    model.add_op(
        DotInc(
            model.sig[ens]["encoders"],
            model.sig[ens]["in"],
            model.sig[ens.neurons]["in"],
            tag="%s encoding" % ens,
        ))

    # Output is neural output
    model.sig[ens]["out"] = model.sig[ens.neurons]["out"]

    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        intercepts=intercepts,
        max_rates=max_rates,
        scaled_encoders=scaled_encoders,
        gain=gain,
        bias=bias,
    )
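
Steps 5, 7, and 8 of the docstring collapse into one line of linear algebra: the
neuron input current is the encoded stimulus plus bias. A sketch with made-up sizes:

import numpy as np

n_neurons, dims, radius = 20, 3, 1.5
rng = np.random.RandomState(0)
encoders = rng.randn(n_neurons, dims)
encoders /= np.linalg.norm(encoders, axis=1, keepdims=True)  # step 2
gain, bias = np.ones(n_neurons), np.zeros(n_neurons)         # step 3

scaled_encoders = encoders * (gain / radius)[:, np.newaxis]  # step 7
x = rng.randn(dims)
neuron_in = scaled_encoders.dot(x) + bias                    # steps 5 and 8
assert neuron_in.shape == (n_neurons,)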
Example #22
def build_FpgaPesEnsembleNetwork(model, network):
    """Builder to integrate an FPGA network into Nengo.

    TODO: add build steps in the style of nengo's built-in builders?
    """

    # Check if nengo_fpga.Simulator is being used to build this network
    if not network.using_fpga_sim:
        warn_str = "FpgaPesEnsembleNetwork not being built with nengo_fpga simulator."
        logger.warning(warn_str)
        print("WARNING: " + warn_str)

    # Check if all of the requirements to use the FPGA board are met
    if not (network.using_fpga_sim and network.config_found
            and network.fpga_found):
        # FPGA requirements not met...
        # Build the dummy network instead of using FPGA-specific stuff
        warn_str = "Building network with dummy (non-FPGA) ensemble."
        logger.warning(warn_str)
        print("WARNING: " + warn_str)
        nengo.builder.network.build_network(model, network)
        return

    # Generate the ensemble and connection parameters and save them to file
    extract_and_save_params(model, network)

    # Build the nengo network using the network's udp_socket function
    # Set up input/output signals
    input_sig = Signal(np.zeros(network.input_dimensions), name="input")
    model.sig[network.input]["in"] = input_sig
    model.sig[network.input]["out"] = input_sig
    model.add_op(Reset(input_sig))
    input_sig = model.build(nengo.synapses.Lowpass(0), input_sig)

    error_sig = Signal(np.zeros(network.output_dimensions), name="error")
    model.sig[network.error]["in"] = error_sig
    model.sig[network.error]["out"] = error_sig
    model.add_op(Reset(error_sig))
    error_sig = model.build(nengo.synapses.Lowpass(0), error_sig)

    output_sig = Signal(np.zeros(network.output_dimensions), name="output")
    model.sig[network.output]["out"] = output_sig
    if network.connection.synapse is not None:
        model.build(network.connection.synapse, output_sig)

    # Set up udp_socket combined input signals
    udp_socket_input_sig = Signal(
        np.zeros(network.input_dimensions + network.output_dimensions),
        name="udp_socket_input",
    )
    model.add_op(
        Copy(
            input_sig,
            udp_socket_input_sig,
            dst_slice=slice(0, network.input_dimensions),
        ))
    model.add_op(
        Copy(
            error_sig,
            udp_socket_input_sig,
            dst_slice=slice(network.input_dimensions, None),
        ))

    # Build udp socket function with Nengo SimPyFunc
    model.add_op(
        SimPyFunc(
            output=output_sig,
            fn=partial(udp_comm_func, net=network, dt=model.dt),
            t=model.time,
            x=udp_socket_input_sig,
        ))
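
The two Copy ops feeding `udp_socket_input_sig` just pack the input and error vectors
end to end; in numpy terms:

import numpy as np

input_dimensions, output_dimensions = 3, 2
combined = np.zeros(input_dimensions + output_dimensions)
combined[:input_dimensions] = np.arange(3.0)  # Copy(input_sig, dst_slice=slice(0, 3))
combined[input_dimensions:] = np.arange(2.0)  # Copy(error_sig, dst_slice=slice(3, None))
assert np.array_equal(combined, [0.0, 1.0, 2.0, 0.0, 1.0])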
Example #23
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        # NB: we do these in the order in which they're defined, and build the
        # learning rule in the connection builder. Because learning rules are
        # attached to connections, the connection that contains the learning
        # rule (and the learning rule itself) is always built *before* a
        # connection that attaches to that learning rule. Therefore, we don't
        # have to worry about connection ordering here.
        # TODO: Except perhaps if the connection being learned
        # is in a subnetwork?
        model.build(conn)

    logger.debug("Network step 4: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
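For context, a hedged sketch of how this build function is normally reached: nengo.Simulator constructs a Model and calls model.build on the top-level network, which dispatches here through the Builder registry.

import nengo
from nengo.builder import Model

with nengo.Network(seed=1) as net:
    nengo.Ensemble(10, dimensions=1)

model = Model(dt=0.001)
model.build(net)  # dispatches to build_network via the Builder registry
assert model.toplevel is net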
Example #24
0
def build_test_rule(model, test_rule, rule):
    error = Signal(np.zeros(rule.connection.size_in))
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error[:rule.size_in]

    model.add_op(Copy(error, model.sig[rule]["delta"]))
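A build function like this takes effect once it is registered with the Builder, so that model.build(rule) dispatches to it. A hedged registration sketch, where TestRule is a hypothetical LearningRuleType subclass:

from nengo.builder import Builder
from nengo.learning_rules import LearningRuleType

class TestRule(LearningRuleType):
    modifies = "decoders"  # hypothetical rule; modifies decoders like PES

Builder.register(TestRule)(build_test_rule)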
Example #25
0
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed
                if not hasattr(obj, 'seed') or obj.seed is None else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(npext.array(0.0, readonly=True),
                                        name='Common: Zero')
        model.sig['common'][1] = Signal(npext.array(1.0, readonly=True),
                                        name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Example #26
0
def build_pes(model, pes, rule):
    """Builds a `.PES` object into a model.

    Calls synapse build functions to filter the pre activities,
    and adds a `.SimPES` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    pes : PES
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to this learning rule type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.PES` instance.
    """

    conn = rule.connection

    # Create input error signal
    error = Signal(shape=rule.size_in, name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    # Filter pre-synaptic activities with pre_synapse
    pre_out = model.sig[conn.pre_obj]["out"]
    if isinstance(conn.pre_obj, Neurons):
        pre_out = slice_signal(model, pre_out, conn.pre_slice)
    acts = build_or_passthrough(model, pes.pre_synapse, pre_out)

    if conn._to_neurons:
        # multiply error by post encoders to get a per-neuron error
        #   i.e. local_error = dot(encoders, error)
        post = get_post_ens(conn)
        if not isinstance(conn.post_slice, slice):
            raise BuildError(
                "PES learning rule does not support advanced indexing on non-decoded "
                "connections")

        encoders = model.sig[post]["encoders"]
        # slice along neuron dimension if connecting to a neuron object, otherwise
        # slice along state dimension
        encoders = (
            encoders[:, conn.post_slice]
            if isinstance(conn.post_obj, Ensemble)
            else encoders[conn.post_slice, :]
        )

        local_error = Signal(shape=(encoders.shape[0],))
        model.add_op(Reset(local_error))
        model.add_op(DotInc(encoders, error, local_error, tag="PES:encode"))
    else:
        local_error = error

    model.add_op(
        SimPES(acts, local_error, model.sig[rule]["delta"], pes.learning_rate))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
Example #27
0
def test_get_tensor_signal():
    signals = SignalDict(tf.float32, 3)

    # check that tensor_signal is created correctly
    key = object()
    tensor_signal = signals.get_tensor_signal(
        (0,), key, np.float64, (3, 4), True)

    assert isinstance(tensor_signal, TensorSignal)
    assert np.array_equal(tensor_signal.indices, (0,))
    assert tensor_signal.key == key
    assert tensor_signal.dtype == np.float64
    assert tensor_signal.shape == (3, 4)
    assert tensor_signal.minibatch_size == 3
    assert tensor_signal.constant == signals.constant
    assert len(signals) == 0

    # check adding signal to sig_map
    sig = Signal(np.zeros(4))
    sig.minibatched = True
    tensor_signal = signals.get_tensor_signal(
        np.arange(4), key, np.float64, (2, 2), True, signal=sig)
    assert len(signals) == 1
    assert signals[sig] is tensor_signal
    assert next(iter(signals)) is sig
    assert next(iter(signals.values())) is tensor_signal

    # error if sig shape doesn't match indices
    with pytest.raises(AssertionError):
        sig = Signal(np.zeros((2, 2)))
        sig.minibatched = True
        signals.get_tensor_signal(
            np.arange(4), key, np.float64, (2, 2), True, signal=sig)

    # error if sig size doesn't match given shape
    with pytest.raises(AssertionError):
        sig = Signal(np.zeros(4))
        sig.minibatched = True
        signals.get_tensor_signal(
            np.arange(4), key, np.float64, (2, 3), True, signal=sig)

    # error if minibatched doesn't match
    with pytest.raises(AssertionError):
        sig = Signal(np.zeros(4))
        sig.minibatched = False
        signals.get_tensor_signal(
            np.arange(4), key, np.float64, (2, 2), True, signal=sig)
Example #28
0
def test_opstomerge_check_signals():
    sig = Signal(np.arange(10))
    sig_orig = sig.reshape(10)
    sig_reshaped = sig.reshape(2, 5)
    assert not OpsToMerge.check_signals(Copy(sig_orig, sig_orig),
                                        Copy(sig_reshaped, sig_reshaped))
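For context, a small illustration (plain nengo signals) of what this check rejects: both ops touch the same base signal, but through views of different shapes, so the two Copies cannot safely be merged into one elementwise operation.

import numpy as np
from nengo.builder.signal import Signal

sig = Signal(np.arange(10.0))
a = sig.reshape(10)
b = sig.reshape(2, 5)
assert a.base is sig and b.base is sig  # same underlying memory
assert a.shape != b.shape               # incompatible for merging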
Example #29
0
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(np.zeros(ens.dimensions),
                                              name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(np.zeros(ens.n_neurons),
                                              name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(np.zeros(ens.n_neurons),
                                               name="%s.neuron_out" % ens)
        model.add_op(
            Copy(src=Signal(bias, name="%s.bias" % ens),
                 dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(scaled_encoders,
                                        name="%s.scaled_encoders" % ens)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(
        DotInc(model.sig[ens]['encoders'],
               model.sig[ens]['in'],
               model.sig[ens.neurons]['in'],
               tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
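A small numeric sketch (plain numpy, made-up values) of the encoder-scaling step above. The input current to the neurons is J = gain * dot(encoders, x) / radius + bias; folding gain / radius into scaled_encoders lets the runtime compute it with the single DotInc plus the bias Copy added earlier.

import numpy as np

encoders = np.array([[1.0], [-1.0]])  # unit-norm rows
gain = np.array([2.0, 3.0])
bias = np.array([1.0, 1.0])
radius = 2.0
x = np.array([0.5])                   # represented value

scaled_encoders = encoders * (gain / radius)[:, np.newaxis]
J = scaled_encoders.dot(x) + bias
assert np.allclose(J, gain * encoders.dot(x) / radius + bias)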
Example #30
0
def test_sigmerger_check_signals():
    # 0-d signals
    SigMerger.check_signals([Signal(0), Signal(0)])
    with pytest.raises(ValueError):
        SigMerger.check_signals([Signal(0), Signal(1)])

    # compatible along first axis
    SigMerger.check_signals(
        [Signal(np.empty((1, 2))), Signal(np.empty((2, 2)))])

    # compatible along second axis
    SigMerger.check_signals(
        [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=1)
    with pytest.raises(ValueError):
        SigMerger.check_signals(
            [Signal(np.empty((2, 1))), Signal(np.empty((2, 2)))], axis=0)

    # shape mismatch
    with pytest.raises(ValueError):
        SigMerger.check_signals(
            [Signal(np.empty((2,))), Signal(np.empty((2, 2)))])

    # mixed dtype
    with pytest.raises(ValueError):
        SigMerger.check_signals(
            [Signal(np.empty(2, dtype=int)), Signal(np.empty(2, dtype=float))])

    # views are rejected even when compatible (they go through check_views)
    s = Signal(np.empty(5))
    with pytest.raises(ValueError):
        SigMerger.check_signals([s[:2], s[2:]])
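For contrast, contiguous views of one base signal are validated by the separate check_views path; a hedged sketch, assuming nengo's builder.optimizer module:

import numpy as np
from nengo.builder.optimizer import SigMerger
from nengo.builder.signal import Signal

s = Signal(np.empty(5))
SigMerger.check_views([s[:2], s[2:]])  # contiguous views of one base pass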
Example #31
0
def test_signal_nan():
    with_nan = np.ones(4)
    with_nan[1] = np.nan
    with pytest.raises(SignalError, match="contains NaNs"):
        Signal(initial_value=with_nan)
Example #32
0
def test_convinc_2d(
    channels_last, stride0, stride1, kernel0, kernel1, padding, rng, allclose
):
    correlate2d = pytest.importorskip("scipy.signal").correlate2d

    shape0 = 16
    shape1 = 17
    in_channels = 32
    out_channels = 64
    x_shape = (
        (shape0, shape1, in_channels)
        if channels_last
        else (in_channels, shape0, shape1)
    )
    x = Signal(rng.randn(*x_shape))
    w = Signal(rng.randn(kernel0, kernel1, in_channels, out_channels))

    conv = Convolution(
        out_channels,
        x_shape,
        kernel_size=(kernel0, kernel1),
        strides=(stride0, stride1),
        padding=padding,
        channels_last=channels_last,
    )

    y = Signal(np.zeros(conv.output_shape.shape))

    signals = {sig: np.array(sig.initial_value) for sig in (x, w, y)}
    step = ConvInc(w, x, y, conv).make_step(signals, None, None)

    step()

    x0 = x.initial_value

    if not channels_last:
        x0 = np.moveaxis(x0, 0, -1)

    if padding == "same":
        strides = np.asarray([stride0, stride1])
        padding = np.ceil(np.asarray([shape0, shape1]) / strides)
        padding = np.maximum(
            (padding - 1) * strides + (kernel0, kernel1) - (shape0, shape1), 0
        ).astype(np.int64)
        x0 = np.pad(
            x0,
            [
                (padding[0] // 2, padding[0] - padding[0] // 2),
                (padding[1] // 2, padding[1] - padding[1] // 2),
                (0, 0),
            ],
            "constant",
        )

    y0 = np.stack(
        [
            np.sum(
                [
                    correlate2d(x0[..., j], w.initial_value[..., j, i], mode="valid")
                    for j in range(in_channels)
                ],
                axis=0,
            )
            for i in range(out_channels)
        ],
        axis=-1,
    )
    y0 = y0[::stride0, ::stride1, :]
    if not channels_last:
        y0 = np.moveaxis(y0, -1, 0)

    assert allclose(signals[y], y0)
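A worked instance (made-up sizes) of the "same"-padding arithmetic in the test above: the output length is ceil(n / stride), the total padding is max((out - 1) * stride + kernel - n, 0), and the split puts the extra element on the right.

import numpy as np

n, stride, kernel = 16, 2, 3
out = int(np.ceil(n / stride))                   # 8 output positions
total = max((out - 1) * stride + kernel - n, 0)  # 1 padded element
before, after = total // 2, total - total // 2
assert (before, after) == (0, 1)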
Example #33
0
def test_remove_constant_copies():
    # check that Copy with no inputs gets turned into Reset
    x = dummies.Signal()
    operators = [Copy(dummies.Signal(), x)]
    new_operators = remove_constant_copies(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], Reset)
    assert new_operators[0].dst is x
    assert new_operators[0].value == 0

    # check that Copy with Node input doesn't get changed
    x = dummies.Signal(label="<Node lorem ipsum")
    operators = [Copy(x, dummies.Signal())]
    new_operators = remove_constant_copies(operators)
    assert new_operators == operators

    # check that Copy with trainable input doesn't get changed
    x = dummies.Signal()
    x.trainable = True
    operators = [Copy(x, dummies.Signal())]
    new_operators = remove_constant_copies(operators)
    assert new_operators == operators

    # check Copy with updated input doesn't get changed
    x = dummies.Signal()
    operators = [Copy(x, dummies.Signal()), dummies.Op(updates=[x])]
    new_operators = remove_constant_copies(operators)
    assert new_operators == operators

    # check Copy with inc'd input doesn't get changed
    x = dummies.Signal()
    operators = [Copy(x, dummies.Signal()), dummies.Op(incs=[x])]
    new_operators = remove_constant_copies(operators)
    assert new_operators == operators

    # check Copy with set input doesn't get changed
    x = dummies.Signal()
    operators = [Copy(x, dummies.Signal()), dummies.Op(sets=[x])]
    new_operators = remove_constant_copies(operators)
    assert new_operators == operators

    # check Copy with read input/output does get changed
    x = dummies.Signal()
    y = dummies.Signal()
    operators = [Copy(x, y), dummies.Op(reads=[x]),
                 dummies.Op(reads=[y])]
    new_operators = remove_constant_copies(operators)
    assert len(new_operators) == 3
    assert new_operators[1:] == operators[1:]
    assert isinstance(new_operators[0], Reset)
    assert new_operators[0].dst is y
    assert new_operators[0].value == 0

    # check Copy with Reset input does get changed
    x = dummies.Signal()
    y = dummies.Signal()
    operators = [Copy(x, y), Reset(x, 2)]
    new_operators = remove_constant_copies(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], Reset)
    assert new_operators[0].dst is y
    assert new_operators[0].value == 2

    # check that slicing is respected
    x = dummies.Signal()
    y = Signal(initial_value=[0, 0])
    operators = [Copy(x, y, dst_slice=slice(1, 2)), Reset(x, 2)]
    new_operators = remove_constant_copies(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], Reset)
    assert new_operators[0].dst.shape == (1,)
    assert new_operators[0].dst.is_view
    assert new_operators[0].dst.elemoffset == 1
    assert new_operators[0].dst.base is y
    assert new_operators[0].value == 2

    # check that CopyInc gets turned into ResetInc
    x = dummies.Signal()
    y = dummies.Signal()
    operators = [Copy(x, y, inc=True), Reset(x, 2)]
    new_operators = remove_constant_copies(operators)
    assert len(new_operators) == 1
    assert isinstance(new_operators[0], op_builders.ResetInc)
    assert new_operators[0].dst is y
    assert new_operators[0].value == 2
    assert len(new_operators[0].incs) == 1
    assert len(new_operators[0].sets) == 0