Example #1
def signal_probe(model, key, probe):
    """Build a "signal" probe type.

    Signal probes directly probe a target signal.
    """

    try:
        sig = model.sig[probe.obj][key]
    except (IndexError, KeyError) as e:
        raise BuildError(
            f"Attribute '{key}' is not probeable on {probe.obj}.") from e

    if sig is None:
        raise BuildError(
            f"Attribute '{key}' on {probe.obj} is None, cannot be probed")

    if probe.slice is not None:
        sig = slice_signal(model, sig, probe.slice)

    if probe.synapse is None:
        model.sig[probe]["in"] = sig
    else:
        model.sig[probe]["in"] = Signal(shape=sig.shape, name=str(probe))
        model.sig[probe]["filtered"] = model.build(probe.synapse,
                                                   sig,
                                                   mode="update")
        model.add_op(Copy(model.sig[probe]["filtered"],
                          model.sig[probe]["in"]))
Example #2
def build_regularspiking(model, regularspiking, neurons, block):
    base = regularspiking.base_type
    if type(base) not in (nengo.LIFRate, nengo.RectifiedLinear):
        raise BuildError(
            "RegularSpiking neurons with %r as a base type cannot be simulated on "
            "Loihi. Please either switch to a supported base neuron type like "
            "LIFRate or RectifiedLinear, or explicitly mark ensembles using this "
            "neuron type as off-chip with\n"
            "  net.config[ensembles].on_chip = False" % type(base).__name__
        )

    if base.amplitude != 1:
        raise BuildError(
            "Amplitude is not supported on RegularSpiking base types on Loihi, since "
            "this effectively modifies the `dt` for individual neurons. To change the "
            "amplitude of output spikes, set `amplitude` on the `RegularSpiking` "
            "instance instead of the base type instance."
        )

    check_state_zero(model, regularspiking, neurons, block)
    if type(base) is nengo.LIFRate:
        block.compartment.configure_lif(
            tau_rc=base.tau_rc, tau_ref=base.tau_ref, dt=model.dt
        )
    elif type(base) is nengo.RectifiedLinear:
        block.compartment.configure_relu(
            vth=1.0 / model.dt,  # so input == 1 -> neuron fires 1/dt steps -> 1 Hz
            dt=model.dt,
        )
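A hedged sketch of the off-chip workaround named in the error message, assuming the documented nengo-loihi config API (nengo_loihi.add_params); Sigmoid is used here only as an example of an unsupported base type:

import nengo
import nengo_loihi

with nengo.Network() as net:
    ens = nengo.Ensemble(
        10, 1, neuron_type=nengo.RegularSpiking(nengo.Sigmoid())
    )

nengo_loihi.add_params(net)      # adds the on_chip option to the config
net.config[ens].on_chip = False  # simulate this ensemble on the host instead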
Example #3
def build_convolution(model,
                      transform,
                      sig_in,
                      decoders=None,
                      encoders=None,
                      rng=np.random):
    if decoders is not None:
        raise BuildError("Applying a convolution transform to a decoded "
                         "connection is not supported")
    if encoders is not None:
        raise BuildError(
            "Applying encoders to a convolution transform is not supported")

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    weighted = Signal(np.zeros(transform.size_out),
                      name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(weight_sig,
                sig_in,
                weighted,
                transform,
                tag="%s.apply_weights" % transform))

    return weighted, weight_sig
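For context, a sketch of the kind of connection this builder handles, assuming nengo's Convolution transform (nengo.Convolution, available in nengo >= 3.0) applied between neuron populations:

import nengo

with nengo.Network():
    pre = nengo.Ensemble(28 * 28, 1).neurons
    conv = nengo.Convolution(n_filters=4, input_shape=(28, 28, 1),
                             kernel_size=(3, 3))
    post = nengo.Ensemble(conv.output_shape.size, 1).neurons
    nengo.Connection(pre, post, transform=conv)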
Example #4
    def validate(self):
        if self.location == 'cpu':
            return  # none of these checks currently apply to Lakemont

        N_CX_MAX = 1024
        if self.n > N_CX_MAX:
            raise BuildError("Number of compartments (%d) exceeded max (%d)" %
                             (self.n, N_CX_MAX))

        IN_AXONS_MAX = 4096
        n_axons = sum(s.n_axons for s in self.synapses)
        if n_axons > IN_AXONS_MAX:
            raise BuildError("Input axons (%d) exceeded max (%d)" %
                             (n_axons, IN_AXONS_MAX))

        MAX_SYNAPSE_BITS = 16384 * 64
        synapse_bits = sum(s.bits() for s in self.synapses)
        if synapse_bits > MAX_SYNAPSE_BITS:
            raise BuildError("Total synapse bits (%d) exceeded max (%d)" %
                             (synapse_bits, MAX_SYNAPSE_BITS))

        OUT_AXONS_MAX = 4096
        n_axons = sum(a.axon_slots() for a in self.axons)
        if n_axons > OUT_AXONS_MAX:
            raise BuildError("Output axons (%d) exceeded max (%d)" %
                             (n_axons, OUT_AXONS_MAX))

        for synapses in self.synapses:
            synapses.validate()

        for axons in self.axons:
            axons.validate()

        for probe in self.probes:
            probe.validate()
Example #5
def signal_probe(model, key, probe):
    """Build a "signal" probe type.

    Signal probes directly probe a target signal.
    """

    try:
        sig = model.sig[probe.obj][key]
    except (IndexError, KeyError):
        raise BuildError("Attribute %r is not probeable on %s." %
                         (key, probe.obj))

    if sig is None:
        raise BuildError("Attribute %r on %s is None, cannot be probed" %
                         (key, probe.obj))

    if probe.slice is not None:
        sig = sig[probe.slice]

    if probe.synapse is None:
        model.sig[probe]["in"] = sig
    else:
        model.sig[probe]["in"] = Signal(shape=sig.shape, name=str(probe))
        model.sig[probe]["filtered"] = model.build(probe.synapse,
                                                   sig,
                                                   mode="update")
        model.add_op(Copy(model.sig[probe]["filtered"],
                          model.sig[probe]["in"]))
Example #6
def validate_block(block):
    # -- Compartment
    validate_compartment(block.compartment)

    # -- Axons
    OUT_AXONS_MAX = 4096
    n_axons = sum(a.axon_slots() for a in block.axons)
    if n_axons > OUT_AXONS_MAX:
        raise BuildError("Output axons (%d) exceeded max (%d)" %
                         (n_axons, OUT_AXONS_MAX))

    for axon in block.axons:
        validate_axon(axon)

    # -- Synapses
    IN_AXONS_MAX = 4096
    n_axons = sum(s.n_axons for s in block.synapses)
    if n_axons > IN_AXONS_MAX:
        raise BuildError("Input axons (%d) exceeded max (%d)" %
                         (n_axons, IN_AXONS_MAX))

    MAX_SYNAPSE_BITS = 16384 * 64
    synapse_bits = sum(s.bits() for s in block.synapses)
    if synapse_bits > MAX_SYNAPSE_BITS:
        raise BuildError("Total synapse bits (%d) exceeded max (%d)" %
                         (synapse_bits, MAX_SYNAPSE_BITS))

    for synapse in block.synapses:
        validate_synapse(synapse)

    # -- Probes
    for probe in block.probes:
        validate_probe(probe)
Example #7
def validate_block(block):
    # -- Compartment
    validate_compartment(block.compartment)

    # -- Axons
    n_axons = sum(a.axon_slots() for a in block.axons)
    if n_axons > MAX_OUT_AXONS:
        raise BuildError("Output axons (%d) exceeded max (%d)" %
                         (n_axons, MAX_OUT_AXONS))

    for axon in block.axons:
        validate_axon(axon)

    # -- Synapses
    n_axons = sum(s.n_axons for s in block.synapses)
    if n_axons > MAX_IN_AXONS:
        raise BuildError("Input axons (%d) exceeded max (%d)" %
                         (n_axons, MAX_IN_AXONS))

    synapse_bits = sum(s.bits() for s in block.synapses)
    if synapse_bits > MAX_SYNAPSE_BITS:
        raise BuildError("Total synapse bits (%d) exceeded max (%d)" %
                         (synapse_bits, MAX_SYNAPSE_BITS))

    for synapse in block.synapses:
        validate_synapse(synapse)
Example #8
    def __init__(self, network, hostchip, passthrough, strict):
        self.hostchip = hostchip
        self.passthrough = passthrough

        self.objs = set()
        self._precomputable = True
        self._conns = (
            set(network.all_connections) | self.passthrough.to_add
        ) - self.passthrough.to_remove

        # Learning rules are not supported with precompute=True because
        # this would require a hybrid simulation where some parts of the
        # model interact with the host while other parts are precomputed
        # ahead of time. The simulator assumes that precompute=True does not
        # require any interaction between host and chip between time-steps.
        # Also see issue #214.
        has_learning = any(conn.learning_rule is not None for conn in self._conns)

        if has_learning:
            self._precomputable = False
            if strict:
                raise BuildError(
                    "precompute=True not supported when using learning rules"
                )
        else:
            self._find_precomputable_objs()

        if strict and not self._precomputable:
            raise BuildError("Cannot precompute input, as it is dependent on output")
Example #9
    def merge_transforms(self, size1, trans1, slice1, node, slice2, trans2,
                         size2):
        """Return an equivalent transform to the two provided transforms.

        This is for finding a transform that converts this::

            a = nengo.Node(size1)
            b = nengo.Node(size2)
            nengo.Connection(a, node[slice1], transform=trans1)
            nengo.Connection(node[slice2], b, transform=trans2)

        Into this::

            a = nengo.Node(size1)
            b = nengo.Node(size2)
            nengo.Connection(a, b, transform=t)

        """
        if trans1.ndim == 0:  # scalar
            trans1 = np.eye(size1) * trans1
        elif trans1.ndim != 2:
            raise BuildError("Unhandled transform shape: %s" %
                             (trans1.shape, ))

        if trans2.ndim == 0:  # scalar
            trans2 = np.eye(size2) * trans2
        elif trans2.ndim != 2:
            raise BuildError("Unhandled transform shape: %s" %
                             (trans2.shape, ))

        mid_t = np.eye(node.size_in)[slice2, slice1]
        return np.dot(trans2, np.dot(mid_t, trans1))
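A self-contained numpy check of the composition rule used above: sending trans1 into node[slice1] and reading node[slice2] out through trans2 equals the single merged matrix trans2 @ eye(node_size)[slice2, slice1] @ trans1 (sizes and slices here are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
node_size, size1, size2 = 5, 4, 2
slice1, slice2 = slice(0, 3), slice(2, 5)
trans1 = rng.standard_normal((3, size1))  # into node[0:3]
trans2 = rng.standard_normal((size2, 3))  # out of node[2:5]

x = rng.standard_normal(size1)
node_val = np.zeros(node_size)
node_val[slice1] = trans1 @ x             # first connection
two_hop = trans2 @ node_val[slice2]       # second connection

mid_t = np.eye(node_size)[slice2, slice1]
merged = trans2 @ mid_t @ trans1
assert np.allclose(two_hop, merged @ x)   # same result in one hop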
Example #10
def validate_block(block):
    # -- Compartment
    validate_compartment(block.compartment)

    # -- Axons
    n_axons = sum(a.axon_slots() for a in block.axons)
    if n_axons > MAX_OUT_AXONS:
        raise BuildError(
            f"{block}: Output axons ({n_axons}) exceeded max ({MAX_OUT_AXONS})"
        )

    for axon in block.axons:
        validate_axon(axon)

    # -- Synapses
    n_axons = sum(s.n_axons for s in block.synapses)
    if n_axons > MAX_IN_AXONS:
        raise BuildError(
            f"{block}: Input axons ({n_axons}) exceeded max ({MAX_IN_AXONS})")

    synapse_bits = sum(s.bits() for s in block.synapses)
    if synapse_bits > MAX_SYNAPSE_BITS:
        raise BuildError(
            f"{block}: Total synapse bits ({synapse_bits}) exceeded max "
            f"({MAX_SYNAPSE_BITS})")

    for synapse in block.synapses:
        validate_synapse(synapse)
Example #11
    def __init__(self, network, precompute=False, remove_passthrough=True):
        self.network = network

        # subset of network: only nodes and ensembles;
        # probes are handled dynamically
        self._seen_objects = set()

        # subset of seen, marking which are run on the hardware;
        # those running on the host are "seen - chip"
        self._chip_objects = set()

        # Step 1. Place nodes on host
        self._seen_objects.update(network.all_nodes)

        # Step 2. Place all possible ensembles on chip
        # Note: assumes add_params already called by the simulator
        for ens in network.all_ensembles:
            if (network.config[ens].on_chip in (None, True)
                    and not isinstance(ens.neuron_type, Direct)):
                self._chip_objects.add(ens)
            self._seen_objects.add(ens)

        # Step 3. Move learning ensembles (post and error) to host
        for conn in network.all_connections:
            pre = base_obj(conn.pre)
            post = base_obj(conn.post)
            if (conn.learning_rule_type is not None
                    and isinstance(post, Ensemble)
                    and post in self._chip_objects):
                if network.config[post].on_chip:
                    raise BuildError("Post ensemble (%r) of learned "
                                     "connection (%r) must not be configured "
                                     "as on_chip." % (post, conn))
                self._chip_objects.remove(post)
            elif (isinstance(post, LearningRule)
                  and isinstance(pre, Ensemble)
                  and pre in self._chip_objects):
                if network.config[pre].on_chip:
                    raise BuildError("Pre ensemble (%r) of error "
                                     "connection (%r) must not be configured "
                                     "as on_chip." % (pre, conn))
                self._chip_objects.remove(pre)

        # Step 4. Mark passthrough nodes for removal
        if remove_passthrough:
            passthroughs = set(
                obj for obj in network.all_nodes if is_passthrough(obj))
            ignore = self._seen_objects - self._chip_objects - passthroughs
            self.passthrough = PassthroughSplit(network, ignore)
        else:
            self.passthrough = PassthroughSplit(None)

        # Step 5. Split precomputable parts of host
        # This is a subset of host, marking which are precomputable
        if precompute:
            self._host_precomputable_objects = self._preclosure()
        else:
            self._host_precomputable_objects = set()
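Step 4 assumes an is_passthrough helper; a minimal sketch of the usual definition (an assumption here, not quoted from this project): a passthrough node is a Node whose output is None, so it merely relays its input:

from nengo import Node

def is_passthrough(obj):
    return isinstance(obj, Node) and obj.output is None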
Example #12
    def scatter(self, dst, val, mode="update"):
        """Updates the base data corresponding to ``dst``.

        Parameters
        ----------
        dst : :class:`.TensorSignal`
            Signal indicating the data to be modified in base array
        val : ``tf.Tensor``
            Update data (same shape as ``dst``, i.e. a dense array <= the size
            of the base array)
        mode : "update" or "inc"
            Overwrite/add the data at ``dst`` with ``val``
        """

        if dst.tf_indices is None:
            raise BuildError("Indices for %s have not been loaded into "
                             "TensorFlow" % dst)
        # if not dst.minibatched:
        #     raise BuildError("Assigning to a trainable variable")
        if val.dtype.is_floating and val.dtype.base_dtype != self.dtype:
            raise BuildError("Tensor detected with wrong dtype (%s), should "
                             "be %s." % (val.dtype.base_dtype, self.dtype))

        # align val shape with dst base shape
        self.bases[dst.key].get_shape().assert_is_fully_defined()
        val.get_shape().assert_is_fully_defined()
        dst_shape = ((dst.shape[0], ) +
                     tuple(self.bases[dst.key].get_shape().as_list()[1:]))
        if val.get_shape() != dst_shape:
            val = tf.reshape(val, dst.tf_shape)

        logger.debug("scatter")
        logger.debug("values %s", val)
        logger.debug("dst %s", dst)
        logger.debug("indices %s", dst.indices)
        logger.debug("dst base %s", self.bases[dst.key])
        logger.debug("reads_by_base %s",
                     self.reads_by_base[self.bases[dst.key]])

        # make sure that any reads to the target signal happen before this
        # write (note: this is only any reads that have happened since the
        # last write, since each write changes the base array object)
        with tf.control_dependencies(self.reads_by_base[self.bases[dst.key]]):
            self.bases[dst.key] = self._scatter_f_var(dst, val, mode=mode)

        # update reads_by_base. the general workflow is
        # gather -> computation -> scatter
        # so when we get a scatter, we assume the scatter value indicates that
        # all the previous gathers are complete, and block any writes to those
        # bases on the scatter value, ensuring the computation step is
        # complete before the values can be overwritten
        for b in self.gather_bases:
            self.reads_by_base[b] += [self.bases[dst.key]]
        self.gather_bases = []

        logger.debug("new dst base %s", self.bases[dst.key])
Example #13
    def on_chip(self, obj):
        if isinstance(obj, Probe):
            obj = base_obj(obj.target)
        if not isinstance(obj, (Ensemble, Node)):
            raise BuildError("Locations are only established for ensembles, "
                             "nodes, and probes -- not for %r" % (obj,))
        if obj not in self._seen_objects:
            raise BuildError("Object (%r) is not a part of the network"
                             % (obj,))
        return obj in self._chip_objects
Example #14
    def on_chip(self, obj):
        if not isinstance(obj, (Ensemble, Node, Probe)):
            raise BuildError(
                "Locations are only established for ensembles, "
                "nodes, and probes -- not for %r" % (obj,)
            )
        if obj in self.chip_objs:
            return True
        elif obj in self.host_objs:
            return False
        raise BuildError("Object (%r) is not a part of the network" % (obj,))
Example #15
def split_host_to_chip(networks, conn):
    dim = conn.size_out
    logger.debug("Creating ChipReceiveNode for %s", conn)
    receive = ChipReceiveNode(
        dim * 2, size_out=dim, add_to_container=False)
    networks.add(receive, "chip")
    receive2post = nengo.Connection(receive, conn.post,
                                    synapse=networks.node_tau,
                                    add_to_container=False)
    networks.add(receive2post, "chip")

    logger.debug("Creating NIF ensemble for %s", conn)
    if networks.node_neurons is None:
        raise BuildError(
            "DecodeNeurons must be specified for host->chip connection.")
    ens = networks.node_neurons.get_ensemble(dim)
    networks.add(ens, "host")

    if isinstance(conn.transform, Conv2D):
        raise BuildError(
            "Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object.")

    # scale the input spikes based on the radius of the
    # target ensemble
    seed = networks.original.seed if conn.seed is None else conn.seed
    transform = nengo.dists.get_samples(
        conn.transform,
        n=conn.size_out,
        d=conn.size_mid,
        rng=np.random.RandomState(seed=seed))
    if isinstance(conn.post_obj, nengo.Ensemble):
        transform = transform / conn.post_obj.radius
    pre2ens = nengo.Connection(conn.pre, ens,
                               function=conn.function,
                               solver=conn.solver,
                               eval_points=conn.eval_points,
                               scale_eval_points=conn.scale_eval_points,
                               synapse=conn.synapse,
                               transform=transform,
                               add_to_container=False)
    networks.add(pre2ens, "host")

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(dim * 2, add_to_container=False)
    networks.add(send, "host")
    ensneurons2send = nengo.Connection(
        ens.neurons, send, synapse=None, add_to_container=False)
    networks.add(ensneurons2send, "host")
    networks.remove(conn)

    networks.host2chip_senders[send] = receive
Example #16
def build_neurons(model, neurontype, neurons, input_sig=None, output_sig=None):
    """Builds a `.NeuronType` object into a model.

    This function adds a `.SimNeurons` operator connecting the input current to the
    neural output signals, and handles any additional state variables defined
    within the neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    neurontype : NeuronType
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.
    input_sig : Signal, optional
        Input (current) signal; defaults to ``model.sig[neurons]["in"]``.
    output_sig : Signal, optional
        Output (activity) signal; defaults to ``model.sig[neurons]["out"]``.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.NeuronType` instance.
    """
    input_sig = model.sig[neurons]["in"] if input_sig is None else input_sig
    output_sig = model.sig[neurons]["out"] if output_sig is None else output_sig

    dtype = input_sig.dtype
    n_neurons = neurons.size_in
    rng = np.random.RandomState(model.seeds[neurons.ensemble] + 1)
    state_init = neurontype.make_state(n_neurons, rng=rng, dtype=dtype)
    state = {}

    for key, init in state_init.items():
        if key in model.sig[neurons]:
            raise BuildError(
                f"State name '{key}' overlaps with existing signal name")
        if is_array_like(init):
            model.sig[neurons][key] = Signal(initial_value=init,
                                             name=f"{neurons}.{key}")
            state[key] = model.sig[neurons][key]
        elif isinstance(init, np.random.RandomState):
            # Pass through RandomState instances
            state[key] = init
        else:
            raise BuildError(
                f"State '{key}' is of type '{type(init).__name__}'. Only array-likes "
                "and RandomStates are currently supported.")

    model.add_op(
        SimNeurons(neurons=neurontype,
                   J=input_sig,
                   output=output_sig,
                   state=state))
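A toy sketch of the state contract this loop enforces: make_state returns a dict mapping state names to array-likes (wrapped in Signals) or RandomState instances (passed through); anything else is a BuildError. Here is_array_like is approximated with np.asarray:

import numpy as np

def classify_state(init):
    if isinstance(init, np.random.RandomState):
        return "pass through"
    try:
        np.asarray(init, dtype=float)
        return "wrap in Signal"
    except (TypeError, ValueError):
        return "BuildError"

print(classify_state(np.zeros(10)))              # wrap in Signal
print(classify_state(np.random.RandomState(3)))  # pass through
print(classify_state(object()))                  # BuildError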
Example #17
    def _place_ensembles(self, network):
        """Place ensembles.

        Ensembles should go on the chip, unless:

        1. The user has specified they should not
        2. The ensemble is running in direct mode
        3. They are the ``post`` in a learned connection.
        4. They are the ``pre`` in a connection to a LearningRule
           (i.e., they provide the error signal for a learned connection).
        """

        # Enforce rules 1 and 2
        for ens in network.all_ensembles:
            if network.config[ens].on_chip is False or isinstance(
                ens.neuron_type, Direct
            ):
                self.host_objs.add(ens)
            else:
                self.chip_objs.add(ens)

        for conn in network.all_connections:
            pre, post = base_obj(conn.pre), base_obj(conn.post)

            # Enforce rule 3
            if (
                conn.learning_rule_type is not None
                and isinstance(post, Ensemble)
                and post in self.chip_objs
            ):
                if network.config[post].on_chip:
                    raise BuildError(
                        "Post ensemble (%r) of learned connection (%r) must not be "
                        "configured as on_chip." % (post, conn)
                    )
                self.host_objs.add(post)
                self.chip_objs.remove(post)

            # Enforce rule 4
            elif (
                isinstance(post, LearningRule)
                and isinstance(pre, Ensemble)
                and pre in self.chip_objs
            ):
                if network.config[pre].on_chip:
                    raise BuildError(
                        "Pre ensemble (%r) of error connection (%r) must not be "
                        "configured as on_chip." % (pre, conn)
                    )
                self.host_objs.add(pre)
                self.chip_objs.remove(pre)
Example #18
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = "out" if is_pre else "in"

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero." %
                             (conn, "pre" if is_pre else "post", target))
        if key not in model.sig[target]:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero." %
                (conn, "pre" if is_pre else "post", target, key))

        return model.sig[target][key]
Example #19
def build_neurons(model, neurontype, neurons):
    """Builds a `.NeuronType` object into a model.

    This function adds a `.SimNeurons` operator connecting the input current to the
    neural output signals, and handles any additional state variables defined
    within the neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    neurontype : NeuronType
        Neuron type to build.
    neurons : Neurons
        The neuron population object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.NeuronType` instance.
    """
    dtype = model.sig[neurons]["in"].dtype
    n_neurons = neurons.size_in
    rng = np.random.RandomState(model.seeds[neurons.ensemble] + 1)
    state_init = neurontype.make_state(n_neurons, rng=rng, dtype=dtype)
    state = {}

    for key, init in state_init.items():
        if key in model.sig[neurons]:
            raise BuildError("State name %r overlaps with existing signal name" % key)
        if is_array_like(init):
            model.sig[neurons][key] = Signal(
                initial_value=init, name="%s.%s" % (neurons, key)
            )
            state[key] = model.sig[neurons][key]
        elif isinstance(init, np.random.RandomState):
            # Pass through RandomState instances
            state[key] = init
        else:
            raise BuildError(
                "State %r is of type %r. Only array-likes and RandomStates are "
                "currently supported." % (key, type(init).__name__)
            )

    model.sig[neurons]["out"] = (
        state["spikes"] if neurontype.spiking else state["rates"]
    )
    model.add_op(
        SimNeurons(neurons=neurontype, J=model.sig[neurons]["in"], state=state)
    )
Example #20
    def __init__(self, A, X, Y, tag=None):
        if X.ndim >= 2 and any(d > 1 for d in X.shape[1:]):
            raise BuildError("X must be a column vector")
        if Y.ndim >= 2 and any(d > 1 for d in Y.shape[1:]):
            raise BuildError("Y must be a column vector")

        self.A = A
        self.X = X
        self.Y = Y
        self.tag = tag

        self.sets = []
        self.incs = [Y]
        self.reads = [A, X]
        self.updates = []
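The sets/incs/reads/updates lists declare how the operator touches each signal so the builder can order operations; this one reads A and X and increments Y. A numpy sketch of the runtime step it corresponds to, with hypothetical values:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
X = np.array([1.0, -1.0])  # a column vector, per the checks above
Y = np.zeros(2)

Y += A.dot(X)              # an "inc": Y accumulates rather than being overwritten
print(Y)                   # [-1. -1.]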
Example #21
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = "out" if is_pre else "in"

        if target not in model.sig:
            raise BuildError(
                f"Building {conn}: the '{'pre' if is_pre else 'post'}' object {target} "
                "is not in the model, or has a size of zero.")
        signal = model.sig[target].get(key, None)
        if signal is None or signal.size == 0:
            raise BuildError(
                f"Building {conn}: the '{'pre' if is_pre else 'post'}' object {target} "
                f"has a '{key}' size of zero.")

        return signal
Example #22
def reshape_dot(A, X, Y, tag=None):
    """Checks if the dot product needs to be reshaped.

    Also does a bunch of error checking based on the shapes of A and X.
    """
    badshape = False
    ashape = (1, ) if A.shape == () else A.shape
    xshape = (1, ) if X.shape == () else X.shape

    if A.shape == ():
        incshape = X.shape
    elif X.shape == ():
        incshape = A.shape
    elif X.ndim == 1:
        badshape = ashape[-1] != xshape[0]
        incshape = ashape[:-1]
    else:
        badshape = ashape[-1] != xshape[-2]
        incshape = ashape[:-1] + xshape[:-2] + xshape[-1:]

    if (badshape or incshape != Y.shape) and incshape != ():
        raise BuildError("shape mismatch in %s: %s x %s -> %s" %
                         (tag, A.shape, X.shape, Y.shape))

    # Reshape to handle case when np.dot(A, X) and Y are both scalars
    return (np.dot(A, X)).size == Y.size == 1
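A quick numpy check of the shape rules encoded above: a scalar operand adopts the other operand's shape, and the all-scalar product is the case that needs the reshape:

import numpy as np

A, X, Y = np.array(2.0), np.ones(3), np.zeros(3)    # scalar A: incshape = X.shape
assert np.dot(A, X).shape == Y.shape

A, X, Y = np.ones((1, 3)), np.ones(3), np.zeros(1)  # (1, 3) x (3,) -> (1,)
assert np.dot(A, X).size == Y.size == 1             # the reshape case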
Example #23
    def make_step(self, signals, dt, rng):
        src = signals[self.src]
        dst = signals[self.dst]
        src_slice = self.src_slice if self.src_slice is not None else Ellipsis
        dst_slice = self.dst_slice if self.dst_slice is not None else Ellipsis
        inc = self.inc

        # If there are repeated indices in dst_slice, special handling needed.
        repeats = False
        if npext.is_array_like(dst_slice):
            dst_slice = np.array(dst_slice)  # copy because we might modify it
            if dst_slice.dtype.kind != "b":
                # get canonical, positive indices first
                dst_slice[dst_slice < 0] += len(dst)
                repeats = len(np.unique(dst_slice)) < len(dst_slice)

        if inc and repeats:

            def step_copy():
                np.add.at(dst, dst_slice, src[src_slice])
        elif inc:

            def step_copy():
                dst[dst_slice] += src[src_slice]
        elif repeats:
            raise BuildError("%s: Cannot have repeated indices in "
                             "``dst_slice`` when copy is not an increment" %
                             self)
        else:

            def step_copy():
                dst[dst_slice] = src[src_slice]

        return step_copy
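Why repeated indices need special handling: fancy-index += applies each repeated index only once, while np.add.at accumulates every occurrence:

import numpy as np

dst = np.zeros(3)
dst[[0, 0, 1]] += 1.0
print(dst)                      # [1. 1. 0.] -- the repeat at index 0 is lost

dst = np.zeros(3)
np.add.at(dst, [0, 0, 1], 1.0)
print(dst)                      # [2. 1. 0.] -- both increments at index 0 applied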
Example #24
    def configure_lif(self, tau_rc=0.02, tau_ref=0.001, vth=1, dt=0.001,
                      min_voltage=0):
        """Configure these compartments as individual LIF neurons.

        Parameters
        ----------
        tau_rc : float
            Membrane time constant (in seconds) of the neurons.
        tau_ref : float
            Refractory period (in seconds) of the neurons.
        vth : float
            Voltage firing threshold of the neurons.
        dt : float
            Simulator time step length (in seconds).
        min_voltage : float
            The minimum voltage for the neurons.
        """

        self.decay_v[:] = -np.expm1(-dt / np.asarray(tau_rc))
        self.refract_delay[:] = np.round(tau_ref / dt) + 1
        self.vth[:] = vth
        self.vmin = min_voltage
        self.vmax = np.inf
        self.scale_v = np.all(self.decay_v > self.DECAY_SCALE_TH)
        if not self.scale_v:
            raise BuildError(
                "Voltage (V) scaling is required with LIF neurons. Perhaps "
                "the neuron tau_rc time constant is too large.")
Example #25
def build_decoders(model, conn, rng, transform):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = get_targets(conn, eval_points)

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    E = None
    if conn.solver.weights:
        E = model.params[conn.post_obj].scaled_encoders.T[conn.post_slice]
        # include transform in solved weights
        targets = multiply(targets, transform.T)

    try:
        wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
                          if model.seeded[conn] else solve_for_decoders)
        decoders, solver_info = wrapped_solver(
            conn.solver, conn.pre_obj.neuron_type, gain, bias, x, targets,
            rng=rng, E=E)
    except BuildError:
        raise BuildError(
            "Building %s: 'activities' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    weights = (decoders.T if conn.solver.weights else
               multiply(transform, decoders.T))
    return eval_points, weights, solver_info
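The wrapped solver ultimately performs a regularized least-squares fit from neuron activities to targets; a minimal numpy sketch of that idea (the real solve_for_decoders also applies the neuron nonlinearity and caching; names and sizes here are hypothetical):

import numpy as np

rng = np.random.default_rng(1)
activities = rng.random((100, 20))  # (n_eval_points, n_neurons)
targets = rng.random((100, 2))      # (n_eval_points, output_dims)

reg = 0.1 * activities.max()
G = activities.T @ activities + reg ** 2 * len(activities) * np.eye(20)
decoders = np.linalg.solve(G, activities.T @ targets)  # (n_neurons, output_dims)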
Example #26
def build_sparse(model, transform, sig_in, decoders=None, encoders=None, rng=np.random):
    """Build a `.Sparse` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a sparse transform to a decoded connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    # Add output signal
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    weights = transform.sample(rng=rng)
    assert weights.ndim == 2

    # Add operator for applying weights
    weight_sig = Signal(weights, name="%s.weights" % transform, readonly=True)
    model.add_op(
        SparseDotInc(weight_sig, sig_in, weighted, tag="%s.apply_weights" % transform)
    )

    return weighted, weight_sig
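At run time SparseDotInc computes weighted += weights @ sig_in with a sparse weight matrix; a scipy sketch of that per-step operation (assumes scipy is installed):

import numpy as np
from scipy import sparse

weights = sparse.random(4, 6, density=0.25, random_state=0, format="csr")
sig_in = np.ones(6)
weighted = np.zeros(4)

weighted += weights @ sig_in  # the increment the operator performs each step
print(weighted)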
Example #27
File: connection.py (project: Gracewx/nengo)
def build_decoders(model, conn, rng):
    """Compute decoders for connection."""

    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = get_targets(conn, eval_points, dtype=rc.float_dtype)

    if conn.solver.weights and not conn.solver.compositional:
        # solver is solving for the whole weight matrix, so apply
        # transform/encoders to targets
        if not isinstance(conn.transform, Dense):
            raise BuildError(
                "Non-compositional solvers only work with Dense transforms")
        transform = conn.transform.sample(rng=rng)
        targets = np.dot(targets, transform.T)
        # weight solvers only allowed on ensemble->ensemble connections
        assert isinstance(conn.post_obj, Ensemble)
        post_enc = model.params[conn.post_obj].scaled_encoders
        targets = np.dot(targets, post_enc.T[conn.post_slice])

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
                      if model.seeded[conn] else solve_for_decoders)
    decoders, solver_info = wrapped_solver(conn,
                                           gain,
                                           bias,
                                           x,
                                           targets,
                                           rng=rng)

    return eval_points, decoders.T, solver_info
Example #28
def build_convolution(
    model, transform, sig_in, decoders=None, encoders=None, rng=np.random
):
    """Build a `.Convolution` transform object."""

    if decoders is not None:
        raise BuildError(
            "Applying a convolution transform to a decoded "
            "connection is not supported"
        )

    # Shouldn't be possible for encoders to be non-None, since that only
    # occurs for a connection solver with weights=True, and those can only
    # be applied to decoded connections (which are disallowed above)
    assert encoders is None

    weights = transform.sample(rng=rng)
    weight_sig = Signal(weights, readonly=True, name="%s.weights" % transform)
    weighted = Signal(shape=transform.size_out, name="%s.weighted" % transform)
    model.add_op(Reset(weighted))

    model.add_op(
        ConvInc(
            weight_sig, sig_in, weighted, transform, tag="%s.apply_weights" % transform
        )
    )

    return weighted, weight_sig
Example #29
def build_node(model, node):
    # input signal
    if not is_array_like(node.output) and node.size_in > 0:
        sig_in = Signal(np.zeros(node.size_in), name="%s.in" % node)
        model.add_op(Reset(sig_in))
    else:
        sig_in = None

    # Provide output
    if node.output is None:
        sig_out = sig_in
    elif isinstance(node.output, Process):
        sig_out = Signal(np.zeros(node.size_out), name="%s.out" % node)
        model.build(node.output, sig_in, sig_out)
    elif callable(node.output):
        sig_out = (Signal(np.zeros(node.size_out), name="%s.out" %
                          node) if node.size_out > 0 else None)
        model.add_op(
            SimPyFunc(output=sig_out, fn=node.output, t=model.time, x=sig_in))
    elif is_array_like(node.output):
        sig_out = Signal(node.output, name="%s.out" % node)
    else:
        raise BuildError("Invalid node output type %r" %
                         node.output.__class__.__name__)

    model.sig[node]["in"] = sig_in
    model.sig[node]["out"] = sig_out
    model.params[node] = None
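The four output branches above correspond to the kinds of output a nengo.Node accepts; a brief usage sketch with the standard nengo API:

import numpy as np
import nengo

with nengo.Network():
    const = nengo.Node(np.array([1.0, 2.0]))         # array-like output
    func = nengo.Node(lambda t, x: -x, size_in=2)    # callable output
    proc = nengo.Node(nengo.processes.WhiteNoise())  # Process output
    relay = nengo.Node(None, size_in=2)              # passthrough (output is None)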
Example #30
def build_host_to_learning_rule(model, conn):
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        # TODO: What needs to be done to support this? It looks like it should just work
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip "
            "learning rule connections"
        )

    dim = conn.size_out
    host = model.host_model(base_obj(conn.pre))

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    pre2send = Connection(
        conn.pre,
        send,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=conn.transform,
        label=conn.label,
        add_to_container=False,
    )
    model.host2chip_pes_senders[send] = model.needs_sender[conn.post_obj]
    _inherit_seed(host, pre2send, model, conn)
    host.build(pre2send)