Example #1
    def extend(self, keys, unitary=False):
        """Extends the vocabulary with additional keys.

        Creates and adds the semantic pointers listed in keys to the
        vocabulary.

        Parameters
        ----------
        keys : list
            List of semantic pointer names to be added to the vocabulary.
        unitary : bool or list, optional
            If True, all generated pointers will be unitary. If a list of
            strings, any pointer whose name is on the list will be forced to
            be unitary when created.
        """
        if is_iterable(unitary):
            if is_iterable(self.unitary):
                self.unitary.extend(unitary)
            else:
                self.unitary = list(unitary)
        elif unitary:
            if is_iterable(self.unitary):
                self.unitary.extend(keys)
            else:
                self.unitary = list(keys)

        for key in keys:
            if key not in self.keys:
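                # accessing the key creates the pointer via __getitem__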
                self[key]
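A minimal usage sketch for ``extend`` (assumes ``nengo.spa``; the dimensionality and pointer names are illustrative):

    import nengo.spa as spa

    vocab = spa.Vocabulary(64)            # 64-dimensional semantic pointers
    vocab.extend(["CAT", "DOG"])          # pointers are created on first access
    vocab.extend(["ROT"], unitary=True)   # force the new pointer to be unitary
    assert "ROT" in vocab.keys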
Example #2
    def extend(self, keys, unitary=False):
        """Extends the vocabulary with additional keys.

        Creates and adds the semantic pointers listed in keys to the
        vocabulary.

        Parameters
        ----------
        keys : list
            List of semantic pointer names to be added to the vocabulary.
        unitary : bool or list, optional (Default: False)
            If True, all generated pointers will be unitary. If a list of
            strings, any pointer whose name is on the list will be forced to
            be unitary when created.
        """
        if is_iterable(unitary):
            if is_iterable(self.unitary):
                self.unitary.extend(unitary)
            else:
                self.unitary = list(unitary)
        elif unitary:
            if is_iterable(self.unitary):
                self.unitary.extend(keys)
            else:
                self.unitary = list(keys)

        for key in keys:
            if key not in self.keys:
                self[key]
Example #3
    def add_input_mapping(self, name, input_vectors, input_scales=1.0):
        """Adds a set of input vectors to the associative memory network.

        Creates a transform with the given input vectors between a named
        input node and the associative memory element input, enabling the
        inputs to be mapped onto ensembles of the Associative Memory.

        Parameters
        ----------
        name: str
            Name to use for the input node. This name will be used as the name
            of the attribute for the associative memory network.
        input_vectors: array_like
            The list of vectors to be compared against.
        input_scales: float or array_like, optional
            Scaling factor to apply on each of the input vectors. Note that it
            is possible to scale each vector independently.
        """
        # --- Put arguments in canonical form
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        n_vectors, d_vectors = input_vectors.shape
        if not is_iterable(input_scales):
            input_scales = input_scales * np.ones((1, n_vectors))
        else:
            input_scales = np.array(input_scales, ndmin=2)

        # --- Check some preconditions
        if input_scales.shape[1] != n_vectors:
            raise ValidationError(
                "Number of input_scale values (%d) does not "
                "match number of input vectors (%d)." %
                (input_scales.shape[1], n_vectors),
                attr="input_scales",
            )
        if hasattr(self, name):
            raise ValidationError(
                "Name '%s' already exists as a node in the "
                "associative memory." % name,
                attr="name",
            )

        # --- Finally, make the input node and connect it
        # (the node and connection must be created in this network's context)
        with self:
            in_node = Node(size_in=d_vectors, label=name)
            setattr(self, name, in_node)
            Connection(
                in_node,
                self.elem_input,
                synapse=None,
                transform=input_vectors * input_scales.T,
            )
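A usage sketch for ``add_input_mapping`` (standard Nengo associative memory API; the vectors and scales are illustrative):

    import numpy as np
    import nengo

    with nengo.Network():
        am = nengo.networks.AssociativeMemory(input_vectors=np.eye(3))
        am.add_input_mapping("cue", np.eye(3), input_scales=[1.0, 0.5, 2.0])
        # am.cue is a new 3-D input Node whose transform scales each stored vector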
Example #4
 def simplify(self):
     is_num = lambda x: isinstance(x, NumExp)
     if isinstance(self.fn, str):
         return self  # cannot simplify
     elif all(map(is_num, self.args)):
         # simplify scalar function
         return NumExp(self.fn(*[a.value for a in self.args]))
     elif all(
             is_num(a) or is_iterable(a) and all(map(is_num, a))
             for a in self.args):
         # simplify vector function
         return NumExp(
             self.fn([[aa.value for aa in a] if is_iterable(a) else a.value
                      for a in self.args]))
     else:
         return self  # cannot simplify
Example #5
 def coerce(self, instance, rule):
     if is_iterable(rule):
         for r in (rule.values() if isinstance(rule, dict) else rule):
             self.check_rule(instance, r)
     elif rule is not None:
         self.check_rule(instance, rule)
     return super().coerce(instance, rule)
Example #6
 def coerce(self, instance, rule):  # pylint: disable=arguments-renamed
     if is_iterable(rule):
         for r in rule.values() if isinstance(rule, dict) else rule:
             self.check_rule(instance, r)
     elif rule is not None:
         self.check_rule(instance, rule)
     return super().coerce(instance, rule)
Example #8
    def __getitem__(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            rval = self.__class__.__new__(self.__class__)
            rval.starts = [self.starts[i] for i in item]
            rval.shape0s = [self.shape0s[i] for i in item]
            rval.shape1s = [self.shape1s[i] for i in item]
            rval.stride0s = [self.stride0s[i] for i in item]
            rval.stride1s = [self.stride1s[i] for i in item]
            rval.buf = self.buf
            rval.names = [self.names[i] for i in item]
            return rval
        else:
            if isinstance(item, np.ndarray):
                item.shape = ()  # avoid numpy DeprecationWarning

            itemsize = self.dtype.itemsize
            shape = (self.shape0s[item], self.shape1s[item])
            byteoffset = itemsize * self.starts[item]
            bytestrides = (
                itemsize * self.stride0s[item],
                itemsize * self.stride1s[item],
            )
            return np.ndarray(
                shape=shape,
                dtype=self.dtype,
                buffer=self.buf.data,
                offset=byteoffset,
                strides=bytestrides,
            )
Example #9
    def add_output_mapping(self, name, output_vectors):
        """Adds another output to the associative memory network.

        Creates a transform with the given output vectors between the
        associative memory element output and a named output node to enable the
        selection of output vectors by the associative memory.

        Parameters
        ----------
        name: str
            Name to use for the output node. This name will be used as
            the name of the attribute for the associative memory network.
        output_vectors: array_like
            The list of vectors to be produced for each match.
        """
        # --- Put arguments in canonical form
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        # --- Check preconditions
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name, attr='name')

        # --- Make the output node and connect it
        # (the node and connections must be created in this network's context)
        with self:
            output = nengo.Node(size_in=output_vectors.shape[1], label=name)
            setattr(self, name, output)

            if self.thresh_ens is not None:
                c = nengo.Connection(self.thresh_ens.output, output,
                                     synapse=None, transform=output_vectors.T)
            else:
                c = nengo.Connection(self.elem_output, output,
                                     synapse=None, transform=output_vectors.T)
            self.out_conns.append(c)
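A usage sketch for ``add_output_mapping`` (standard Nengo API; values illustrative):

    import numpy as np
    import nengo

    with nengo.Network():
        am = nengo.networks.AssociativeMemory(input_vectors=np.eye(3))
        out_vectors = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
        am.add_output_mapping("response", out_vectors)
        # am.response is a 2-D Node driven by elem_output through out_vectors.T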
Example #10
    def __init__(
        self,
        target=None,
        key=None,
        slice=None,
        weights=None,
        synapse=None,
        reindexing=None,
    ):
        self.key = key
        self.synapse = synapse

        iterable_target = is_iterable(target)
        self.target = ([] if target is None else
                       list(target) if iterable_target else [target])
        # targets can be LoihiBlock or None. `Model.add_probe` checks Nones are filled.
        assert all(
            isinstance(t, (LoihiBlock, type(None))) for t in self.target)

        if slice is None:
            self.slice = [self._slice(None) for _ in self.target]
        elif iterable_target:
            # a single `slice` can itself be e.g. a list, so key off the target
            self.slice = slice
        else:
            self.slice = [slice]
        assert len(self.slice) == len(self.target)

        if weights is None:
            self.weights = [None for _ in self.target]
        elif iterable_target:
            self.weights = [
                np.asarray(w) if w is not None else None for w in weights]
        else:
            self.weights = [np.asarray(weights)]
        assert len(self.weights) == len(self.target)

        self.reindexing = reindexing
Example #11
    def getitem_device(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            return CLRaggedArray.from_buffer(
                self.queue,
                self.cl_buf,
                self.starts[item],
                self.shape0s[item],
                self.shape1s[item],
                self.stride0s[item],
                self.stride1s[item],
                names=[self.names[i] for i in item],
            )
        else:
            s = self.dtype.itemsize
            return Array(
                self.queue,
                (self.shape0s[item], self.shape1s[item]),
                self.dtype,
                strides=(self.stride0s[item] * s, self.stride1s[item] * s),
                data=self.cl_buf.data,
                offset=self.starts[item] * s,
            )
Example #13
 def function_args(self, conn, function):
     x = (
         conn.eval_points[0]
         if is_iterable(conn.eval_points)
         else np.zeros(conn.size_in)
     )
     return (x,)
Example #14
    def __init__(self, fn, in_dims=None, out_dim=None):
        if in_dims is not None and not is_iterable(in_dims):
            in_dims = [in_dims]

        self.fn = fn
        self.in_dims = in_dims
        self.out_dim = out_dim
        self._translator = None
Example #15
    def __init__(self,
                 n_neurons,
                 n_ensembles,
                 ens_dimensions=1,
                 neuron_nodes=False,
                 label=None,
                 seed=None,
                 add_to_container=None,
                 **ens_kwargs):
        if "dimensions" in ens_kwargs:
            raise ValidationError(
                "'dimensions' is not a valid argument to EnsembleArray. "
                "To set the number of ensembles, use 'n_ensembles'. To set "
                "the number of dimensions per ensemble, use 'ens_dimensions'.",
                attr='dimensions',
                obj=self)

        super().__init__(label, seed, add_to_container)

        for param in ens_kwargs:
            if is_iterable(ens_kwargs[param]):
                ens_kwargs[param] = nengo.dists.Samples(ens_kwargs[param])

        self.config[nengo.Ensemble].update(ens_kwargs)

        label_prefix = "" if label is None else label + "_"

        self.n_neurons_per_ensemble = n_neurons
        self.n_ensembles = n_ensembles
        self.dimensions_per_ensemble = ens_dimensions

        # These may be set in add_neuron_input and add_neuron_output
        self.neuron_input, self.neuron_output = None, None

        self.ea_ensembles = []

        with self:
            self.input = nengo.Node(size_in=self.dimensions, label="input")

            for i in range(n_ensembles):
                e = nengo.Ensemble(n_neurons,
                                   self.dimensions_per_ensemble,
                                   label="%s%d" % (label_prefix, i))
                nengo.Connection(self.input[i * ens_dimensions:(i + 1) *
                                            ens_dimensions],
                                 e,
                                 synapse=None)
                self.ea_ensembles.append(e)

        if neuron_nodes:
            self.add_neuron_input()
            self.add_neuron_output()
            warnings.warn(
                "'neuron_nodes' argument will be removed in Nengo 2.2. Use "
                "'add_neuron_input' and 'add_neuron_output' methods instead.",
                DeprecationWarning)

        self.add_output('output', function=None)
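A minimal usage sketch (standard Nengo API; sizes illustrative):

    import nengo

    with nengo.Network():
        ea = nengo.networks.EnsembleArray(n_neurons=50, n_ensembles=4,
                                          ens_dimensions=2)
        stim = nengo.Node([0.5] * 8)          # 4 ensembles x 2 dimensions
        nengo.Connection(stim, ea.input)
        probe = nengo.Probe(ea.output, synapse=0.01)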
Example #16
 def probe(cls, x):
     """Returns a function that consumes a simulator and produces probe data."""
     # Probes are created eagerly, but the actual probing is done lazily
     # through recursive calls that inject the simulator object.
     if is_iterable(x):
         p_funcs = tuple(cls.probe(xi) for xi in x)
         return lambda sim: tuple(p_func(sim) for p_func in p_funcs)
     p = nengo.Probe(x)
     return lambda sim: sim.data[p]
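A usage sketch (hypothetical: assumes this classmethod lives on a class named ``Probes`` and that a model has been built):

    import nengo

    with nengo.Network() as net:
        a = nengo.Ensemble(50, 1)
        b = nengo.Ensemble(50, 1)
        getter = Probes.probe((a, b))    # eagerly creates two nengo.Probe objects

    with nengo.Simulator(net) as sim:
        sim.run(0.1)
        data_a, data_b = getter(sim)     # lazily pulls the data out of the simulator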
Example #17
    def add_input_mapping(self, name, input_vectors, input_scales=1.0):
        """Adds a set of input vectors to the associative memory network.

        Creates a transform with the given input vectors between a named
        input node and the associative memory element input, enabling the
        inputs to be mapped onto ensembles of the Associative Memory.

        Parameters
        ----------
        name: str
            Name to use for the input node. This name will be used as the name
            of the attribute for the associative memory network.
        input_vectors: array_like
            The list of vectors to be compared against.
        input_scales: float or array_like, optional (Default: 1.0)
            Scaling factor to apply on each of the input vectors. Note that it
            is possible to scale each vector independently.
        """
        # --- Put arguments in canonical form
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        n_vectors, d_vectors = input_vectors.shape
        if not is_iterable(input_scales):
            input_scales = input_scales * np.ones((1, n_vectors))
        else:
            input_scales = np.array(input_scales, ndmin=2)

        # --- Check some preconditions
        if input_scales.shape[1] != n_vectors:
            raise ValidationError("Number of input_scale values (%d) does not "
                                  "match number of input vectors (%d)."
                                  % (input_scales.shape[1], n_vectors),
                                  attr='input_scales')
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name, attr='name')

        # --- Finally, make the input node and connect it
        # (the node and connection must be created in this network's context)
        with self:
            in_node = nengo.Node(size_in=d_vectors, label=name)
            setattr(self, name, in_node)
            nengo.Connection(in_node, self.elem_input,
                             synapse=None,
                             transform=input_vectors * input_scales.T)
Example #18
    def __init__(
        self,
        n_neurons,
        n_ensembles,
        ens_dimensions=1,
        label=None,
        seed=None,
        add_to_container=None,
        **ens_kwargs,
    ):
        if "dimensions" in ens_kwargs:
            raise ValidationError(
                "'dimensions' is not a valid argument to EnsembleArray. "
                "To set the number of ensembles, use 'n_ensembles'. To set "
                "the number of dimensions per ensemble, use 'ens_dimensions'.",
                attr="dimensions",
                obj=self,
            )

        super().__init__(label, seed, add_to_container)

        for param, value in ens_kwargs.items():
            if is_iterable(value):
                ens_kwargs[param] = Samples(value)

        self.config[Ensemble].update(ens_kwargs)

        label_prefix = "" if label is None else label + "_"

        self.n_neurons_per_ensemble = n_neurons
        self.n_ensembles = n_ensembles
        self.dimensions_per_ensemble = ens_dimensions

        # These may be set in add_neuron_input and add_neuron_output
        self.neuron_input, self.neuron_output = None, None

        self.ea_ensembles = []

        with self:
            self.input = Node(size_in=self.dimensions, label="input")

            for i in range(n_ensembles):
                e = Ensemble(
                    n_neurons,
                    self.dimensions_per_ensemble,
                    label=f"{label_prefix}{i}",
                )
                Connection(
                    self.input[i * ens_dimensions : (i + 1) * ens_dimensions],
                    e,
                    synapse=None,
                )
                self.ea_ensembles.append(e)

        self.add_output("output", function=None)
Example #19
    def __init__(self, n_neurons, n_ensembles, ens_dimensions=1,
                 neuron_nodes=False, label=None, seed=None,
                 add_to_container=None, **ens_kwargs):
        if "dimensions" in ens_kwargs:
            raise ValidationError(
                "'dimensions' is not a valid argument to EnsembleArray. "
                "To set the number of ensembles, use 'n_ensembles'. To set "
                "the number of dimensions per ensemble, use 'ens_dimensions'.",
                attr='dimensions', obj=self)

        super().__init__(label, seed, add_to_container)

        for param in ens_kwargs:
            if is_iterable(ens_kwargs[param]):
                ens_kwargs[param] = nengo.dists.Samples(ens_kwargs[param])

        self.config[nengo.Ensemble].update(ens_kwargs)

        label_prefix = "" if label is None else label + "_"

        self.n_neurons_per_ensemble = n_neurons
        self.n_ensembles = n_ensembles
        self.dimensions_per_ensemble = ens_dimensions

        # These may be set in add_neuron_input and add_neuron_output
        self.neuron_input, self.neuron_output = None, None

        self.ea_ensembles = []

        with self:
            self.input = nengo.Node(size_in=self.dimensions, label="input")

            for i in range(n_ensembles):
                e = nengo.Ensemble(n_neurons, self.dimensions_per_ensemble,
                                   label="%s%d" % (label_prefix, i))
                nengo.Connection(self.input[i * ens_dimensions:
                                            (i + 1) * ens_dimensions],
                                 e, synapse=None)
                self.ea_ensembles.append(e)

        if neuron_nodes:
            self.add_neuron_input()
            self.add_neuron_output()
            warnings.warn(
                "'neuron_nodes' argument will be removed in Nengo 2.2. Use "
                "'add_neuron_input' and 'add_neuron_output' methods instead.",
                DeprecationWarning)

        self.add_output('output', function=None)
Example #20
    def _broadcast_args(self, func, args):
        """Apply 'func' element-wise to lists of args"""
        as_list = lambda x: list(x) if is_iterable(x) else [x]
        args = list(map(as_list, args))
        arg_lens = list(map(len, args))
        max_len = max(arg_lens)
        assert all(n in [0, 1, max_len] for n in arg_lens), (
            "Could not broadcast arguments with lengths %s" % arg_lens)

        result = [
            func(*[a[i] if len(a) > 1 else a[0] for a in args])
            for i in range(max_len)
        ]
        result = [r.simplify() for r in result]
        return result[0] if len(result) == 1 else result
Example #21
 def visit_Return(self, expr):
     value = self.visit(expr.value)
     if is_iterable(value):
         self._check_vector_length(len(value))
         if not all(isinstance(v, Expression) for v in value):
             raise ValueError(
                 "Can only return a list of mathematical expressions")
         return [
             "%s[%d] = %s;" % (OUTPUT_NAME, i, v.to_ocl())
             for i, v in enumerate(value)
         ] + ["return;"]
     elif isinstance(value, Expression):
         return ["%s[0] = %s;" % (OUTPUT_NAME, value.to_ocl()), "return;"]
     else:
         raise ValueError("Can only return mathematical expressions, "
                          "or lists of expressions")
Example #22
    def __setitem__(self, item, new_value):
        if isinstance(item, slice) or is_iterable(item):
            raise NotImplementedError("TODO")
        else:
            m, n = self.shape0s[item], self.shape1s[item]
            sm, sn = self.stride0s[item], self.stride1s[item]

            if (sm, sn) in [(1, m), (n, 1)]:
                # contiguous
                clarray = self.getitem_device(item)
                if isinstance(new_value, np.ndarray):
                    array = np.asarray(new_value, order="C", dtype=self.dtype)
                else:
                    array = np.zeros(clarray.shape, dtype=clarray.dtype)
                    array[...] = new_value

                array.shape = clarray.shape  # reshape to avoid warning
                assert equal_strides(array.strides, clarray.strides,
                                     clarray.shape)
                clarray.set(array)
            else:
                # discontiguous
                #   Copy a contiguous region off the device that surrounds the
                #   discontiguous region, set the appropriate values, and
                #   copy it back.
                s = self.starts[item]
                array = to_host(
                    self.queue,
                    self.cl_buf.data,
                    self.dtype,
                    s,
                    (m, n),
                    (sm, sn),
                    is_blocking=True,
                )
                array[...] = new_value

                buf = array.base if array.base is not None else array
                bytestart = self.dtype.itemsize * s
                cl.enqueue_copy(
                    self.queue,
                    self.cl_buf.data,
                    buf,
                    device_offset=bytestart,
                    is_blocking=True,
                )
Example #23
def lti(u, system, state=lambda x: x, dt=0.001, method="zoh"):
    r"""Operator that solves \dot{x} = A.state(x) + B.u. where A, B = system.

    The state parameter can be any callable function that consumes a Stimulus operator
    and produces a Gyrus operator that consumes said operator as an input. For instance,
    nonlinear dynamical systems may be implemented by specifying a nonlinear function
    for the ``state``.
    """
    if not is_iterable(system) or len(system) != 2:
        raise NotImplementedError(
            f"lti currently only supports systems as two-tuples: (A, B); not {system}"
        )

    # Reshape and validate A, B matrices.
    A, B = system
    A = np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError(f"A ({A}) must be a square matrix")
    size_out = A.shape[0]
    B = np.asarray(B)
    if B.ndim == 1:
        B = B[:, None]
    if B.ndim != 2:
        raise ValueError(f"B ({B}) must be 1D or 2D, but is {B.ndim}")
    if B.shape[0] != size_out:
        raise ValueError(
            f"B ({B}) must have {size_out} rows, not {B.shape[0]}")
    C = np.zeros_like(B).T
    D = 0

    # Discretize the dynamical system, \dot{x} = Ax + Bu, to
    # x[t + dt] = Ax[t] + Bu[t], using some method (ZOH recommended for stability).
    Abar, Bbar, _, _, _ = cont2discrete((A, B, C, D), dt=dt, method=method)

    # Apply Voelker (2019) equation 5.30 with H(z) = dt / (z - 1).
    # This compensates for the discretized integrator such that the resulting
    # system is the one that was requested. In this particular case (with the
    # synapse being the discretized integrator) this reduces to the inverse
    # of Euler's method.
    Amap = (Abar - np.eye(len(Abar))) / dt
    Bmap = Bbar / dt

    # Finally express the Amap, Bmap system using vectorized Gyrus operators.
    return u.transform(Bmap).integrate(
        integrand=lambda x: state(x).transform(Amap))
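An illustrative sketch (``u`` is assumed to be a Gyrus input operator created elsewhere, e.g. with ``gyrus.stimuli``; the matrices define a harmonic oscillator):

    import numpy as np

    A = np.array([[0.0, -1.0],
                  [1.0, 0.0]])    # rotational dynamics
    B = np.array([[1.0],
                  [0.0]])         # the input drives the first state dimension
    x = lti(u, system=(A, B))     # \dot{x} = Ax + Bu, discretized with ZOH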
Example #24
 def __getitem__(self, item):
     """
     Getting one item returns a numpy array (on the host).
     Getting multiple items returns a view into the device.
     """
     if is_iterable(item):
         return self.getitem_device(item)
     else:
         buf = to_host(
             self.queue,
             self.cl_buf.data,
             self.dtype,
             self.starts[item],
             (self.shape0s[item], self.shape1s[item]),
             (self.stride0s[item], self.stride1s[item]),
         )
         buf.setflags(write=False)
         return buf
Example #25
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            assert False, "Validation should catch this"

        return learning_rule
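A usage sketch of the return shapes (standard Nengo API; numbers illustrative):

    import nengo

    with nengo.Network():
        pre = nengo.Ensemble(50, 1)
        post = nengo.Ensemble(50, 1)
        conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
        rule = conn.learning_rule    # a single LearningRule
        # a dict or list of LearningRuleTypes yields a dict or list of LearningRules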
Example #26
    def __getitem__(self, key):
        """Return the semantic pointer with the requested name.

        If one does not exist, automatically create one. The key must be
        a valid semantic pointer name, which is any Python identifier starting
        with a capital letter.
        """
        if not key[0].isupper():
            raise SpaParseError(
                "Semantic pointers must begin with a capital letter.")
        value = self.pointers.get(key, None)
        if value is None:
            if is_iterable(self.unitary):
                unitary = key in self.unitary
            else:
                unitary = self.unitary
            value = self.create_pointer(unitary=unitary)
            self.add(key, value)
        return value
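A sketch of the auto-creation behaviour (assumes ``nengo.spa``; names illustrative):

    import nengo.spa as spa

    vocab = spa.Vocabulary(16, unitary=["ROT"])
    v = vocab["ROT"]    # unknown key: created on the fly, forced unitary
    # vocab["rot"] would raise SpaParseError (keys must start with a capital)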
Example #28
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            raise ValidationError("Invalid type %r" % type(types).__name__,
                                  attr='learning_rule_type',
                                  obj=self)

        return learning_rule
Example #29
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            raise ValidationError(
                "Invalid type %r" % type(types).__name__,
                attr='learning_rule_type', obj=self)

        return learning_rule
Example #30
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If ``normalize=True``, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional (Default: False)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif npext.is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (type(vocab).__name__),
                              attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
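A small numeric sketch (values illustrative):

    import numpy as np

    vectors = np.array([[1.0, 0.0],
                        [0.0, 1.0]])
    data = np.array([[2.0, 0.0]])
    similarity(data, vectors)                  # -> [[2., 0.]] (raw dot products)
    similarity(data, vectors, normalize=True)  # -> [[1., 0.]] (cosine similarity)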
Example #31
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If ``normalize=True``, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional (Default: False)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif npext.is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (type(vocab).__name__), attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
Example #32
def build_connection(model, conn):
    """Builds a `.Connection` object into a model.

    A brief summary of what happens in the connection build process,
    in order:

    1. Solve for decoders.
    2. Combine transform matrix with decoders to get weights.
    3. Add operators for computing the function
       or multiplying neural activity by weights.
    4. Call build function for the synapse.
    5. Call build function for the learning rule.
    6. Add operator for applying learning rule delta to weights.

    Some of these steps may be altered or omitted depending on the parameters
    of the connection, in particular the pre and post types.

    Parameters
    ----------
    model : Model
        The model to build into.
    conn : Connection
        The connection to build.

    Notes
    -----
    Sets ``model.params[conn]`` to a `.BuiltConnection` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = "out" if is_pre else "in"

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero." %
                             (conn, "pre" if is_pre else "post", target))
        signal = model.sig[target].get(key, None)
        if signal is None or signal.size == 0:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero." %
                (conn, "pre" if is_pre else "post", target, key))

        return signal

    model.sig[conn]["in"] = get_prepost_signal(is_pre=True)
    model.sig[conn]["out"] = get_prepost_signal(is_pre=False)

    decoders = None
    encoders = None
    eval_points = None
    solver_info = None
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]["in"]
    if isinstance(conn.pre_obj, Node) or (
            isinstance(conn.pre_obj, Ensemble)
            and isinstance(conn.pre_obj.neuron_type, Direct)):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)
        if conn.function is None:
            in_signal = sliced_in
        elif isinstance(conn.function, np.ndarray):
            raise BuildError("Cannot use function points in direct connection")
        else:
            in_signal = Signal(shape=conn.size_mid, name="%s.func" % conn)
            model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in))
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng)
        if isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
            model.sig[conn]["out"] = model.sig[conn.post_obj.neurons]["in"]

            encoders = model.params[conn.post_obj].scaled_encoders.T
            encoders = encoders[conn.post_slice]

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Build transform
    if conn.solver.weights and not conn.solver.compositional:
        # Special case for non-compositional weight solvers, where the solver
        # solves for the full weight matrix, so we don't need to combine
        # decoders/transform/encoders.
        weighted, weights = model.build(Dense(decoders.shape, init=decoders),
                                        in_signal,
                                        rng=rng)
    else:
        weighted, weights = model.build(conn.transform,
                                        in_signal,
                                        decoders=decoders,
                                        encoders=encoders,
                                        rng=rng)

    model.sig[conn]["weights"] = weights

    # Build synapse
    if conn.synapse is not None:
        weighted = model.build(conn.synapse, weighted, mode="update")

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]["weighted"] = weighted

    if isinstance(conn.post_obj, Neurons):
        # Apply neuron gains (we don't need to do this if we're connecting to
        # an Ensemble, because the gains are rolled into the encoders)
        gains = Signal(
            model.params[conn.post_obj.ensemble].gain[post_slice],
            name="%s.gains" % conn,
        )

        if is_integer(post_slice) or isinstance(post_slice, slice):
            sliced_out = model.sig[conn]["out"][post_slice]
        else:
            # advanced indexing not supported on Signals, so we need to set up an
            # intermediate signal and use a Copy op to perform the indexing
            sliced_out = Signal(shape=gains.shape, name="%s.sliced_out" % conn)
            model.add_op(Reset(sliced_out))
            model.add_op(
                Copy(sliced_out,
                     model.sig[conn]["out"],
                     dst_slice=post_slice,
                     inc=True))

        model.add_op(
            ElementwiseInc(gains,
                           weighted,
                           sliced_out,
                           tag="%s.gains_elementwiseinc" % conn))
    else:
        # Copy to the proper slice
        model.add_op(
            Copy(
                weighted,
                model.sig[conn]["out"],
                dst_slice=post_slice,
                inc=True,
                tag="%s" % conn,
            ))

    # Build learning rules
    if conn.learning_rule is not None:
        # TODO: provide a general way for transforms to expose learnable params
        if not isinstance(conn.transform, (Dense, NoTransform)):
            raise NotImplementedError(
                "Learning on connections with %s transforms is not supported" %
                (type(conn.transform).__name__, ))

        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in rule.values() if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if "encoders" in targets:
            encoder_sig = model.sig[conn.post_obj]["encoders"]
            encoder_sig.readonly = False
        if "decoders" in targets or "weights" in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]["weights"].readonly = False

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=conn.transform,
        weights=getattr(weights, "initial_value", None),
    )
Example #33
 def probeable(cls, x):
     """Returns True iff the probe method can handle x."""
     return is_iterable(x) or is_probeable(x)
Example #34
def is_symbolic(x):
    return isinstance(x, Expression) or (
        is_iterable(x) and all(isinstance(xx, Expression) for xx in x))
Example #35
def discretize_synapse(synapse, w_max, w_scale, w_exp):
    """Discretize a `.Synapse` in-place.

    Turns a floating-point `.Synapse` into a discrete (integer)
    block appropriate for Loihi.

    Parameters
    ----------
    synapse : `.Synapse`
        The synapse to discretize.
    w_max : float
        The largest connection weight in the `.LoihiBlock` containing
        ``synapse``. Used to scale weights appropriately.
    w_scale : float
        Connection weight scaling factor. Usually computed by
        `.discretize_compartment`.
    w_exp : float
        Exponent on the connection weight scaling factor. Usually computed by
        `.discretize_compartment`.
    """
    w_max_i = synapse.max_abs_weight()
    if synapse.learning:
        w_exp2 = synapse.learning_wgt_exp
        dw_exp = w_exp - w_exp2
    elif w_max_i > 1e-16:
        dw_exp = int(np.floor(np.log2(w_max / w_max_i)))
        assert dw_exp >= 0
        w_exp2 = max(w_exp - dw_exp, -6)
    else:
        w_exp2 = -6
        dw_exp = w_exp - w_exp2
    synapse.format(weight_exp=w_exp2)
    for w, idxs in zip(synapse.weights, synapse.indices):
        ws = w_scale[idxs] if is_iterable(w_scale) else w_scale
        array_to_int(
            w, discretize_weights(synapse.synapse_cfg, w * ws * 2**dw_exp))

    # discretize learning
    if synapse.learning:
        synapse.tracing_tau = int(np.round(synapse.tracing_tau))

        if is_iterable(w_scale):
            assert np.all(w_scale == w_scale[0])
        w_scale_i = w_scale[0] if is_iterable(w_scale) else w_scale

        # incorporate weight scale and difference in weight exponents
        # to learning rate, since these affect speed at which we learn
        ws = w_scale_i * 2**dw_exp
        synapse.learning_rate *= ws

        # Loihi down-scales learning factors based on the number of
        # overflow bits. Increasing learning rate maintains true rate.
        synapse.learning_rate *= 2**learn_overflow_bits(2)

        # TODO: Currently, Loihi learning rate fixed at 2**-7.
        # We should explore adjusting it for better performance.
        lscale = 2**-7 / synapse.learning_rate
        synapse.learning_rate *= lscale
        synapse.tracing_mag /= lscale

        # discretize learning rate into mantissa and exponent
        lr_exp = int(np.floor(np.log2(synapse.learning_rate)))
        lr_int = int(np.round(synapse.learning_rate * 2**(-lr_exp)))
        synapse.learning_rate = lr_int * 2**lr_exp
        synapse._lr_int = lr_int
        synapse._lr_exp = lr_exp
        assert lr_exp >= -7

        # discretize tracing mag into integer and fractional components
        mag_int, mag_frac = tracing_mag_int_frac(synapse.tracing_mag)
        if mag_int > 127:
            warnings.warn("Trace increment exceeds upper limit "
                          "(learning rate may be too large)")
            mag_int = 127
            mag_frac = 127
        synapse.tracing_mag = mag_int + mag_frac / 128.0
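A small numeric sketch of the mantissa/exponent split performed above (plain Python; the learning rate is illustrative):

    import numpy as np

    learning_rate = 0.0375
    lr_exp = int(np.floor(np.log2(learning_rate)))           # -5
    lr_int = int(np.round(learning_rate * 2 ** (-lr_exp)))   # round(1.2) == 1
    print(lr_int * 2 ** lr_exp)                              # 0.03125, the stored rate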
Example #36
    def add_output(self, name, function, synapse=None, **conn_kwargs):
        """Adds a node that collects the decoded output of all ensembles.

        By default, this is called once in ``__init__`` with ``function=None``.
        However, this can be called multiple times with different functions,
        similar to the way in which an ensemble can be connected to many
        downstream ensembles with different functions.

        Note that in addition to the parameters below, parameters affecting
        all of the connections from the sub-ensembles to the new node
        can be passed to this function. For example::

            ea.add_output('output', None, solver=nengo.solvers.Lstsq())

        creates a new output with the decoders of each connection solved for
        with the `.Lstsq` solver.

        Parameters
        ----------
        name : str
            The name of the output. This will also be the name of the attribute
            set on the ensemble array.
        function : callable or iterable of callables
            The function to compute across the connection from sub-ensembles
            to the new output node. If function is an iterable, it must be
            an iterable consisting of one function for each sub-ensemble.
        synapse : Synapse, optional (Default: None)
            The synapse model with which to filter the connections from
            sub-ensembles to the new output node. This is kept separate from
            the other ``conn_kwargs`` because this defaults to None rather
            than the default synapse model. In almost all cases the synapse
            should stay as None, and synaptic filtering should be performed in
            the connection from the output node.
        """
        dims_per_ens = self.dimensions_per_ensemble

        # get output size for each ensemble
        sizes = np.zeros(self.n_ensembles, dtype=int)

        if is_iterable(function) and all(callable(f) for f in function):
            if len(list(function)) != self.n_ensembles:
                raise ValidationError(
                    "Must have one function per ensemble", attr='function')

            for i, func in enumerate(function):
                sizes[i] = np.asarray(func(np.zeros(dims_per_ens))).size
        elif callable(function):
            sizes[:] = np.asarray(function(np.zeros(dims_per_ens))).size
            function = [function] * self.n_ensembles
        elif function is None:
            sizes[:] = dims_per_ens
            function = [None] * self.n_ensembles
        else:
            raise ValidationError("'function' must be a callable, list of "
                                  "callables, or None", attr='function')

        # Create the node and connections in this network's context
        with self:
            output = nengo.Node(output=None, size_in=sizes.sum(), label=name)
            setattr(self, name, output)

            indices = np.zeros(len(sizes) + 1, dtype=int)
            indices[1:] = np.cumsum(sizes)
            for i, e in enumerate(self.ea_ensembles):
                nengo.Connection(
                    e, output[indices[i]:indices[i+1]], function=function[i],
                    synapse=synapse, **conn_kwargs)

        return output
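A usage sketch (standard Nengo API; the squaring function is illustrative):

    import nengo

    with nengo.Network():
        ea = nengo.networks.EnsembleArray(50, n_ensembles=3)
        squared = ea.add_output("squared", lambda x: x ** 2)
        # `squared` (also available as ea.squared) decodes x**2 from each sub-ensemble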
Example #37
    def add_output(self, name, function, synapse=None, **conn_kwargs):
        """Adds a node that collects the decoded output of all ensembles.

        By default, this is called once in ``__init__`` with ``function=None``.
        However, this can be called multiple times with different functions,
        similar to the way in which an ensemble can be connected to many
        downstream ensembles with different functions.

        Note that in addition to the parameters below, parameters affecting
        all of the connections from the sub-ensembles to the new node
        can be passed to this function. For example:

        .. testcode::

           ea.add_output("lstsq_output", None, solver=nengo.solvers.Lstsq())

        creates a new output at ``ea.lstsq_output`` with the decoders
        of each connection solved for with the `.Lstsq` solver.

        Parameters
        ----------
        name : str
            The name of the output. This will also be the name of the attribute
            set on the ensemble array.
        function : callable or iterable of callables
            The function to compute across the connection from sub-ensembles
            to the new output node. If function is an iterable, it must be
            an iterable consisting of one function for each sub-ensemble.
        synapse : Synapse, optional
            The synapse model with which to filter the connections from
            sub-ensembles to the new output node. This is kept separate from
            the other ``conn_kwargs`` because this defaults to None rather
            than the default synapse model. In almost all cases the synapse
            should stay as None, and synaptic filtering should be performed in
            the connection from the output node.
        """
        if hasattr(self, name):
            raise ValidationError(
                f"Cannot add output '{name}'; there is already an attribute "
                "with this name",
                attr="name",
                obj=self,
            )

        dims_per_ens = self.dimensions_per_ensemble

        # get output size for each ensemble
        sizes = np.zeros(self.n_ensembles, dtype=int)

        if is_iterable(function) and all(callable(f) for f in function):
            if len(list(function)) != self.n_ensembles:
                raise ValidationError(
                    "Must have one function per ensemble", attr="function", obj=self
                )

            for i, func in enumerate(function):
                sizes[i] = np.asarray(func(np.zeros(dims_per_ens))).size
        elif callable(function):
            sizes[:] = np.asarray(function(np.zeros(dims_per_ens))).size
            function = [function] * self.n_ensembles
        elif function is None:
            sizes[:] = dims_per_ens
            function = [None] * self.n_ensembles
        else:
            raise ValidationError(
                "'function' must be a callable, list of callables, or None",
                attr="function",
                obj=self,
            )

        # Create the node and connections in this network's context
        with self:
            output = Node(output=None, size_in=sizes.sum(), label=name)
            setattr(self, name, output)

            indices = np.zeros(len(sizes) + 1, dtype=int)
            indices[1:] = np.cumsum(sizes)
            for i, e in enumerate(self.ea_ensembles):
                Connection(
                    e,
                    output[indices[i] : indices[i + 1]],
                    function=function[i],
                    synapse=synapse,
                    **conn_kwargs,
                )

        return output
Example #38
 def is_foldable(cls, input_ops):
     """Returns True iff input_ops is an acceptable argument to the constructor."""
     # There is no technical reason to disallow a fold to be the input, but
     # this would be a noop and likely not what the user intends to do.
     return is_iterable(input_ops) and not isinstance(input_ops, Fold)
Example #39
    def __init__(self, input_vectors, output_vectors=None,  # noqa: C901
                 n_neurons=50, threshold=0.3, input_scales=1.0,
                 inhibitable=False,
                 label=None, seed=None, add_to_container=None):
        super().__init__(label, seed, add_to_container)

        # --- Put arguments in canonical form
        if output_vectors is None:
            # If output vocabulary is not specified, use input vector list
            # (i.e. autoassociative memory)
            output_vectors = input_vectors
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        if input_vectors.shape[0] == 0:
            raise ValidationError("Number of input vectors cannot be 0.",
                                  attr='input_vectors', obj=self)
        elif input_vectors.shape[0] != output_vectors.shape[0]:
            # Fail if number of input items and number of output items don't
            # match
            raise ValidationError(
                "Number of input vectors does not match number of output "
                "vectors. %d != %d"
                % (input_vectors.shape[0], output_vectors.shape[0]),
                attr='input_vectors', obj=type(self))

        # Handle possible different threshold / input_scale values for each
        # element in the associative memory
        if not is_iterable(threshold):
            threshold = threshold * np.ones(input_vectors.shape[0])
        else:
            threshold = np.array(threshold)

        # --- Check preconditions
        # (the input/output count match was already validated above)
        self.n_items = input_vectors.shape[0]
        if threshold.shape[0] != self.n_items:
            raise ValidationError(
                "Number of threshold values (%d) does not match number of "
                "input vectors (%d)." % (threshold.shape[0], self.n_items),
                attr='threshold', obj=self)

        # --- Set parameters
        self.out_conns = []  # Used in `add_threshold_to_output`
        # Used in `add_threshold_to_output`
        self.default_vector_inhibit_conns = []
        self.thresh_ens = None  # Will hold thresholded outputs
        self.is_wta = False
        self._inhib_scale = 1.5

        # -- Create the core network
        with self, self.am_ens_config:
            self.bias_node = nengo.Node(output=1)
            self.elem_input = nengo.Node(
                size_in=self.n_items, label="element input")
            self.elem_output = nengo.Node(
                size_in=self.n_items, label="element output")
            self.utilities = self.elem_output

            self.am_ensembles = []
            label_prefix = "" if label is None else label + "_"
            filt_scale = 15
            filt_step_func = lambda x: filtered_step(x, 0.0, scale=filt_scale)
            for i in range(self.n_items):
                e = nengo.Ensemble(n_neurons, 1, label=label_prefix + str(i))
                self.am_ensembles.append(e)

                # Connect input and output nodes
                nengo.Connection(self.bias_node, e, transform=-threshold[i])
                nengo.Connection(self.elem_input[i], e)
                nengo.Connection(
                    e, self.elem_output[i], function=filt_step_func)

            if inhibitable:
                # Input node for inhibitory gating signal (if enabled)
                self.inhibit = nengo.Node(size_in=1, label="inhibit")
                nengo.Connection(self.inhibit, self.elem_input,
                                 transform=-np.ones((self.n_items, 1))
                                 * self._inhib_scale)
                # Note: We can use a decoded connection here because all the
                # am_ensembles have [1] encoders
            else:
                self.inhibit = None

            self.thresh_bias = None
            self.thresholded_utilities = None

        self.add_input_mapping("input", input_vectors, input_scales)
        self.add_output_mapping("output", output_vectors)
Example #40
 def function_args(self, conn, function):
     x = (conn.eval_points[0] if is_iterable(conn.eval_points)
          else np.zeros(conn.size_in))
     return (x,)
Example #41
    def __init__(  # noqa: C901
        self,
        input_vectors,
        output_vectors=None,
        n_neurons=50,
        threshold=0.3,
        input_scales=1.0,
        inhibitable=False,
        label=None,
        seed=None,
        add_to_container=None,
    ):
        super().__init__(label, seed, add_to_container)

        # --- Put arguments in canonical form
        if output_vectors is None:
            # If output vocabulary is not specified, use input vector list
            # (i.e. autoassociative memory)
            output_vectors = input_vectors
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        if input_vectors.shape[0] == 0:
            raise ValidationError("Number of input vectors cannot be 0.",
                                  attr="input_vectors",
                                  obj=self)
        elif input_vectors.shape[0] != output_vectors.shape[0]:
            # Fail if number of input items and number of output items don't
            # match
            raise ValidationError(
                "Number of input vectors does not match number of output "
                "vectors. %d != %d" %
                (input_vectors.shape[0], output_vectors.shape[0]),
                attr="input_vectors",
                obj=type(self),
            )

        # Handle possible different threshold / input_scale values for each
        # element in the associative memory
        if not is_iterable(threshold):
            threshold = threshold * np.ones(input_vectors.shape[0])
        else:
            threshold = np.array(threshold)

        # --- Check preconditions
        self.n_items = input_vectors.shape[0]
        if threshold.shape[0] != self.n_items:
            raise ValidationError(
                "Number of threshold values (%d) does not match number of "
                "input vectors (%d)." % (threshold.shape[0], self.n_items),
                attr="threshold",
                obj=self,
            )

        # --- Set parameters
        self.out_conns = []  # Used in `add_threshold_to_output`
        # Used in `add_threshold_to_output`
        self.default_vector_inhibit_conns = []
        self.thresh_ens = None  # Will hold thresholded outputs
        self.is_wta = False
        self._inhib_scale = 1.5

        # -- Create the core network
        with self, self.am_ens_config:
            self.bias_node = Node(output=1)
            self.elem_input = Node(size_in=self.n_items, label="element input")
            self.elem_output = Node(size_in=self.n_items,
                                    label="element output")
            self.utilities = self.elem_output

            self.am_ensembles = []
            label_prefix = "" if label is None else label + "_"
            filt_scale = 15
            filt_step_func = lambda x: filtered_step(x, 0.0, scale=filt_scale)
            for i in range(self.n_items):
                e = Ensemble(n_neurons, 1, label=label_prefix + str(i))
                self.am_ensembles.append(e)

                # Connect input and output nodes
                Connection(self.bias_node, e, transform=-threshold[i])
                Connection(self.elem_input[i], e)
                Connection(e, self.elem_output[i], function=filt_step_func)

            if inhibitable:
                # Input node for inhibitory gating signal (if enabled)
                self.inhibit = Node(size_in=1, label="inhibit")
                Connection(
                    self.inhibit,
                    self.elem_input,
                    transform=-np.ones((self.n_items, 1)) * self._inhib_scale,
                )
                # Note: We can use a decoded connection here because all the
                # am_ensembles have [1] encoders
            else:
                self.inhibit = None

            self.thresh_bias = None
            self.thresholded_utilities = None

        self.add_input_mapping("input", input_vectors, input_scales)
        self.add_output_mapping("output", output_vectors)