Example #1
    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        sigma = np.asarray(sigma, dtype='float')
        sigma = sigma.reshape(sigma.size, 1)

        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s" %
                                  ((n, d), X.shape),
                                  attr='X0',
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        # --- conjugate gradient
        R = B - G(X)
        P = np.array(R)
        Rsold = np.dot(R.T, R)
        AP = np.zeros((n, d))

        maxiters = int(n / d)
        for i in range(maxiters):
            AP = G(P)
            alpha = np.linalg.solve(np.dot(P.T, AP), Rsold)
            X += np.dot(P, alpha)
            R -= np.dot(AP, alpha)

            Rsnew = np.dot(R.T, R)
            if (np.diag(Rsnew) < rtol**2).all():
                break

            beta = np.linalg.solve(Rsold, Rsnew)
            P = R + np.dot(P, beta)
            Rsold = Rsnew

        info = {'rmses': rmses(A, X, Y), 'iterations': i + 1}
        return X if matrix_in else X.ravel(), info
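
A minimal NumPy sketch (not part of the snippet above, names illustrative) of what this block conjugate-gradient solver computes: the X minimizing ||AX - Y||^2 + m*sigma^2*||X||^2, which the damped normal equations give directly.

import numpy as np

rng = np.random.default_rng(0)
m, n, d = 100, 20, 3
A = rng.standard_normal((m, n))
Y = rng.standard_normal((m, d))
sigma = 0.1

# Reference: direct solve of (A^T A + damp I) X = A^T Y
damp = m * sigma**2
X_direct = np.linalg.solve(A.T @ A + damp * np.eye(n), A.T @ Y)

# Block conjugate gradient, mirroring the loop above
G = lambda x: A.T @ (A @ x) + damp * x
B = A.T @ Y
X = np.zeros((n, d))
R = B - G(X)
P = R.copy()
Rsold = R.T @ R
for _ in range(10 * n):
    AP = G(P)
    alpha = np.linalg.solve(P.T @ AP, Rsold)
    X += P @ alpha
    R -= AP @ alpha
    Rsnew = R.T @ R
    if np.sqrt(np.diag(Rsnew)).max() < 1e-8:
        break
    P = R + P @ np.linalg.solve(Rsold, Rsnew)
    Rsold = Rsnew

print(np.allclose(X, X_direct))  # True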
Example #2
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If ``normalize=True``, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data : array_like
        The data used for comparison.
    vocab : Vocabulary or array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional
        Whether to normalize all vectors, to compute the cosine similarity.
    """

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif npext.is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (type(vocab).__name__),
                              attr="vocab")

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
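
A quick usage-style sketch with plain arrays standing in for the vocabulary (so ``Vocabulary`` and ``npext`` are not needed); the normalized branch reduces to cosine similarity:

import numpy as np

data = np.array([[1.0, 0.0], [1.0, 1.0]])
vectors = np.array([[1.0, 0.0], [0.0, 1.0]])

dots = np.dot(data, vectors.T)

eps = np.nextafter(0, 1)
dnorm = np.maximum(np.linalg.norm(data, axis=1, keepdims=True), eps)
vnorm = np.maximum(np.linalg.norm(vectors, axis=1, keepdims=True), eps)
print(dots / dnorm / vnorm.T)  # [[1, 0], [0.707..., 0.707...]]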
Example #3
    def add_output_mapping(self, name, output_vectors):
        """Adds another output to the associative memory network.

        Creates a transform with the given output vectors between the
        associative memory element output and a named output node to enable the
        selection of output vectors by the associative memory.

        Parameters
        ----------
        name : str
            Name to use for the output node. This name will be used as
            the name of the attribute for the associative memory network.
        output_vectors : array_like
            The list of vectors to be produced for each match.
        """
        # --- Put arguments in canonical form
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        # --- Check preconditions
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name,
                                  attr='name')

        # --- Make the output node and connect it
        output = nengo.Node(size_in=output_vectors.shape[1], label=name)
        setattr(self, name, output)

        if self.thresh_ens is not None:
            c = nengo.Connection(self.thresh_ens.output,
                                 output,
                                 synapse=None,
                                 transform=output_vectors.T)
        else:
            c = nengo.Connection(self.elem_output,
                                 output,
                                 synapse=None,
                                 transform=output_vectors.T)
        self.out_conns.append(c)
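
A hedged usage sketch, assuming this method lives on ``nengo.networks.AssociativeMemory`` (as in Nengo's reference networks); the name and vectors are illustrative:

import numpy as np
import nengo
from nengo.networks import AssociativeMemory

with nengo.Network():
    am = AssociativeMemory(input_vectors=np.eye(3))
    am.add_output_mapping("tags", output_vectors=2.0 * np.eye(3))

print(am.tags.size_out)  # 3, one dimension per output-vector component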
Example #4
    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0, )

        nyquist_cutoff = 0.5 / dt
        if self.high > nyquist_cutoff:
            raise ValidationError("High must not exceed the Nyquist frequency "
                                  "for the given dt (%0.3f)" % nyquist_cutoff,
                                  attr='high',
                                  obj=self)

        n_coefficients = int(np.ceil(self.period / dt / 2.))
        shape = (n_coefficients + 1, ) + shape_out
        sigma = self.rms * np.sqrt(0.5)
        coefficients = 1j * rng.normal(0., sigma, size=shape)
        coefficients += rng.normal(0., sigma, size=shape)
        coefficients[0] = 0.
        coefficients[-1].imag = 0.

        set_to_zero = npext.rfftfreq(2 * n_coefficients, d=dt) > self.high
        coefficients[set_to_zero] = 0.
        power_correction = np.sqrt(1. - np.sum(set_to_zero, dtype=float) /
                                   n_coefficients)
        if power_correction > 0.:
            coefficients /= power_correction
        coefficients *= np.sqrt(2 * n_coefficients)
        signal = np.fft.irfft(coefficients, axis=0)

        if self.y0 is not None:
            # Starts each dimension off where it is closest to y0
            def shift(x):
                offset = np.argmin(abs(self.y0 - x))
                return np.roll(x, -offset + 1)  # +1 since t starts at dt

            signal = np.apply_along_axis(shift, 0, signal)

        def step_whitesignal(t):
            i = int(round(t / dt))
            return signal[i % signal.shape[0]]

        return step_whitesignal
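
A standalone NumPy sketch of the same construction (random rFFT coefficients, band-limited, inverse-transformed); ``npext.rfftfreq`` is replaced by ``np.fft.rfftfreq``, and the ``y0`` shifting is omitted:

import numpy as np

rng = np.random.default_rng(0)
dt, period, high, rms = 0.001, 1.0, 5.0, 0.5

n_coefficients = int(np.ceil(period / dt / 2.0))
sigma = rms * np.sqrt(0.5)
coefficients = rng.normal(0.0, sigma, size=n_coefficients + 1) \
    + 1j * rng.normal(0.0, sigma, size=n_coefficients + 1)
coefficients[0] = 0.0                      # no DC offset
coefficients[-1] = coefficients[-1].real   # Nyquist bin must be real

set_to_zero = np.fft.rfftfreq(2 * n_coefficients, d=dt) > high
coefficients[set_to_zero] = 0.0
# restore the power removed by zeroing the out-of-band bins
coefficients /= np.sqrt(1.0 - set_to_zero.sum() / n_coefficients)

signal = np.fft.irfft(coefficients * np.sqrt(2 * n_coefficients))
print(signal.shape, signal.std())  # (1000,); std roughly equal to rms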
Example #5
    def trange(self, dt=None, sample_every=None):
        """Create a vector of times matching probed data.

        Note that the range does not start at 0 as one might expect, but at
        the first timestep (i.e., ``dt``).

        Parameters
        ----------
        dt : float, optional
            Deprecated; use ``sample_every`` instead.
        sample_every : float, optional (Default: None)
            The sampling period of the probe to create a range for.
            If None, a time value for every ``dt`` will be produced.
        """
        if dt is not None:
            if sample_every is not None:
                raise ValidationError(
                    "Cannot specify both `dt` and `sample_every`. "
                    "Use `sample_every` only.", attr="dt", obj=self)
            warnings.warn("`dt` is deprecated. Use `sample_every` instead.")
            sample_every = dt
        period = 1 if sample_every is None else sample_every / self.dt
        steps = np.arange(1, self.n_steps + 1)
        return self.dt * steps[steps % period < 1]
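
The step-selection logic in isolation (a sketch with made-up values: ``dt = 0.001``, ten steps, sampling every 2 ms):

import numpy as np

dt, n_steps, sample_every = 0.001, 10, 0.002
period = 1 if sample_every is None else sample_every / dt
steps = np.arange(1, n_steps + 1)
print(dt * steps[steps % period < 1])  # [0.002 0.004 0.006 0.008 0.01]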
Example #6
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            raise ValidationError(
                "Invalid type %r" % type(types).__name__,
                attr="learning_rule_type",
                obj=self,
            )

        return learning_rule
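
A hedged usage sketch (assuming standard Nengo): the property mirrors the structure of ``learning_rule_type``, so a single rule type yields a single ``LearningRule`` (a list or dict of types would yield a matching list or dict):

import nengo

with nengo.Network():
    a = nengo.Ensemble(10, 1)
    b = nengo.Ensemble(10, 1)
    conn = nengo.Connection(a, b, learning_rule_type=nengo.PES())

print(type(conn.learning_rule).__name__)  # LearningRule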
Example #7
    def __init__(
        self,
        full_shape,
        row_slice=slice(None),
        col_slice=slice(None),
        channel_slice=slice(None),
    ):
        if nengo_transforms is None:
            raise NotImplementedError("ImageSlice requires newer Nengo")
        if not (
            isinstance(full_shape, nengo_transforms.ChannelShape)
            and full_shape.dimensions == 2
        ):
            raise ValidationError(
                "must be 2-D ChannelShape (got %r)" % full_shape,
                attr="full_shape",
                obj=self,
            )
        self.full_shape = full_shape
        self.row_slice = row_slice
        self.col_slice = col_slice
        self.channel_slice = channel_slice
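
A hedged construction sketch, assuming the class above is in scope and a Nengo recent enough to provide ``nengo.transforms.ChannelShape``; the shape and slices are illustrative:

import nengo

full_shape = nengo.transforms.ChannelShape((4, 4, 3), channels_last=True)
sl = ImageSlice(full_shape, row_slice=slice(0, 2), col_slice=slice(0, 2))
print(sl.full_shape.n_channels)  # 3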
Example #8
    def run(self, time_in_seconds, progress_bar=None):
        """Simulate for the given length of time.

        If the given length of time is not a multiple of ``dt``,
        it will be rounded to the nearest ``dt``. For example, if ``dt``
        is 0.001 and ``run`` is called with ``time_in_seconds=0.0006``,
        the simulator will advance one timestep, resulting in the actual
        simulator time being 0.001.

        The given length of time must be positive. The simulator cannot
        be run backwards.

        Parameters
        ----------
        time_in_seconds : float
            Amount of time to run the simulation for. Must be positive.
        progress_bar : bool or `.ProgressBar` or `.ProgressUpdater`, optional \
                       (Default: True)
            Progress bar for displaying the progress of the simulation run.

            If True, the default progress bar will be used.
            If False, the progress bar will be disabled.
            For more control over the progress bar, pass in a `.ProgressBar`
            or `.ProgressUpdater` instance.
        """
        if time_in_seconds < 0:
            raise ValidationError("Must be positive (got %g)" %
                                  (time_in_seconds, ),
                                  attr="time_in_seconds")

        steps = int(np.round(float(time_in_seconds) / self.dt))

        if steps == 0:
            warnings.warn("%g results in running for 0 timesteps. Simulator "
                          "still at time %g." % (time_in_seconds, self.time))
        else:
            logger.info("Running %s for %f seconds, or %d steps",
                        self.model.label, time_in_seconds, steps)
            self.run_steps(steps, progress_bar=progress_bar)
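
The rounding behavior described in the docstring, in isolation (sketch, with ``dt = 0.001``):

import numpy as np

dt = 0.001
for t in (0.0006, 0.0004, 1.0):
    print(t, int(np.round(float(t) / dt)))  # 1, 0, and 1000 steps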
Example #9
    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                result = self.validate_callable(node, output)
                node.size_out = 0 if result is None else result.size
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(output,
                                 min_dims=1,
                                 copy=False,
                                 dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size
        else:
            raise ValidationError("Invalid node output type %r" %
                                  type(output).__name__,
                                  attr=self.name,
                                  obj=node)

        # --- Set output
        self.data[node] = output
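
A hedged usage sketch of the size-inference rules above in plain Nengo; the node outputs are illustrative:

import numpy as np
import nengo

with nengo.Network():
    a = nengo.Node(output=np.array([1.0, 2.0]))   # array-like: size_out = 2
    b = nengo.Node(output=lambda t: [t, 2 * t])   # callable: size_out inferred = 2
    c = nengo.Node(output=None, size_in=4)        # passthrough: size_out = size_in

print(a.size_out, b.size_out, c.size_out)  # 2 2 4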
Example #10
    def __init__(
        self,
        tensor_func,
        shape_in=Default,
        shape_out=Default,
        pass_time=Default,
        label=Default,
    ):
        # pylint: disable=non-parent-init-called,super-init-not-called
        # note: we bypass the Node constructor, because we don't want to
        # perform validation on `output`
        NengoObject.__init__(self, label=label, seed=None)

        self.shape_in = shape_in
        self.shape_out = shape_out
        self.pass_time = pass_time

        if not (self.shape_in or self.pass_time):
            raise ValidationError("Must specify either shape_in or pass_time",
                                  "TensorNode")

        self.tensor_func = tensor_func
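
A hedged usage sketch, assuming NengoDL is installed and this constructor is ``nengo_dl.TensorNode`` (mirroring an example from the NengoDL docs):

import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network():
    # pass_time defaults to True, so shape_in may stay unset
    node = nengo_dl.TensorNode(lambda t: tf.reshape(t, (1, 1)))

print(node.shape_out)  # (1,), inferred by calling the function once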
Example #11
    def __init__(
        self,
        dimensions,
        vocab=None,
        n_neurons=200,
        invert_a=False,
        invert_b=False,
        input_magnitude=1.0,
        label=None,
        seed=None,
        add_to_container=None,
    ):
        super().__init__(label, seed, add_to_container)
        if vocab is None:
            # use the default vocab for this number of dimensions
            vocab = dimensions
        elif vocab.dimensions != dimensions:
            raise ValidationError(
                "Dimensionality of given vocabulary (%d) does "
                "not match dimensionality of buffer (%d)" %
                (vocab.dimensions, dimensions),
                attr="dimensions",
                obj=self,
            )

        with self:
            self.cc = nengo.networks.CircularConvolution(
                n_neurons,
                dimensions,
                invert_a,
                invert_b,
                input_magnitude=input_magnitude,
            )
            self.A = self.cc.input_a
            self.B = self.cc.input_b
            self.output = self.cc.output

        self.inputs = dict(A=(self.A, vocab), B=(self.B, vocab))
        self.outputs = dict(default=(self.output, vocab))
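
A hedged sketch of the underlying network this module wraps (standard Nengo); sizes are illustrative:

import nengo

with nengo.Network():
    cconv = nengo.networks.CircularConvolution(n_neurons=64, dimensions=16)

print(cconv.input_a.size_in, cconv.input_b.size_in, cconv.output.size_in)  # 16 16 16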
Example #12
    def __init__(self, base_type, amplitude=1.0, initial_state=None):
        super().__init__(initial_state)

        self.base_type = base_type
        self.amplitude = amplitude
        self.negative = base_type.negative

        if base_type.spiking:
            warnings.warn(
                "'base_type' is type %r, which is a spiking neuron type. We recommend "
                "using the non-spiking equivalent type, if one exists."
                % (type(base_type).__name__)
            )

        for s in self.state:
            if s in self.base_type.state:
                raise ValidationError(
                    "%s and %s have overlapping state variable (%s)"
                    % (self, self.base_type, s),
                    attr="state",
                    obj=self,
                )
Example #13
def lowpass_filter(x, tau, kind='expon'):
    nt = x.shape[-1]

    if kind == 'expon':
        t = np.arange(0, 5 * tau)
        kern = np.exp(-t / tau) / tau
        delay = tau
    elif kind == 'gauss':
        std = tau / 2.
        t = np.arange(-4 * std, 4 * std)
        kern = np.exp(-0.5 * (t / std)**2) / np.sqrt(2 * np.pi * std**2)
        delay = 4 * std
    elif kind == 'alpha':
        alpha = 1. / tau
        t = np.arange(0, 5 * tau)
        kern = alpha**2 * t * np.exp(-alpha * t)
        delay = tau
    else:
        raise ValidationError("Unrecognized filter kind '%s'" % kind, 'kind')

    delay = int(np.round(delay))
    return np.array(
        [np.convolve(kern, xx, mode='full')[delay:nt + delay] for xx in x])
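
A usage sketch, assuming the function above is in scope; ``x`` must be 2-D, with time on the last axis, and ``tau`` is in samples:

import numpy as np

rng = np.random.default_rng(0)
x = (np.arange(200) > 50).astype(float) + 0.1 * rng.standard_normal(200)
y = lowpass_filter(x[None, :], tau=10.0, kind='expon')
print(y.shape)  # (1, 200), a smoothed version of the noisy step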
Example #14
    def __set__(self, conn, function):
        if function is None:
            function_info = FunctionInfo(function=None, size=None)
        elif isinstance(function, FunctionInfo):
            function_info = function
        elif is_array_like(function):
            array = np.array(function, copy=False, dtype=np.float64)
            self.validate_array(conn, array)
            function_info = FunctionInfo(function=array, size=array.shape[1])
        elif callable(function):
            function_info = FunctionInfo(function=function,
                                         size=self.determine_size(
                                             conn, function))
            self.validate_callable(conn, function_info)
        else:
            raise ValidationError("Invalid connection function type %r "
                                  "(must be callable or array-like)" %
                                  type(function).__name__,
                                  attr=self.name,
                                  obj=conn)

        self.validate(conn, function_info)
        self.data[conn] = function_info
Example #15
    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError(f"Must be shape {n, d}, got {X.shape}",
                                  attr="X0",
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        iters = -np.ones(d, dtype="int")
        for i in range(d):
            X[:, i], iters[i] = self._conjgrad_iters(G,
                                                     B[:, i],
                                                     X[:, i],
                                                     maxiters=self.maxiters,
                                                     rtol=rtol)

        info = {"rmses": rmses(A, X, Y), "iterations": iters}
        return X if matrix_in else X.ravel(), info
Example #16
    def make_step(self, shape_in, shape_out, dt, rng, y0=None,
                  dtype=np.float64, method='zoh'):
        """Returns a `.Step` instance that implements the linear filter."""
        assert shape_in == shape_out

        num, den = self.num, self.den
        if self.analog:
            num, den, _ = cont2discrete((num, den), dt, method=method)
            num = num.flatten()

        if den[0] != 1.:
            raise ValidationError("First element of the denominator must be 1",
                                  attr='den', obj=self)
        num = num[1:] if num[0] == 0 else num
        den = den[1:]  # drop first element (equal to 1)
        num, den = num.astype(dtype), den.astype(dtype)

        output = np.zeros(shape_out, dtype=dtype)
        if len(num) == 1 and len(den) == 0:
            return LinearFilter.NoDen(num, den, output)
        elif len(num) == 1 and len(den) == 1:
            return LinearFilter.Simple(num, den, output, y0=y0)
        return LinearFilter.General(num, den, output, y0=y0)
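
A hedged sketch of the discretization step alone, for a first-order lowpass H(s) = 1 / (tau s + 1), assuming the snippet's ``cont2discrete`` behaves like SciPy's:

import numpy as np
from scipy.signal import cont2discrete

tau, dt = 0.005, 0.001
num, den, _ = cont2discrete(([1.0], [tau, 1.0]), dt, method='zoh')
num = num.flatten()
print(num, den)  # den[0] == 1 as the check above requires; num[0] == 0,
                 # so the `num = num[1:]` branch would apply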
Example #17
    def add_neuron_output(self):
        """Adds a node that collects the neural output of all ensembles.

        Direct neuron output is useful for plotting the spike raster of
        all neurons in the ensemble array.

        This node is accessible through the 'neuron_output' attribute
        of this ensemble array.
        """
        if self.neuron_output is not None:
            warnings.warn("neuron_output already exists. Returning.")
            return self.neuron_output

        if isinstance(self.ea_ensembles[0].neuron_type, Direct):
            raise ValidationError(
                "Ensembles use Direct neuron type. "
                "Cannot get neuron output from Direct neurons.",
                attr="ea_ensembles[0].neuron_type",
                obj=self,
            )

        self.neuron_output = Node(
            size_in=self.n_neurons_per_ensemble * self.n_ensembles,
            label="neuron_output",
        )

        for i, ens in enumerate(self.ea_ensembles):
            Connection(
                ens.neurons,
                self.neuron_output[
                    i
                    * self.n_neurons_per_ensemble : (i + 1)
                    * self.n_neurons_per_ensemble
                ],
                synapse=None,
            )
        return self.neuron_output
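
A hedged usage sketch with Nengo's ``EnsembleArray``:

import nengo

with nengo.Network():
    ea = nengo.networks.EnsembleArray(n_neurons=20, n_ensembles=3)
    out = ea.add_neuron_output()

print(out.size_in)  # 60 == 20 neurons * 3 ensembles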
Example #18
    def __init__(self, obj, key=slice(None)):
        self.obj = obj
        if is_integer(key):
            # single slices of the form [i] should be cast into
            # slice objects for convenience
            if key == -1:
                # special case because slice(-1, 0) gives the empty list
                key = slice(key, None)
            else:
                key = slice(key, key+1)
        self.slice = key

        # Node.size_in != size_out, so one of these can be invalid
        try:
            self.size_in = np.arange(self.obj.size_in)[self.slice].size
        except IndexError:
            self.size_in = None
        try:
            self.size_out = np.arange(self.obj.size_out)[self.slice].size
        except IndexError:
            self.size_out = None
        if self.size_in is None and self.size_out is None:
            raise ValidationError("Invalid slice '%s' of %s"
                                  % (self.slice, self.obj), attr='key')
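
The size computation in isolation (sketch): slicing an index range counts how many elements the key selects, including the ``-1`` special case:

import numpy as np

size = 5
for key in (2, -1, slice(1, 4)):
    if isinstance(key, int):
        key = slice(key, None) if key == -1 else slice(key, key + 1)
    print(np.arange(size)[key].size)  # 1, 1, 3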
Example #19
    def __init__(self, indices, data, shape):
        super().__init__()

        self.indices = indices
        self.shape = shape

        # if data is not a distribution
        if is_array_like(data):
            data = np.asarray(data)

            # convert scalars to vectors
            if data.size == 1:
                data = data.item() * np.ones(self.indices.shape[0], dtype=data.dtype)

            if data.ndim != 1 or data.shape[0] != self.indices.shape[0]:
                raise ValidationError(
                    "Must be a vector of the same length as `indices`",
                    attr="data",
                    obj=self,
                )

        self.data = data
        self._allocated = None
        self._dense = None
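
The scalar-broadcast branch in isolation (sketch): a scalar weight expands to one value per ``(row, col)`` index pair:

import numpy as np

indices = np.array([[0, 0], [1, 2], [3, 1]])
data = np.asarray(2.0)
if data.size == 1:
    data = data.item() * np.ones(indices.shape[0], dtype=data.dtype)
print(data)  # [2. 2. 2.]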
Example #20
    def gain_bias(self, max_rates, intercepts):
        """Compute the alpha and bias needed to satisfy max_rates, intercepts.

        Returns gain (alpha) and offset (j_bias) values of neurons.

        Parameters
        ----------
        max_rates : list of floats
            Maximum firing rates of neurons.
        intercepts : list of floats
            X-intercepts of neurons.
        """
        inv_tau_ref = 1. / self.tau_ref if self.tau_ref > 0 else np.inf
        if np.any(max_rates > inv_tau_ref):
            raise ValidationError("Max rates must be below the inverse "
                                  "refractory period (%0.3f)" % inv_tau_ref,
                                  attr='max_rates',
                                  obj=self)

        x = 1.0 / (1 - np.exp(
            (self.tau_ref - (1.0 / max_rates)) / self.tau_rc))
        gain = (1 - x) / (intercepts - 1.0)
        bias = 1 - gain * intercepts
        return gain, bias
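
A numeric check of the closed form (sketch), using the LIF rate equation r(J) = 1 / (tau_ref - tau_rc * log(1 - 1/J)) for J > 1: the returned gain and bias put the neuron at ``max_rate`` when x = 1 and at threshold (J = 1) when x = intercept:

import numpy as np

tau_rc, tau_ref = 0.02, 0.002
max_rates = np.array([200.0])
intercepts = np.array([0.1])

x = 1.0 / (1 - np.exp((tau_ref - (1.0 / max_rates)) / tau_rc))
gain = (1 - x) / (intercepts - 1.0)
bias = 1 - gain * intercepts

J = gain * 1.0 + bias
print(1.0 / (tau_ref - tau_rc * np.log1p(-1.0 / J)))  # ~[200.]
print(gain * intercepts + bias)                       # [1.], threshold current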
Example #21
    def current(self, x, gain, bias):
        """Compute current injected in each neuron given input, gain and bias.

        Note that ``x`` is assumed to be already projected onto the encoders
        associated with the neurons and normalized to radius 1, so the maximum
        expected current for a neuron occurs when input for that neuron is 1.

        Parameters
        ----------
        x : (n_samples,) or (n_samples, n_neurons) array_like
            Scalar inputs for which to calculate current.
        gain : (n_neurons,) array_like
            Gains associated with each neuron.
        bias : (n_neurons,) array_like
            Bias current associated with each neuron.

        Returns
        -------
        current : (n_samples, n_neurons)
            Current to be injected in each neuron.
        """
        x = np.array(x, dtype=float, copy=False, ndmin=1)
        gain = np.array(gain, dtype=float, copy=False, ndmin=1)
        bias = np.array(bias, dtype=float, copy=False, ndmin=1)

        if x.ndim == 1:
            x = x[:, np.newaxis]
        elif x.ndim >= 3 or x.shape[1] != gain.shape[0]:
            raise ValidationError(
                "Expected shape (%d, %d); got %s." %
                (x.shape[0], gain.shape[0], x.shape),
                attr="x",
                obj=self,
            )

        return gain * x + bias
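
The broadcasting in isolation (sketch): three scalar samples against two neurons' gain and bias give a (3, 2) current matrix:

import numpy as np

gain = np.array([1.0, 2.0])
bias = np.array([0.5, 0.0])
x = np.array([0.0, 0.5, 1.0])[:, np.newaxis]  # (n_samples, 1)
print(gain * x + bias)                        # shape (3, 2)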
Example #22
    def gain_bias(self, max_rates, intercepts):
        # Make sure the input is a 1D array
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)

        # Make sure the maximum rates are not surpassing the maximally
        # attainable rate
        tau_ref, _, i_th = self._lif_parameters()
        inv_tau_ref = 1. / tau_ref if tau_ref > 0. else np.inf
        if np.any(max_rates > inv_tau_ref):
            raise ValidationError("Max rates must be below the inverse "
                                  "of the sum of the refractory and spike "
                                  "period ({:0.3f})".format(inv_tau_ref),
                                  attr='max_rates',
                                  obj=self)

        # Solve the following linear system for gain, bias
        #   i_th  = gain * intercepts + bias
        #   i_max = gain              + bias
        i_max = self._lif_rate_inv(max_rates)
        gain = (i_max - i_th) / (1. - intercepts)
        bias = i_max - gain

        return gain, bias
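
The two-point solve in isolation (sketch, with made-up currents): given threshold current ``i_th`` at x = intercept and ``i_max`` at x = 1, the linear system recovers gain and bias:

import numpy as np

i_th = 1.0
i_max = np.array([5.0])
intercepts = np.array([0.2])

gain = (i_max - i_th) / (1.0 - intercepts)
bias = i_max - gain
print(gain * intercepts + bias)  # [1.] == i_th
print(gain + bias)               # [5.] == i_max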
Example #23
    def add_neuron_input(self):
        """Adds a node that provides input to the neurons of all ensembles.

        Direct neuron input is useful for inhibiting the activity of all
        neurons in the ensemble array.

        This node is accessible through the 'neuron_input' attribute
        of this ensemble array.
        """
        if self.neuron_input is not None:
            warnings.warn("neuron_input already exists. Returning.")
            return self.neuron_input

        if isinstance(self.ea_ensembles[0].neuron_type, Direct):
            raise ValidationError(
                "Ensembles use Direct neuron type. "
                "Cannot give neuron input to Direct neurons.",
                attr="ea_ensembles[0].neuron_type",
                obj=self,
            )

        self.neuron_input = Node(
            size_in=self.n_neurons_per_ensemble * self.n_ensembles, label="neuron_input"
        )

        for i, ens in enumerate(self.ea_ensembles):
            Connection(
                self.neuron_input[
                    i
                    * self.n_neurons_per_ensemble : (i + 1)
                    * self.n_neurons_per_ensemble
                ],
                ens.neurons,
                synapse=None,
            )
        return self.neuron_input
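
A hedged usage sketch: the typical use is global inhibition of an ``EnsembleArray`` through this shared node:

import numpy as np
import nengo

with nengo.Network():
    ea = nengo.networks.EnsembleArray(n_neurons=20, n_ensembles=3)
    inp = ea.add_neuron_input()
    inhibit = nengo.Node(1.0)
    nengo.Connection(inhibit, inp,
                     transform=-5.0 * np.ones((inp.size_in, 1)))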
Example #24
    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s" %
                                  ((n, d), X.shape),
                                  attr='X0',
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        iters = -np.ones(d, dtype='int')
        for i in range(d):
            X[:, i], iters[i] = self._conjgrad_iters(G,
                                                     B[:, i],
                                                     X[:, i],
                                                     maxiters=self.maxiters,
                                                     rtol=rtol)

        info = {'rmses': rmses(A, X, Y), 'iterations': iters}
        return X if matrix_in else X.flatten(), info
Example #25
    def __init__(self, num, den, output):
        if len(den) > 0:
            raise ValidationError("'den' must be empty (got length %d)"
                                  % len(den), attr='den', obj=self)
        super(LinearFilter.NoDen, self).__init__(num, den, output)
        self.b = num[0]
Example #26
    def validate(self, instance, dct):
        if dct is not None and not isinstance(dct, dict):
            raise ValidationError("Must be a dictionary; got '%s'" % str(dct),
                                  attr=self.name,
                                  obj=instance)
        super(DictParam, self).validate(instance, dct)
Example #27
def full_transform(  # noqa: C901
        conn, slice_pre=True, slice_post=True, allow_scalars=True):
    """Compute the full transform matrix for a Dense connection transform.

    Parameters
    ----------
    conn : Connection
        The connection for which to compute the full transform.
    slice_pre : boolean, optional (True)
        Whether to compute the pre slice as part of the transform.
    slice_post : boolean, optional (True)
        Whether to compute the post slice as part of the transform.
    allow_scalars : boolean, optional (True)
        If true (default), will not make scalars into full transforms when
        not using slicing, since these work fine in the reference builder.
        If false, these scalars will be turned into scaled identity matrices.
    """
    # imported here to avoid circular imports
    # pylint: disable=import-outside-toplevel
    from nengo import Dense
    from nengo.transforms import NoTransform

    if isinstance(conn.transform, NoTransform):
        transform = np.array(1.0)
    elif not isinstance(conn.transform, Dense):
        raise ValidationError(
            "full_transform can only be applied to Dense transforms",
            attr="transform",
            obj=conn,
        )
    else:
        transform = conn.transform.init

    pre_slice = (conn.pre_slice if slice_pre and conn.function is None
                 else slice(None))
    post_slice = conn.post_slice if slice_post else slice(None)

    eq_none_slice = lambda s: isinstance(s, slice) and s == slice(None)
    if eq_none_slice(pre_slice) and eq_none_slice(post_slice):
        if transform.ndim == 2:
            # transform is already full, so return a copy
            return np.array(transform)
        elif transform.size == 1 and allow_scalars:
            if transform.ndim == 1:
                return np.array(transform[0])
            else:
                return np.array(transform)

    # Create the new transform matching the pre/post dimensions
    func_size = conn.function_info.size
    size_in = ((conn.pre_obj.size_out if func_size is None else func_size)
               if slice_pre else conn.size_mid)
    size_out = conn.post_obj.size_in if slice_post else conn.size_out
    new_transform = np.zeros((size_out, size_in))

    if transform.ndim < 2:
        new_transform[np.arange(size_out)[post_slice],
                      np.arange(size_in)[pre_slice]] = transform
        return new_transform
    elif transform.ndim == 2:
        repeated_inds = lambda x: (not isinstance(x, slice)
                                   and np.unique(x).size != len(x))
        if repeated_inds(pre_slice):
            raise NotImplementedError(
                "Input object selection has repeated indices")
        if repeated_inds(post_slice):
            raise NotImplementedError(
                "Output object selection has repeated indices")

        rows_transform = np.array(new_transform[post_slice])
        rows_transform[:, pre_slice] = transform
        new_transform[post_slice] = rows_transform
        # Note: the above is a little obscure, but we do it so that lists of
        #  indices can specify selections of rows and columns, rather than
        #  just individual items
        return new_transform
    else:
        raise ValidationError("Transforms with > 2 dims not supported",
                              attr="transform",
                              obj=conn)
Example #28
    def __init__(
            self,
            input_vectors,
            output_vectors=None,  # noqa: C901
            n_neurons=50,
            threshold=0.3,
            input_scales=1.0,
            inhibitable=False,
            label=None,
            seed=None,
            add_to_container=None):
        super(AssociativeMemory, self).__init__(label, seed, add_to_container)

        # --- Put arguments in canonical form
        if output_vectors is None:
            # If output vocabulary is not specified, use input vector list
            # (i.e autoassociative memory)
            output_vectors = input_vectors
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        if input_vectors.shape[0] == 0:
            raise ValidationError("Number of input vectors cannot be 0.",
                                  attr='input_vectors',
                                  obj=self)

        # Handle possible different threshold / input_scale values for each
        # element in the associative memory
        if not is_iterable(threshold):
            threshold = threshold * np.ones(input_vectors.shape[0])
        else:
            threshold = np.array(threshold)

        # --- Check preconditions
        self.n_items = input_vectors.shape[0]
        if self.n_items != output_vectors.shape[0]:
            raise ValidationError(
                "Number of input vectors (%d) does not match number of output "
                "vectors (%d)" % (self.n_items, output_vectors.shape[0]),
                attr='input_vectors',
                obj=self)
        if threshold.shape[0] != self.n_items:
            raise ValidationError(
                "Number of threshold values (%d) does not match number of "
                "input vectors (%d)." % (threshold.shape[0], self.n_items),
                attr='threshold',
                obj=self)

        # --- Set parameters
        self.out_conns = []  # Used in `add_threshold_to_output`
        # Used in `add_threshold_to_output`
        self.default_vector_inhibit_conns = []
        self.thresh_ens = None  # Will hold thresholded outputs
        self.is_wta = False
        self._inhib_scale = 1.5

        # -- Create the core network
        with self, self.am_ens_config:
            self.bias_node = nengo.Node(output=1)
            self.elem_input = nengo.Node(size_in=self.n_items,
                                         label="element input")
            self.elem_output = nengo.Node(size_in=self.n_items,
                                          label="element output")
            self.utilities = self.elem_output

            self.am_ensembles = []
            label_prefix = "" if label is None else label + "_"
            filt_scale = 15
            filt_step_func = lambda x: filtered_step(x, 0.0, scale=filt_scale)
            for i in range(self.n_items):
                e = nengo.Ensemble(n_neurons, 1, label=label_prefix + str(i))
                self.am_ensembles.append(e)

                # Connect input and output nodes
                nengo.Connection(self.bias_node, e, transform=-threshold[i])
                nengo.Connection(self.elem_input[i], e)
                nengo.Connection(e,
                                 self.elem_output[i],
                                 function=filt_step_func)

            if inhibitable:
                # Input node for inhibitory gating signal (if enabled)
                self.inhibit = nengo.Node(size_in=1, label="inhibit")
                nengo.Connection(self.inhibit,
                                 self.elem_input,
                                 transform=-np.ones(
                                     (self.n_items, 1)) * self._inhib_scale)
                # Note: We can use a decoded connection here because all the
                # am_ensembles have [1] encoders
            else:
                self.inhibit = None
        self.add_input_mapping("input", input_vectors, input_scales)
        self.add_output_mapping("output", output_vectors)
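
A hedged construction sketch (three stored unit vectors, per-item thresholds, inhibition enabled):

import numpy as np
import nengo
from nengo.networks import AssociativeMemory

with nengo.Network():
    am = AssociativeMemory(np.eye(3),
                           threshold=[0.2, 0.3, 0.4],
                           inhibitable=True)

print(am.n_items, am.inhibit.size_in)  # 3 1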
Example #29
    def coerce(self, node, func):
        """
        Performs validation on the function passed to TensorNode, and sets
        ``shape_out`` if necessary.

        Parameters
        ----------
        node : `.TensorNode`
            The node whose ``tensor_func`` parameter is being set.
        func : callable
            The function being assigned to the TensorNode.

        Returns
        -------
        output : callable
            The function after validation is applied.
        """

        output = super().coerce(node, func)

        if not callable(func):
            raise ValidationError(
                "TensorNode output must be a function or Keras Layer",
                attr=self.name,
                obj=node,
            )

        if node.shape_out is None:
            if isinstance(func, tf.keras.layers.Layer):
                # we can use Keras' static shape inference to get the
                # output shape, which avoids having to build/call the layer
                if node.pass_time:
                    input_spec = [tf.TensorSpec(())]
                else:
                    input_spec = []
                if node.shape_in is not None:
                    input_spec += [tf.TensorSpec((1,) + node.shape_in)]

                if len(input_spec) == 1:
                    input_spec = input_spec[0]

                ctx = contextlib.suppress() if eager_enabled() else context.eager_mode()

                try:
                    with ctx:
                        result = func.compute_output_signature(input_spec)
                except Exception as e:
                    raise ValidationError(
                        "Attempting to automatically determine TensorNode output shape "
                        "by calling Layer.compute_output_signature produced an error. "
                        "If you would like to avoid this step, try manually setting "
                        "`TensorNode(..., shape_out=x)`. The error is shown below:\n%s"
                        % repr(e),
                        attr=self.name,
                        obj=node,
                    )

            else:
                if node.pass_time:
                    args = (tf.constant(0.0),)
                else:
                    args = ()
                if node.shape_in is not None:
                    args += (tf.zeros((1,) + node.shape_in),)

                try:
                    result = func(*args)
                except Exception as e:
                    raise ValidationError(
                        "Attempting to automatically determine TensorNode output shape "
                        "by calling TensorNode function produced an error. "
                        "If you would like to avoid this step, try manually setting "
                        "`TensorNode(..., shape_out=x)`. The error is shown below:\n%s"
                        % e,
                        attr=self.name,
                        obj=node,
                    )

            validate_output(result)

            node.shape_out = result.shape[1:]

        return output
Example #30
    def validate(self, instance, ndarray):  # noqa: C901
        if isinstance(ndarray, np.ndarray):
            ndarray = ndarray.view()
        else:
            try:
                ndarray = np.array(ndarray, dtype=np.float64)
            except (ValueError, TypeError):
                raise ValidationError(
                    "Must be a float NumPy array (got type %r)" %
                    ndarray.__class__.__name__,
                    attr=self.name,
                    obj=instance)

        if self.readonly:
            ndarray.setflags(write=False)

        if '...' in self.shape:
            # Convert '...' to the appropriate number of '*'s
            nfixed = len(self.shape) - 1
            n = ndarray.ndim - nfixed
            if n < 0:
                raise ValidationError(
                    "ndarray must be at least %dD (got %dD)" %
                    (nfixed, ndarray.ndim),
                    attr=self.name,
                    obj=instance)

            i = self.shape.index('...')
            shape = list(self.shape[:i]) + (['*'] * n)
            if i < len(self.shape) - 1:
                shape.extend(self.shape[i + 1:])
        else:
            shape = self.shape

        if ndarray.ndim != len(shape):
            raise ValidationError("ndarray must be %dD (got %dD)" %
                                  (len(shape), ndarray.ndim),
                                  attr=self.name,
                                  obj=instance)

        for i, attr in enumerate(shape):
            assert is_integer(attr) or is_string(attr), (
                "shape can only be an int or str representing an attribute")
            if attr == '*':
                continue

            desired = attr if is_integer(attr) else getattr(instance, attr)

            if not is_integer(desired):
                raise ValidationError(
                    "%s not yet initialized; cannot determine if shape is "
                    "correct. Consider using a distribution instead." % attr,
                    attr=self.name,
                    obj=instance)

            if ndarray.shape[i] != desired:
                raise ValidationError("shape[%d] should be %d (got %d)" %
                                      (i, desired, ndarray.shape[i]),
                                      attr=self.name,
                                      obj=instance)
        return ndarray
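
The ``'...'`` expansion in isolation (sketch): an ellipsis in the shape spec pads with ``'*'`` wildcards so a spec like ``('...', 'd')`` matches any leading dimensions:

spec = ('...', 'd')
ndim = 3  # validating a 3-D array

nfixed = len(spec) - 1
n = ndim - nfixed
i = spec.index('...')
shape = list(spec[:i]) + ['*'] * n
if i < len(spec) - 1:
    shape.extend(spec[i + 1:])
print(shape)  # ['*', '*', 'd']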