Example #1
class PresentInput(Process):
    """Present a series of inputs, each for the same fixed length of time.

    Parameters
    ----------
    inputs : array_like
        Inputs to present, where each row is an input. Rows will be flattened.
    presentation_time : float
        Show each input for this amount of time (in seconds).
    """

    inputs = NdarrayParam("inputs", shape=("...", ))
    presentation_time = NumberParam("presentation_time", low=0, low_open=True)

    def __init__(self, inputs, presentation_time, **kwargs):
        self.inputs = inputs
        self.presentation_time = presentation_time
        super().__init__(default_size_in=0,
                         default_size_out=self.inputs[0].size,
                         **kwargs)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0, )
        assert shape_out == (self.inputs[0].size, )

        n = len(self.inputs)
        inputs = self.inputs.reshape(n, -1)
        presentation_time = float(self.presentation_time)

        def step_presentinput(t):
            i = int((t - dt) / presentation_time + 1e-7)
            return inputs[i % n]

        return step_presentinput
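A minimal usage sketch (assumes a working nengo installation; `images` is a
hypothetical input batch):

import nengo
import numpy as np

images = np.random.RandomState(0).rand(10, 28 * 28)  # hypothetical inputs
with nengo.Network() as model:
    stim = nengo.Node(PresentInput(images, presentation_time=0.1))
    probe = nengo.Probe(stim)
with nengo.Simulator(model) as sim:
    sim.run(1.0)  # presents all 10 inputs for 0.1 s each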
Example #2
class PresentInput(Process):
    """Present a series of inputs, each for the same fixed length of time.

    Parameters
    ----------
    inputs : array_like
        Inputs to present, where each row is an input. Rows will be flattened.
    presentation_time : float
        Show each input for `presentation_time` seconds.
    """
    inputs = NdarrayParam(shape=('...',))
    presentation_time = NumberParam(low=0, low_open=True)

    def __init__(self, inputs, presentation_time):
        self.inputs = inputs
        self.presentation_time = presentation_time
        super(PresentInput, self).__init__(
            default_size_out=self.inputs[0].size)

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0
        assert size_out == self.inputs[0].size

        n = len(self.inputs)
        inputs = self.inputs.reshape(n, -1)
        presentation_time = float(self.presentation_time)

        def step_image_input(t):
            i = int(t / presentation_time + 1e-7)
            return inputs[i % n]

        return step_image_input
Example #3
class Mixture(Distribution):
    distributions = TupleParam('distributions')
    p = NdarrayParam('p', shape='*', optional=True)

    def __init__(self, distributions, p=None):
        super(Mixture, self).__init__()

        self.distributions = distributions
        if not all(isinstance(d, Distribution) for d in self.distributions):
            raise ValueError(
                "All elements in `distributions` must be Distributions")

        if p is not None:
            p = np.array(p, dtype=float)  # float copy so `p /= p.sum()` works
            if p.ndim != 1 or p.size != len(self.distributions):
                raise ValueError(
                    "`p` must be a vector with one element per distribution")
            if (p < 0).any():
                raise ValueError("`p` must be all non-negative")
            p /= p.sum()
        self.p = p

    def sample(self, n, d=None, rng=np.random):
        dd = 1 if d is None else d
        samples = np.zeros((n, dd))

        ndims = len(self.distributions)
        i = (rng.randint(ndims, size=n)
             if self.p is None else rng.choice(ndims, p=self.p, size=n))
        c = np.bincount(i, minlength=ndims)

        for k in c.nonzero()[0]:
            samples[i == k] = self.distributions[k].sample(c[k], d=dd, rng=rng)

        return samples[:, 0] if d is None else samples
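A usage sketch, assuming nengo's `Gaussian` distribution is importable as
shown; the mixture weights `p` are normalized internally:

import numpy as np
from nengo.dists import Gaussian

mix = Mixture([Gaussian(mean=-1, std=0.1), Gaussian(mean=1, std=0.1)],
              p=[1, 1])  # normalized to [0.5, 0.5]
samples = mix.sample(1000, rng=np.random.RandomState(0))  # shape (1000,)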
Example #4
class MultivariateGaussian(Distribution):
    mean = NdarrayParam('mean', shape='d')
    cov = NdarrayParam('cov', shape=('d', 'd'))

    def __init__(self, mean, cov):
        super(MultivariateGaussian, self).__init__()

        self.d = len(mean)
        self.mean = mean
        cov = np.asarray(cov)
        self.cov = (cov * np.eye(self.d) if cov.size == 1 else
                    np.diag(cov) if cov.ndim == 1 else cov)

    def sample(self, n, d=None, rng=np.random):
        assert d is None or d == self.d
        return rng.multivariate_normal(self.mean, self.cov, size=n)
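A usage sketch; `cov` may be given as a scalar, a diagonal vector, or a full
covariance matrix:

import numpy as np

mvg = MultivariateGaussian(mean=[0.0, 0.0], cov=[[1.0, 0.5], [0.5, 1.0]])
pts = mvg.sample(500, d=2, rng=np.random.RandomState(0))  # shape (500, 2)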
Example #5
class PDF(Distribution):
    """An arbitrary distribution from a PDF.

    Parameters
    ----------
    x : vector_like (n,)
        Values of the points to sample from (interpolated).
    p : vector_like (n,)
        Probabilities of the ``x`` points.
    """

    x = NdarrayParam('x', shape='*')
    p = NdarrayParam('p', shape='*')

    def __init__(self, x, p):
        super(PDF, self).__init__()

        psum = np.sum(p)
        if np.abs(psum - 1) > 1e-8:
            raise ValidationError("PDF must sum to one (sums to %f)" % psum,
                                  attr='p',
                                  obj=self)

        self.x = x
        self.p = p
        if len(self.x) != len(self.p):
            raise ValidationError("`x` and `p` must be the same length",
                                  attr='p',
                                  obj=self)

        # CDF evaluated at the bin midpoints: with csum = [0] + cumsum(p),
        # cdf[i] = 0.5 * (csum[i] + csum[i + 1])
        cumsum = np.cumsum(p)
        cumsum *= 0.5
        cumsum[1:] = cumsum[:-1] + cumsum[1:]
        self.cdf = cumsum

    def __repr__(self):
        return "PDF(x=%r, p=%r)" % (self.x, self.p)

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        return np.interp(rng.uniform(size=shape), self.cdf, self.x)
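A usage sketch: a triangular density on [0, 1], normalized so the
sum-to-one check passes:

import numpy as np

x = np.linspace(0, 1, 11)
p = np.concatenate([np.linspace(0, 1, 6), np.linspace(0.8, 0, 5)])
dist = PDF(x, p / p.sum())
samples = dist.sample(1000, rng=np.random.RandomState(0))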
Example #6
class Samples(Distribution):
    """A set of samples.

    This class is a subclass of `.Distribution` so that it can be used in any
    situation that calls for a `.Distribution`. However, the call to
    `.Distribution.sample` must match the dimensions of the samples or
    a `.ValidationError` will be raised.

    Parameters
    ----------
    samples : (n, d) array_like
        ``n`` and ``d`` must match what is eventually passed to
        `.Distribution.sample`.
    """

    samples = NdarrayParam("samples", shape=("...", ))

    def __init__(self, samples):
        super().__init__()
        self.samples = samples

    def sample(self, n, d=None, rng=np.random):
        samples = np.array(self.samples)
        shape = (n, ) if d is None else (n, d)

        if d is None:
            samples = samples.squeeze()

        if d is not None and samples.ndim == 1:
            samples = samples[..., np.newaxis]

        if samples.shape[0] != shape[0]:
            raise ValidationError(
                "Wrong number of samples requested; got "
                f"{n}, should be {samples.shape[0]}",
                attr="samples",
                obj=self,
            )
        elif d is None and len(samples.shape) != 1:
            raise ValidationError(
                "Wrong sample dimensionality requested; got "
                f"'None', should be {samples.shape[1]}",
                attr="samples",
                obj=self,
            )
        elif d is not None and samples.shape[1] != shape[1]:
            raise ValidationError(
                "Wrong sample dimensionality requested; got "
                f"{d}, should be {samples.shape[1]}",
                attr="samples",
                obj=self,
            )

        return samples
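A usage sketch: wrapping precomputed points so they can stand in anywhere a
`Distribution` is expected:

import numpy as np

points = np.random.RandomState(0).uniform(-1, 1, size=(100, 2))
dist = Samples(points)
assert dist.sample(100, d=2).shape == (100, 2)
# Requesting a different n or d raises ValidationError, as documented above.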
Example #7
class BlockConjgrad(LeastSquaresSolver):
    """Solve a multiple-RHS least-squares system using block conj. gradient."""

    tol = NumberParam('tol', low=0)
    X0 = NdarrayParam('X0', shape=('*', '*'), optional=True)

    def __init__(self, tol=1e-2, X0=None):
        super(BlockConjgrad, self).__init__()
        self.tol = tol
        self.X0 = X0

    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        sigma = np.asarray(sigma, dtype='float')
        sigma = sigma.reshape(sigma.size, 1)

        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s" %
                                  ((n, d), X.shape),
                                  attr='X0',
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        # --- conjugate gradient
        R = B - G(X)
        P = np.array(R)
        Rsold = np.dot(R.T, R)
        AP = np.zeros((n, d))

        maxiters = int(n / d)
        for i in range(maxiters):
            AP = G(P)
            alpha = np.linalg.solve(np.dot(P.T, AP), Rsold)
            X += np.dot(P, alpha)
            R -= np.dot(AP, alpha)

            Rsnew = np.dot(R.T, R)
            if (np.diag(Rsnew) < rtol**2).all():
                break

            beta = np.linalg.solve(Rsold, Rsnew)
            P = R + np.dot(P, beta)
            Rsold = Rsnew

        info = {'rmses': rmses(A, X, Y), 'iterations': i + 1}
        return X if matrix_in else X.ravel(), info
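A usage sketch on a random system (shapes are illustrative; assumes the same
module-level `format_system` and `rmses` helpers used above):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(500, 50)  # (m, n) system matrix
Y = rng.randn(500, 4)   # (m, d) targets, solved as a single block
X, info = BlockConjgrad(tol=1e-3)(A, Y, sigma=0.01)
print(info['rmses'].shape, info['iterations'])  # (4,) and iteration count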
Example #8
class PresentInputWithPause(Process):
    """Present a series of inputs, each for the same fixed length of time.

    Parameters
    ----------
    inputs : array_like
        Inputs to present, where each row is an input. Rows will be flattened.
    presentation_time : float
        Show each input for this amount of time (in seconds).
    pause_time : float
        Pause time after each input (in seconds).
    pause_value : float
        Value to output during the pause.
    """

    inputs = NdarrayParam("inputs", shape=("...", ))
    presentation_time = NumberParam("presentation_time", low=0, low_open=True)
    pause_time = NumberParam("pause_time", low=0, low_open=True)

    def __init__(self, inputs, presentation_time, pause_time, pause_value,
                 **kwargs):
        self.inputs = inputs
        self.presentation_time = presentation_time
        self.pause_time = pause_time
        self.pause_value = pause_value
        super().__init__(default_size_in=0,
                         default_size_out=self.inputs[0].size,
                         **kwargs)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0, )
        assert shape_out == (self.inputs[0].size, )
        n = len(self.inputs)
        inputs = self.inputs.reshape(n, -1)
        presentation_time = float(self.presentation_time)
        pause_time = float(self.pause_time)

        def step_presentinput(t):
            t = round(t, 6)
            total_time = presentation_time + pause_time
            i = int(t / total_time)
            ti = t % total_time
            if ti > presentation_time:
                # During the pause, output the pause value
                return np.ones_like(inputs[0]) * self.pause_value
            return inputs[i % n]

        return step_presentinput
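A usage sketch mirroring `PresentInput`, but with a 50 ms zero-valued pause
between inputs (assumes a working nengo installation):

import nengo
import numpy as np

images = np.random.RandomState(0).rand(10, 28 * 28)  # hypothetical inputs
process = PresentInputWithPause(images, presentation_time=0.1,
                                pause_time=0.05, pause_value=0.0)
with nengo.Network() as model:
    stim = nengo.Node(process)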
Example #9
class NoSolver(Solver):
    """Manually pass in weights, bypassing the decoder solver.

    Parameters
    ----------
    values : (n_neurons, size_out) array_like, optional
        The array of decoders to use.
        ``size_out`` is the dimensionality of the decoded signal (determined
        by the connection function).
        If ``None``, which is the default, the solver will return an
        appropriately sized array of zeros.
    weights : bool, optional
        If False, connection will use factored weights (decoders from this
        solver, transform, and encoders).
        If True, connection will use a full weight matrix (created by
        linearly combining decoder, transform, and encoders).

    Attributes
    ----------
    values : (n_neurons, size_out) array_like, optional
        The array of decoders to use.
        ``size_out`` is the dimensionality of the decoded signal (determined
        by the connection function).
        If ``None``, which is the default, the solver will return an
        appropriately sized array of zeros.
    weights : bool, optional
        If False, connection will use factored weights (decoders from this
        solver, transform, and encoders).
        If True, connection will use a full weight matrix (created by
        linearly combining decoder, transform, and encoders).
    """

    compositional = True

    values = NdarrayParam("values", optional=True, shape=("*", "*"))

    def __init__(self, values=None, weights=False):
        super().__init__(weights=weights)
        self.values = values

    def __call__(self, A, Y, rng=None):
        if self.values is None:
            n_neurons = np.asarray(A).shape[1]

            return np.zeros((n_neurons, np.asarray(Y).shape[1])), {}

        return self.values, {}
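A usage sketch: handing precomputed decoders to a connection so that no
decoder solving takes place (assumes a standard nengo model):

import nengo
import numpy as np

decoders = np.zeros((100, 1))  # hypothetical precomputed decoders
with nengo.Network() as model:
    a = nengo.Ensemble(100, dimensions=1)
    b = nengo.Node(size_in=1)
    nengo.Connection(a, b, solver=NoSolver(decoders))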
Example #10
class NoSolver(Solver):
    """Manually pass in weights, bypassing the decoder solver.

    Parameters
    ----------
    values : (n_neurons, n_weights) array_like, optional (Default: None)
        The array of decoders or weights to use.
        If ``weights`` is ``False``, ``n_weights`` is the expected
        output dimensionality. If ``weights`` is ``True``,
        ``n_weights`` is the number of neurons in the post ensemble.
        If ``None``, which is the default, the solver will return an
        appropriately sized array of zeros.
    weights : bool, optional (Default: False)
        If False, ``values`` is interpreted as decoders.
        If True, ``values`` is interpreted as weights.

    Attributes
    ----------
    values : (n_neurons, n_weights) array_like, optional (Default: None)
        The array of decoders or weights to use.
        If ``weights`` is ``False``, ``n_weights`` is the expected
        output dimensionality. If ``weights`` is ``True``,
        ``n_weights`` is the number of neurons in the post ensemble.
        If ``None``, which is the default, the solver will return an
        appropriately sized array of zeros.
    weights : bool, optional (Default: False)
        If False, ``values`` is interpreted as decoders.
        If True, ``values`` is interpreted as weights.
    """

    values = NdarrayParam("values", optional=True, shape=("*", "*"))

    def __init__(self, values=None, weights=False):
        super(NoSolver, self).__init__(weights=weights)
        self.values = values

    def __call__(self, A, Y, rng=None, E=None):
        if self.values is None:
            n_neurons = np.asarray(A).shape[1]
            if self.weights:
                return np.zeros((n_neurons, np.asarray(E).shape[1])), {}
            else:
                return np.zeros((n_neurons, np.asarray(Y).shape[1])), {}

        return self.values, {}
Example #11
def test_copy_instance_params():
    with nengo.Network() as orig_net:
        orig_net.config[nengo.Ensemble].set_param(
            "test", IntParam("test", optional=True))
        orig_net.config[nengo.Ensemble].set_param(
            "test2", NdarrayParam("test2", optional=True))

        orig_ens = nengo.Ensemble(10, 1)
        orig_net.config[orig_ens].test = 42
        orig_net.config[orig_ens].test2 = np.array([49])

    # test copy function
    copy_net = orig_net.copy()
    copy_ens = copy_net.ensembles[0]
    assert copy_net.config[copy_ens].test == 42
    assert np.array_equal(copy_net.config[copy_ens].test2, np.array([49]))

    # test copy via pickle
    copy_net = pickle.loads(pickle.dumps(orig_net))
    copy_ens = copy_net.ensembles[0]
    assert copy_net.config[copy_ens].test == 42
    assert np.array_equal(copy_net.config[copy_ens].test2, np.array([49]))
Example #12
class Tile(Distribution):
    """Choose values in order from an array

    This distribution is not random, but rather tiles an array to be a
    particular size. This is useful, for example, if you want to pass an
    array for a neuron parameter but are not sure how many neurons there
    will be.

    Parameters
    ----------
    values : array_like
        The values to tile.
    """

    values = NdarrayParam('values', shape=('*', '*'))

    def __init__(self, values):
        super(Tile, self).__init__()

        values = np.asarray(values)
        self.values = values.reshape(-1, 1) if values.ndim < 2 else values

    def __repr__(self):
        return "Tile(values=%s)" % (self.values)

    def sample(self, n, d=None, rng=np.random):
        out1 = d is None
        d = 1 if d is None else d
        nv, dv = self.values.shape

        if n > nv or d > dv:
            values = np.tile(
                self.values,
                (int(np.ceil(float(n) / nv)), int(np.ceil(float(d) / dv))))
        else:
            values = self.values

        values = values[:n, :d]
        return values[:, 0] if out1 else values
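A usage sketch: the values repeat in order until `n` samples are produced:

t = Tile([1.0, 2.0, 3.0])
print(t.sample(7))  # [1. 2. 3. 1. 2. 3. 1.]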
Example #13
class Conv2d(Process):
    """Perform 2-D (image) convolution on an input.

    Parameters
    ----------
    shape_in : 3-tuple (n_channels, height, width)
        Shape of the input images: channels, height, width.
    filters : array_like (n_filters, n_channels, f_height, f_width)
        Static filters to convolve with the input. Shape is number of filters,
        number of input channels, filter height, and filter width. Shape can
        also be (n_filters, height, width, n_channels, f_height, f_width)
        to apply different filters at each point in the image, where 'height'
        and 'width' are the input image height and width.
    biases : array_like (1,) or (n_filters,) or (n_filters, height, width)
        Biases to add to outputs. Can have one bias across the entire output
        space, one bias per filter, or a unique bias for each output pixel.
    strides : 2-tuple (vertical, horizontal) or int
        Spacing between filter placements. If an integer
        is provided, the same spacing is used in both dimensions.
    padding : 2-tuple (vertical, horizontal) or int
        Amount of zero-padding around the outside of the input image. Padding
        is applied to both sides, e.g. ``padding=(1, 0)`` will add one pixel
        of padding to the top and bottom, and none to the left and right.
    """

    shape_in = ShapeParam('shape_in', length=3, low=1)
    shape_out = ShapeParam('shape_out', length=3, low=1)
    strides = ShapeParam('strides', length=2, low=1)
    padding = ShapeParam('padding', length=2)
    filters = NdarrayParam('filters', shape=('...', ))
    biases = NdarrayParam('biases', shape=('...', ), optional=True)

    def __init__(self,
                 shape_in,
                 filters,
                 biases=None,
                 strides=1,
                 padding=0):  # noqa: C901
        self.shape_in = shape_in
        self.filters = filters
        if self.filters.ndim not in [4, 6]:
            raise ValueError(
                "`filters` must have four or six dimensions "
                "(filters, [height, width,] channels, f_height, f_width)")
        if self.filters.shape[-3] != self.shape_in[0]:
            raise ValueError(
                "Filter channels (%d) and input channels (%d) must match" %
                (self.filters.shape[-3], self.shape_in[0]))
        if not all(s % 2 == 1 for s in self.filters.shape[-2:]):
            raise ValueError("Filter shapes must be odd (got %r)" %
                             (self.filters.shape[-2:], ))

        self.strides = strides if is_iterable(strides) else [strides] * 2
        self.padding = padding if is_iterable(padding) else [padding] * 2

        nf = self.filters.shape[0]
        nxi, nxj = self.shape_in[1:]
        si, sj = self.filters.shape[-2:]
        pi, pj = self.padding
        sti, stj = self.strides
        nyi = 1 + max(int(np.ceil(float(2 * pi + nxi - si) / sti)), 0)
        nyj = 1 + max(int(np.ceil(float(2 * pj + nxj - sj) / stj)), 0)
        self.shape_out = (nf, nyi, nyj)
        if self.filters.ndim == 6 and self.filters.shape[1:3] != (nyi, nyj):
            raise ValueError("Number of local filters %r must match out shape "
                             "%r" % (self.filters.shape[1:3], (nyi, nyj)))

        self.biases = biases if biases is not None else None
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1, ) + self.shape_out[1:]
            else:
                raise ValueError(
                    "Biases size (%d) does not match output shape %s" %
                    (self.biases.size, self.shape_out))

        super(Conv2d, self).__init__(default_size_in=np.prod(self.shape_in),
                                     default_size_out=np.prod(self.shape_out))

    def make_step(self, shape_in, shape_out, dt, rng):
        assert np.prod(shape_in) == np.prod(self.shape_in)
        assert np.prod(shape_out) == np.prod(self.shape_out)
        shape_in, shape_out = self.shape_in, self.shape_out

        filters = self.filters
        local_filters = filters.ndim == 6
        biases = self.biases

        nxi, nxj = shape_in[-2:]
        nyi, nyj = shape_out[-2:]
        nf = filters.shape[0]
        si, sj = filters.shape[-2:]
        pi, pj = self.padding
        sti, stj = self.strides

        def step_conv2d(t, x):
            x = x.reshape(shape_in)
            y = np.zeros(shape_out)

            for i in range(nyi):
                for j in range(nyj):
                    i0 = i * sti - pi
                    j0 = j * stj - pj
                    i1, j1 = i0 + si, j0 + sj
                    sli = slice(max(-i0, 0), min(nxi + si - i1, si))
                    slj = slice(max(-j0, 0), min(nxj + sj - j1, sj))
                    w = (filters[:, i, j, :, sli, slj] if local_filters
                         else filters[:, :, sli, slj])
                    xij = x[:,
                            max(i0, 0):min(i1, nxi),
                            max(j0, 0):min(j1, nxj)]
                    y[:, i, j] = np.dot(w.reshape(nf, -1), xij.ravel())

            if biases is not None:
                y += biases

            return y.ravel()

        return step_conv2d
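A usage sketch: eight random 3x3 filters over a one-channel 28x28 image;
with stride 1 and padding 1 the output stays 28x28 per filter:

import numpy as np

rng = np.random.RandomState(0)
conv = Conv2d((1, 28, 28), rng.randn(8, 1, 3, 3), strides=1, padding=1)
assert conv.shape_out == (8, 28, 28)
step = conv.make_step(conv.shape_in, conv.shape_out, dt=0.001, rng=rng)
y = step(0.0, rng.randn(int(np.prod(conv.shape_in))))  # flat (8, 28, 28)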
Example #14
class Conjgrad(LeastSquaresSolver):
    """Solve a least-squares system using conjugate gradient."""

    tol = NumberParam('tol', low=0)
    maxiters = IntParam('maxiters', low=1, optional=True)
    X0 = NdarrayParam('X0', shape=('*', '*'), optional=True)

    def __init__(self, tol=1e-2, maxiters=None, X0=None):
        super(Conjgrad, self).__init__()
        self.tol = tol
        self.maxiters = maxiters
        self.X0 = X0

    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s" %
                                  ((n, d), X.shape),
                                  attr='X0',
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        iters = -np.ones(d, dtype='int')
        for i in range(d):
            X[:, i], iters[i] = self._conjgrad_iters(G,
                                                     B[:, i],
                                                     X[:, i],
                                                     maxiters=self.maxiters,
                                                     rtol=rtol)

        info = {'rmses': rmses(A, X, Y), 'iterations': iters}
        return X if matrix_in else X.ravel(), info

    @staticmethod
    def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6):
        """Solve a single-RHS linear system using conjugate gradient."""

        if maxiters is None:
            maxiters = b.shape[0]

        r = b - calcAx(x)
        p = r.copy()
        rsold = np.dot(r, r)

        for i in range(maxiters):
            Ap = calcAx(p)
            alpha = rsold / np.dot(p, Ap)
            x += alpha * p
            r -= alpha * Ap

            rsnew = np.dot(r, r)
            beta = rsnew / rsold

            if np.sqrt(rsnew) < rtol:
                break

            if beta < 1e-12:  # no perceptible change in p
                break

            # p = r + beta*p
            p *= beta
            p += r
            rsold = rsnew

        return x, i + 1
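A usage sketch with a single right-hand side; a 1-D `Y` comes back as a 1-D
solution (assumes the module-level `format_system` and `rmses` helpers):

import numpy as np

rng = np.random.RandomState(0)
A, y = rng.randn(500, 50), rng.randn(500)
x, info = Conjgrad(tol=1e-4)(A, y, sigma=0.01)  # x.shape == (50,)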
Example #15
    def validate(self, instance, distorarray):
        if isinstance(distorarray, Distribution):
            Parameter.validate(self, instance, distorarray)
            return distorarray
        else:
            return NdarrayParam.validate(self, instance, distorarray)
Example #16
class Conv2(Process):
    """Perform 2-D (image) convolution on an input.

    Parameters
    ----------
    filters : array_like (n_filters, n_channels, f_height, f_width)
        Static filters to convolve with the input. Shape is number of filters,
        number of input channels, filter height, and filter width. Shape can
        also be (n_filters, height, width, n_channels, f_height, f_width)
        to apply different filters at each point in the image, where 'height'
        and 'width' are the input image height and width.
    shape_in : 3-tuple (n_channels, height, width)
        Shape of the input images: channels, height, width.
    """

    shape_in = TupleParam(length=3)
    shape_out = TupleParam(length=3)
    filters = NdarrayParam(shape=('...',))
    biases = NdarrayParam(shape=('...',), optional=True)

    def __init__(self, shape_in, filters, biases=None):
        self.shape_in = tuple(shape_in)
        if len(self.shape_in) != 3:
            raise ValueError("`shape_in` must have three dimensions "
                             "(channels, height, width)")

        self.filters = filters
        self.shape_out = (self.filters.shape[0],) + self.shape_in[1:]
        if len(self.filters.shape) not in [4, 6]:
            raise ValueError(
                "`filters` must have four or six dimensions "
                "(filters, [height, width,] channels, f_height, f_width)")
        if self.filters.shape[-3] != self.shape_in[0]:
            raise ValueError(
                "Filter channels (%d) and input channels (%d) must match"
                % (self.filters.shape[-3], self.shape_in[0]))

        self.biases = biases if biases is not None else None
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1,) + self.shape_out[1:]
            else:
                raise ValueError(
                    "Biases size (%d) does not match output shape %s"
                    % (self.biases.size, self.shape_out))

        super(Conv2, self).__init__(
            default_size_in=np.prod(self.shape_in),
            default_size_out=np.prod(self.shape_out))

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == np.prod(self.shape_in)
        assert size_out == np.prod(self.shape_out)

        filters = self.filters
        local_filters = filters.ndim == 6
        biases = self.biases
        shape_in = self.shape_in
        shape_out = self.shape_out

        def step_conv2(t, x):
            x = x.reshape(shape_in)
            ni, nj = shape_in[-2:]
            f = filters.shape[0]
            si, sj = filters.shape[-2:]
            si2 = (si - 1) // 2  # integer division: used in slice indices
            sj2 = (sj - 1) // 2

            y = np.zeros(shape_out)
            for i in range(ni):
                for j in range(nj):
                    i0, i1 = i - si2, i + si2 + 1
                    j0, j1 = j - sj2, j + sj2 + 1
                    sli = slice(max(-i0, 0), min(ni + si - i1, si))
                    slj = slice(max(-j0, 0), min(nj + sj - j1, sj))
                    w = (filters[:, i, j, :, sli, slj] if local_filters else
                         filters[:, :, sli, slj])
                    xij = x[:, max(i0, 0):min(i1, ni), max(j0, 0):min(j1, nj)]
                    y[:, i, j] = np.dot(xij.ravel(), w.reshape(f, -1).T)

            if biases is not None:
                y += biases

            return y.ravel()

        return step_conv2
Example #17
class Choice(Distribution):
    """Discrete distribution across a set of possible values.

    The same as Numpy random's `~numpy.random.RandomState.choice`,
    except can take vector or matrix values for the choices.

    Parameters
    ----------
    options : (N, ...) array_like
        The options (choices) to choose between. The choice is always done
        along the first axis, so if ``options`` is a matrix, the options are
        the rows of that matrix.
    weights : (N,) array_like, optional
        Weights controlling the probability of selecting each option. Will
        automatically be normalized. If None, weights will be uniformly
        distributed.
    """

    options = NdarrayParam("options", shape=("*", "..."))
    weights = NdarrayParam("weights", shape=("*"), optional=True)

    def __init__(self, options, weights=None):
        super().__init__()
        self.options = options
        self.weights = weights

        weights = (np.ones(len(self.options))
                   if self.weights is None else self.weights)
        if len(weights) != len(self.options):
            raise ValidationError(
                f"Number of weights ({len(weights)}) must match "
                f"number of options ({len(self.options)})",
                attr="weights",
                obj=self,
            )
        if not all(weights >= 0):
            raise ValidationError("All weights must be non-negative",
                                  attr="weights",
                                  obj=self)
        total = float(weights.sum())
        if total <= 0:
            raise ValidationError(
                f"Sum of weights must be positive (got {total:f})",
                attr="weights",
                obj=self,
            )
        self.p = weights / total

    @property
    def dimensions(self):
        return 0 if self.options.ndim == 1 else np.prod(self.options.shape[1:])

    def sample(self, n, d=None, rng=np.random):
        if d is not None and self.dimensions != d:
            raise ValidationError(
                f"Options must be of dimensionality {d} (got {self.dimensions})",
                attr="options",
                obj=self,
            )

        i = np.searchsorted(np.cumsum(self.p), rng.rand(n))
        return self.options[i]
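A usage sketch: choosing among row vectors with unequal weights:

import numpy as np

options = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])
c = Choice(options, weights=[2, 1, 1])  # normalized to [0.5, 0.25, 0.25]
rows = c.sample(4, d=2, rng=np.random.RandomState(0))  # shape (4, 2)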
Example #18
class LinearFilter(Synapse):
    """General linear time-invariant (LTI) system synapse.

    This class can be used to implement any linear filter, given the
    filter's transfer function. [1]_

    Parameters
    ----------
    num : array_like
        Numerator coefficients of transfer function.
    den : array_like
        Denominator coefficients of transfer function.
    analog : boolean, optional
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.
    method : string
        The method to use for discretization (if ``analog`` is True). See
        `scipy.signal.cont2discrete` for information about the options.

        .. versionadded:: 3.0.0

    Attributes
    ----------
    analog : boolean
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.
    den : ndarray
        Denominator coefficients of transfer function.
    num : ndarray
        Numerator coefficients of transfer function.
    method : string
        The method to use for discretization (if ``analog`` is True). See
        `scipy.signal.cont2discrete` for information about the options.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Filter_%28signal_processing%29
    """

    num = NdarrayParam("num", shape="*")
    den = NdarrayParam("den", shape="*")
    analog = BoolParam("analog")
    method = EnumParam(
        "method", values=("gbt", "bilinear", "euler", "backward_diff", "zoh")
    )

    def __init__(self, num, den, analog=True, method="zoh", **kwargs):
        super().__init__(**kwargs)
        self.num = num
        self.den = den
        self.analog = analog
        self.method = method

    def combine(self, obj):
        """Combine in series with another LinearFilter."""
        if not isinstance(obj, LinearFilter):
            raise ValidationError(
                "Can only combine with other LinearFilters", attr="obj"
            )
        if self.analog != obj.analog:
            raise ValidationError(
                "Cannot combine analog and digital filters", attr="obj"
            )
        num = np.polymul(self.num, obj.num)
        den = np.polymul(self.den, obj.den)
        return LinearFilter(
            num,
            den,
            analog=self.analog,
            default_size_in=self.default_size_in,
            default_size_out=self.default_size_out,
            default_dt=self.default_dt,
            seed=self.seed,
        )

    def evaluate(self, frequencies):
        """Evaluate the transfer function at the given frequencies.

        Examples
        --------
        Using the ``evaluate`` function to make a Bode plot:

        .. testcode::

           import matplotlib.pyplot as plt

           synapse = nengo.synapses.LinearFilter([1], [0.02, 1])
           f = np.logspace(-1, 3, 100)
           y = synapse.evaluate(f)
           plt.subplot(211); plt.semilogx(f, 20*np.log10(np.abs(y)))
           plt.xlabel('frequency [Hz]'); plt.ylabel('magnitude [dB]')
           plt.subplot(212); plt.semilogx(f, np.angle(y))
           plt.xlabel('frequency [Hz]'); plt.ylabel('phase [radians]')
        """
        frequencies = 2.0j * np.pi * frequencies
        w = frequencies if self.analog else np.exp(frequencies)
        y = np.polyval(self.num, w) / np.polyval(self.den, w)
        return y

    def _get_ss(self, dt):
        A, B, C, D = tf2ss(self.num, self.den)

        # discretize (if len(A) == 0, filter is stateless and already discrete)
        if self.analog and len(A) > 0:
            A, B, C, D, _ = cont2discrete((A, B, C, D), dt, method=self.method)

        return A, B, C, D

    def make_state(self, shape_in, shape_out, dt, dtype=None, y0=0):
        assert shape_in == shape_out

        dtype = rc.float_dtype if dtype is None else np.dtype(dtype)
        if dtype.kind != "f":
            raise ValidationError(
                f"Only float data types are supported (got {dtype}). Please cast "
                "your data to a float type.",
                attr="dtype",
                obj=self,
            )

        A, B, C, D = self._get_ss(dt)

        # create state memory variable X
        X = np.zeros((A.shape[0],) + shape_out, dtype=dtype)

        # initialize X using y0 as steady-state output
        y0 = np.array(y0, copy=False, ndmin=2)
        if (y0 == 0).all():
            # just leave X as zeros in this case, so that this value works
            # for unstable systems
            pass
        elif LinearFilter.OneX.check(A, B, C, D, X):
            # OneX combines B and C into one scaling value `b`
            b = B.item() * C.item()
            X[:] = (b / (1 - A.item())) * y0
        else:
            # Solve for u0 (input) given y0 (output), then X given u0
            assert B.ndim == 1 or B.ndim == 2 and B.shape[1] == 1
            IAB = np.linalg.solve(np.eye(len(A)) - A, B)
            Q = C.dot(IAB) + D  # multiplier from input to output (DC gain)
            assert Q.size == 1
            if np.abs(Q.item()) > 1e-8:
                u0 = y0 / Q.item()
                X[:] = IAB.dot(u0)
            else:
                raise ValidationError(
                    "Cannot solve for state if DC gain is zero. Please set `y0=0`.",
                    "y0",
                    obj=self,
                )

        return {"X": X}

    def make_step(self, shape_in, shape_out, dt, rng, state):
        """Returns a `.Step` instance that implements the linear filter."""
        assert shape_in == shape_out
        assert state is not None

        A, B, C, D = self._get_ss(dt)
        X = state["X"]

        if LinearFilter.NoX.check(A, B, C, D, X):
            return LinearFilter.NoX(A, B, C, D, X)
        if LinearFilter.OneXScalar.check(A, B, C, D, X):
            return LinearFilter.OneXScalar(A, B, C, D, X)
        elif LinearFilter.OneX.check(A, B, C, D, X):
            return LinearFilter.OneX(A, B, C, D, X)
        elif LinearFilter.NoD.check(A, B, C, D, X):
            return LinearFilter.NoD(A, B, C, D, X)
        else:
            assert LinearFilter.General.check(A, B, C, D, X)
            return LinearFilter.General(A, B, C, D, X)

    class Step:
        """Abstract base class for LTI filtering step functions."""

        def __init__(self, A, B, C, D, X):
            if not self.check(A, B, C, D, X):
                raise ValidationError(
                    "Matrices do not meet the requirements for this Step",
                    attr="A,B,C,D,X",
                    obj=self,
                )
            self.A = A
            self.B = B
            self.C = C
            self.D = D
            self.X = X

        def __call__(self, t, signal):
            raise NotImplementedError("Step object must implement __call__")

        @classmethod
        def check(cls, A, B, C, D, X):
            if A.size == 0:
                return X.size == B.size == C.size == 0 and D.size == 1
            else:
                return (
                    A.shape[0] == A.shape[1] == B.shape[0] == C.shape[1]
                    and A.shape[0] == X.shape[0]
                    and C.shape[0] == B.shape[1] == 1
                    and D.size == 1
                )

    class NoX(Step):
        """Step for system with no state, only passthrough matrix (D)."""

        def __init__(self, A, B, C, D, X):
            super().__init__(A, B, C, D, X)
            self.d = D.item()

        def __call__(self, t, signal):
            return self.d * signal

        @classmethod
        def check(cls, A, B, C, D, X):
            return super().check(A, B, C, D, X) and A.size == 0

    class OneX(Step):
        """Step for systems with one state element and no passthrough (D)."""

        def __init__(self, A, B, C, D, X):
            super().__init__(A, B, C, D, X)
            self.a = A.item()
            self.b = C.item() * B.item()

        def __call__(self, t, signal):
            self.X *= self.a
            self.X += self.b * signal
            return self.X[0]

        @classmethod
        def check(cls, A, B, C, D, X):
            return super().check(A, B, C, D, X) and (len(A) == 1 and (D == 0).all())

    class OneXScalar(OneX):
        """Step for systems with one state element, no passthrough, and a size-1 input.

        Using the builtin float math improves performance.
        """

        def __call__(self, t, signal):
            self.X[:] = self.a * self.X.item() + self.b * signal.item()
            return self.X[0]

        @classmethod
        def check(cls, A, B, C, D, X):
            return super().check(A, B, C, D, X) and X.size == 1

    class NoD(Step):
        """Step for systems with no passthrough matrix (D).

        Implements::

            x[t] = A x[t-1] + B u[t]
            y[t] = C x[t]

        Note how the input has been advanced one step as compared with the
        General system below, to remove the unnecessary delay.
        """

        def __call__(self, t, signal):
            self.X[:] = np.dot(self.A, self.X) + self.B * signal
            return np.dot(self.C, self.X)[0]

        @classmethod
        def check(cls, A, B, C, D, X):
            return super().check(A, B, C, D, X) and (len(A) >= 1 and (D == 0).all())

    class General(Step):
        """Step for any LTI system with at least one state element (X).

        Implements::

            x[t+1] = A x[t] + B u[t]
            y[t] = C x[t] + D u[t]

        Use ``NoX`` for systems with no state elements.
        """

        def __call__(self, t, signal):
            Y = np.dot(self.C, self.X)[0] + self.D * signal
            self.X[:] = np.dot(self.A, self.X) + self.B * signal
            return Y

        @classmethod
        def check(cls, A, B, C, D, X):
            return super().check(A, B, C, D, X) and len(A) >= 1
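A usage sketch: a first-order lowpass written as a transfer function and
used as a connection synapse (equivalent to `nengo.Lowpass(0.03)`):

import nengo

with nengo.Network() as model:
    a = nengo.Ensemble(50, dimensions=1)
    b = nengo.Ensemble(50, dimensions=1)
    nengo.Connection(a, b, synapse=LinearFilter([1], [0.03, 1]))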
Example #19
class LinearFilter(Synapse):
    """General linear time-invariant (LTI) system synapse.

    This class can be used to implement any linear filter, given the
    filter's transfer function. [1]_


    Parameters
    ----------
    num : array_like
        Numerator coefficients of continuous-time transfer function.
    den : array_like
        Denominator coefficients of continuous-time transfer function.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Filter_%28signal_processing%29
    """

    num = NdarrayParam('num', shape='*')
    den = NdarrayParam('den', shape='*')
    analog = BoolParam('analog')

    def __init__(self, num, den, analog=True):
        super(LinearFilter, self).__init__()
        self.num = num
        self.den = den
        self.analog = analog

    def __repr__(self):
        return "%s(%s, %s, analog=%r)" % (self.__class__.__name__, self.num,
                                          self.den, self.analog)

    def make_step(self, dt, output, method='zoh'):
        num, den = self.num, self.den
        if self.analog:
            num, den, _ = cont2discrete((num, den), dt, method=method)
            num = num.flatten()

        if den[0] != 1.:
            raise ValidationError("First element of the denominator must be 1",
                                  attr='den',
                                  obj=self)
        num = num[1:] if num[0] == 0 else num
        den = den[1:]  # drop first element (equal to 1)

        if len(num) == 1 and len(den) == 0:
            return LinearFilter.NoDen(num, den, output)
        elif len(num) == 1 and len(den) == 1:
            return LinearFilter.Simple(num, den, output)
        return LinearFilter.General(num, den, output)

    class Step(object):
        """Abstract base class for LTI filtering step functions."""
        def __init__(self, num, den, output):
            self.num = num
            self.den = den
            self.output = output

        def __call__(self, signal):
            raise NotImplementedError("Step functions must implement __call__")

    class NoDen(Step):
        """An LTI step function for transfer functions with no denominator.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output):
            if len(den) > 0:
                raise ValidationError("'den' must be empty (got length %d)" %
                                      len(den),
                                      attr='den',
                                      obj=self)
            super(LinearFilter.NoDen, self).__init__(num, den, output)
            self.b = num[0]

        def __call__(self, signal):
            self.output[...] = self.b * signal

    class Simple(Step):
        """An LTI step function for transfer functions with one num and den.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output):
            if len(num) != 1:
                raise ValidationError("'num' must be length 1 (got %d)" %
                                      len(num),
                                      attr='num',
                                      obj=self)
            if len(den) != 1:
                raise ValidationError("'den' must be length 1 (got %d)" %
                                      len(den),
                                      attr='den',
                                      obj=self)

            super(LinearFilter.Simple, self).__init__(num, den, output)
            self.b = num[0]
            self.a = den[0]

        def __call__(self, signal):
            self.output *= -self.a
            self.output += self.b * signal

    class General(Step):
        """An LTI step function for any given transfer function.

        Implements a discrete-time LTI system using the difference equation
        [1]_ for the given transfer function (num, den).

        References
        ----------
        .. [1] http://en.wikipedia.org/wiki/Digital_filter#Difference_equation
        """
        def __init__(self, num, den, output):
            super(LinearFilter.General, self).__init__(num, den, output)
            self.x = collections.deque(maxlen=len(num))
            self.y = collections.deque(maxlen=len(den))

        def __call__(self, signal):
            self.output[...] = 0

            self.x.appendleft(np.array(signal))
            for k, xk in enumerate(self.x):
                self.output += self.num[k] * xk
            for k, yk in enumerate(self.y):
                self.output -= self.den[k] * yk
            self.y.appendleft(np.array(self.output))
Example #20
class PresentInputWithPause(Process):
    """Present a series of inputs, each for the same fixed length of time.

    Parameters
    ----------
    inputs : array_like
        Inputs to present, where each row is an input. Rows will be flattened.
    presentation_time : float
        Show each input for this amount of time (in seconds).
    pause_time : float
        Pause time after each input (in seconds).
    """

    inputs = NdarrayParam("inputs", shape=("...", ))
    presentation_time = NumberParam("presentation_time", low=0, low_open=True)
    pause_time = NumberParam("pause_time", low=0, low_open=True)

    def __init__(self, inputs, presentation_time, pause_time, **kwargs):
        self.inputs = inputs
        self.presentation_time = presentation_time
        self.pause_time = pause_time
        super().__init__(default_size_in=0,
                         default_size_out=self.inputs[0].size,
                         **kwargs)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0, )
        assert shape_out == (self.inputs[0].size, )
        n = len(self.inputs)
        inputs = self.inputs.reshape(n, -1)
        presentation_time = float(self.presentation_time)
        pause_time = float(self.pause_time)
        total_time = presentation_time + pause_time

        def step_presentinput(t):
            t = round(t, 6)
            ti = t % total_time
            if ti > presentation_time:
                # No output during the pause between inputs
                return None
            # Present the current input, cycling through the rows
            i = int(t / total_time)
            return inputs[i % n]

        return step_presentinput
Example #21
class Conv3(Process):
    shape_in = TupleParam(length=4)
    shape_out = TupleParam(length=4)
    filters = NdarrayParam(shape=('...',))
    biases = NdarrayParam(shape=('...',), optional=True)

    def __init__(self, shape_in, filters, biases=None):
        self.shape_in = tuple(shape_in)
        if len(self.shape_in) != 4:
            raise ValueError("`shape_in` must have four dimensions "
                             "(channels,depth, height, width)")

        self.filters = filters
        self.shape_out = (self.filters.shape[0],) + self.shape_in[1:]
        if self.filters.ndim != 5:
            raise ValueError(
                "`filters` must have five dimensions "
                "(filters, channels, f_depth, f_height, f_width)")
        if self.filters.shape[-4] != self.shape_in[0]:
            raise ValueError(
                "Filter channels (%d) and input channels (%d) must match"
                % (self.filters.shape[-4], self.shape_in[0]))

        self.biases = biases if biases is not None else None
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1,) + self.shape_out[1:]
            else:
                raise ValueError(
                    "Biases size (%d) does not match output shape %s"
                    % (self.biases.size, self.shape_out))

        super(Conv3, self).__init__(
            default_size_in=np.prod(self.shape_in),
            default_size_out=np.prod(self.shape_out))

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == np.prod(self.shape_in)
        assert size_out == np.prod(self.shape_out)

        filters = self.filters  # always 5-D here (see check in __init__)
        biases = self.biases
        shape_in = self.shape_in
        shape_out = self.shape_out

        def step_conv3(t, x):
            x = x.reshape(shape_in)
            nk, ni, nj = shape_in[-3:]
            f = filters.shape[0]
            sk, si, sj = filters.shape[-3:]
            si2 = (si - 1) // 2  # integer division: used in slice indices
            sj2 = (sj - 1) // 2
            sk2 = (sk - 1) // 2

            y = np.zeros(shape_out)
            for k in range(nk):
                for i in range(ni):
                    for j in range(nj):
                        i0, i1 = i - si2, i + si2 + 1
                        j0, j1 = j - sj2, j + sj2 + 1
                        k0, k1 = k - sk2, k + sk2 + 1
                        sli = slice(max(-i0, 0), min(ni + si - i1, si))
                        slj = slice(max(-j0, 0), min(nj + sj - j1, sj))
                        slk = slice(max(-k0, 0), min(nk + sk - k1, sk))
                        w = filters[:, :, slk, sli, slj]
                        xkij = x[:, max(k0, 0):min(k1, nk),
                                 max(i0, 0):min(i1, ni),
                                 max(j0, 0):min(j1, nj)]
                        y[:, k, i, j] = np.dot(xkij.ravel(),
                                               w.reshape(f, -1).T)

            if biases is not None:
                y += biases

            return y.ravel()

        return step_conv3
Example #22
class LinearFilter(Synapse):
    """General linear time-invariant (LTI) system synapse.

    This class can be used to implement any linear filter, given the
    filter's transfer function. [1]_

    Parameters
    ----------
    num : array_like
        Numerator coefficients of transfer function.
    den : array_like
        Denominator coefficients of transfer function.
    analog : boolean, optional (Default: True)
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.

    Attributes
    ----------
    analog : boolean
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.
    den : ndarray
        Denominator coefficients of transfer function.
    num : ndarray
        Numerator coefficients of transfer function.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Filter_%28signal_processing%29
    """

    num = NdarrayParam('num', shape='*')
    den = NdarrayParam('den', shape='*')
    analog = BoolParam('analog')

    def __init__(self, num, den, analog=True, **kwargs):
        super(LinearFilter, self).__init__(**kwargs)
        self.num = num
        self.den = den
        self.analog = analog

    def __repr__(self):
        return "%s(%s, %s, analog=%r)" % (
            type(self).__name__, self.num, self.den, self.analog)

    def evaluate(self, frequencies):
        """Evaluate the transfer function at the given frequencies.

        Examples
        --------

        Using the ``evaluate`` function to make a Bode plot::

            synapse = nengo.synapses.LinearFilter([1], [0.02, 1])
            f = np.logspace(-1, 3, 100)
            y = synapse.evaluate(f)
            plt.subplot(211); plt.semilogx(f, 20*np.log10(np.abs(y)))
            plt.xlabel('frequency [Hz]'); plt.ylabel('magnitude [dB]')
            plt.subplot(212); plt.semilogx(f, np.angle(y))
            plt.xlabel('frequency [Hz]'); plt.ylabel('phase [radians]')
        """
        frequencies = 2.0j * np.pi * frequencies
        w = frequencies if self.analog else np.exp(frequencies)
        y = np.polyval(self.num, w) / np.polyval(self.den, w)
        return y

    def make_step(self, shape_in, shape_out, dt, rng, y0=None,
                  dtype=np.float64, method='zoh'):
        """Returns a `.Step` instance that implements the linear filter."""
        assert shape_in == shape_out

        num, den = self.num, self.den
        if self.analog:
            num, den, _ = cont2discrete((num, den), dt, method=method)
            num = num.flatten()

        if den[0] != 1.:
            raise ValidationError("First element of the denominator must be 1",
                                  attr='den', obj=self)
        num = num[1:] if num[0] == 0 else num
        den = den[1:]  # drop first element (equal to 1)
        num, den = num.astype(dtype), den.astype(dtype)

        output = np.zeros(shape_out, dtype=dtype)
        if len(num) == 1 and len(den) == 0:
            return LinearFilter.NoDen(num, den, output)
        elif len(num) == 1 and len(den) == 1:
            return LinearFilter.Simple(num, den, output, y0=y0)
        return LinearFilter.General(num, den, output, y0=y0)

    @staticmethod
    def _make_zero_step(shape_in, shape_out, dt, rng, y0=None,
                        dtype=np.float64):
        output = np.zeros(shape_out, dtype=dtype)
        if y0 is not None:
            output[:] = y0

        return LinearFilter.NoDen(np.array([1.]), np.array([]), output)

    class Step(object):
        """Abstract base class for LTI filtering step functions."""
        def __init__(self, num, den, output):
            self.num = num
            self.den = den
            self.output = output

        def __call__(self, t, signal):
            raise NotImplementedError("Step functions must implement __call__")

    class NoDen(Step):
        """An LTI step function for transfer functions with no denominator.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output):
            if len(den) > 0:
                raise ValidationError("'den' must be empty (got length %d)"
                                      % len(den), attr='den', obj=self)
            super(LinearFilter.NoDen, self).__init__(num, den, output)
            self.b = num[0]

        def __call__(self, t, signal):
            self.output[...] = self.b * signal
            return self.output

    class Simple(Step):
        """An LTI step function for transfer functions with one num and den.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output, y0=None):
            if len(num) != 1:
                raise ValidationError("'num' must be length 1 (got %d)"
                                      % len(num), attr='num', obj=self)
            if len(den) != 1:
                raise ValidationError("'den' must be length 1 (got %d)"
                                      % len(den), attr='den', obj=self)

            super(LinearFilter.Simple, self).__init__(num, den, output)
            self.b = num[0]
            self.a = den[0]
            if y0 is not None:
                self.output[...] = y0

        def __call__(self, t, signal):
            # y[n] = -a * y[n-1] + b * x[n]
            self.output *= -self.a
            self.output += self.b * signal
            return self.output

    class General(Step):
        """An LTI step function for any given transfer function.

        Implements a discrete-time LTI system using the difference equation
        [1]_ for the given transfer function (num, den).

        References
        ----------
        .. [1] http://en.wikipedia.org/wiki/Digital_filter#Difference_equation
        """
        def __init__(self, num, den, output, y0=None):
            super(LinearFilter.General, self).__init__(num, den, output)
            self.x = collections.deque(maxlen=len(num))
            self.y = collections.deque(maxlen=len(den))
            if y0 is not None:
                self.output[...] = y0
                for _ in num:
                    self.x.appendleft(np.array(self.output))
                for _ in den:
                    self.y.appendleft(np.array(self.output))

        def __call__(self, t, signal):
            self.output[...] = 0

            # Direct-form difference equation: accumulate the feedforward
            # terms from the current and past inputs, then subtract the
            # feedback terms from the past outputs.
            self.x.appendleft(np.array(signal))
            for k, xk in enumerate(self.x):
                self.output += self.num[k] * xk
            for k, yk in enumerate(self.y):
                self.output -= self.den[k] * yk
            self.y.appendleft(np.array(self.output))

            return self.output
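To see the dispatch in make_step in action, here is a minimal usage sketch, assuming this class is available as nengo.synapses.LinearFilter (as in the evaluate docstring above) and using the make_step signature shown above; all values are illustrative:

import numpy as np
import nengo

dt = 0.001
# First-order lowpass filter: H(s) = 1 / (0.005 s + 1)
synapse = nengo.synapses.LinearFilter([1], [0.005, 1])
step = synapse.make_step((1,), (1,), dt, np.random)  # dispatches to Simple

# Drive the filter with a unit step. The returned array is the mutated
# internal buffer, so copy it to keep a history.
response = [step((k + 1) * dt, np.array([1.0])).copy() for k in range(100)]
# The response approaches 1.0 with a time constant of roughly 0.005 s.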
Example #23
class MultivariateCopula(Distribution):
    """Generalized multivariate distribution.

    Uses the copula method to sample from a general multivariate distribution,
    given marginal distributions and copula covariances [1]_.

    Parameters
    ----------
    marginal_icdfs : iterable
        List of functions, each one being the inverse CDF of the marginal
        distribution across that dimension.
    rho : array_like (optional)
        Array of copula covariances [1]_ between parameters. Defaults to
        the identity matrix (independent parameters).

    See also
    --------
    gaussian_icdf, loggaussian_icdf, uniform_icdf

    References
    ----------
    .. [1] Copula (probability theory). Wikipedia.
       https://en.wikipedia.org/wiki/Copula_(probability_theory)
    """

    marginal_icdfs = TupleParam('marginal_icdfs', readonly=True)
    rho = NdarrayParam('rho', shape=('*', '*'), optional=True, readonly=True)

    def __init__(self, marginal_icdfs, rho=None):
        import scipy.stats  # we need this for sampling
        assert scipy.stats

        super(MultivariateCopula, self).__init__()
        self.marginal_icdfs = marginal_icdfs
        self.rho = rho

        d = len(self.marginal_icdfs)
        if not all(callable(f) for f in self.marginal_icdfs):
            raise ValueError("`marginal_icdfs` must be a list of callables")
        if self.rho is not None:
            if self.rho.shape != (d, d):
                raise ValueError("`rho` must be a %d x %d array" % (d, d))
            if not np.array_equal(self.rho, self.rho.T):
                raise ValueError(
                    "`rho` must be a symmetric (and positive-definite) array")

    def sample(self, n, d=None, rng=np.random):
        import scipy.stats as sps

        assert d is None or d == len(self.marginal_icdfs)
        d = len(self.marginal_icdfs)

        # normalize rho
        rho = np.eye(d) if self.rho is None else self.rho
        stds = np.sqrt(np.diag(rho))
        rho = rho / np.outer(stds, stds)

        # sample from copula
        x = sps.norm.cdf(
            sps.multivariate_normal.rvs(cov=rho, size=n, random_state=rng))

        # apply marginal inverse CDFs
        for i in range(d):
            x[:, i] = self.marginal_icdfs[i](x[:, i])

        return x
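A usage sketch for the copula sampler, building the marginal inverse CDFs directly from scipy.stats rather than the gaussian_icdf-style helpers referenced above (all values are illustrative):

import numpy as np
import scipy.stats as sps

# Marginals: a standard normal and a uniform on [0, 1], made dependent
# through the copula covariance rho.
icdfs = [sps.norm(0, 1).ppf, sps.uniform(0, 1).ppf]
rho = np.array([[1.0, 0.7],
                [0.7, 1.0]])

dist = MultivariateCopula(icdfs, rho=rho)
samples = dist.sample(1000, rng=np.random.RandomState(0))
# samples[:, 0] is approximately N(0, 1) and samples[:, 1] approximately
# U(0, 1); the two columns are positively dependent through the copula.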
Example #24
class Choice(Distribution):
    """Discrete distribution across a set of possible values.

    The same as `numpy.random.choice`, except can take vector or matrix values
    for the choices.

    Parameters
    ----------
    options : (N, ...) array_like
        The options (choices) to choose between. The choice is always done
        along the first axis, so if ``options`` is a matrix, the options are
        the rows of that matrix.
    weights : (N,) array_like, optional (Default: None)
        Weights controlling the probability of selecting each option. Will
        automatically be normalized. If None, all options are equally likely.
    """

    options = NdarrayParam('options', shape=('*', '...'))
    weights = NdarrayParam('weights', shape=('*',), optional=True)

    def __init__(self, options, weights=None):
        super(Choice, self).__init__()
        self.options = options
        self.weights = weights

        weights = (np.ones(len(self.options))
                   if self.weights is None else self.weights)
        if len(weights) != len(self.options):
            raise ValidationError(
                "Number of weights (%d) must match number of options (%d)" %
                (len(weights), len(self.options)),
                attr='weights',
                obj=self)
        if not np.all(weights >= 0):
            raise ValidationError("All weights must be non-negative",
                                  attr='weights',
                                  obj=self)
        total = float(weights.sum())
        if total <= 0:
            raise ValidationError("Sum of weights must be positive (got %f)" %
                                  total,
                                  attr='weights',
                                  obj=self)
        self.p = weights / total

    def __repr__(self):
        return "Choice(options=%r%s)" % (self.options,
                                         "" if self.weights is None else
                                         ", weights=%r" % self.weights)

    @property
    def dimensions(self):
        return np.prod(self.options.shape[1:])

    def sample(self, n, d=None, rng=np.random):
        if d is not None and self.dimensions != d:
            raise ValidationError("Options must be of dimensionality %d "
                                  "(got %d)" % (d, self.dimensions),
                                  attr='options',
                                  obj=self)

        i = np.searchsorted(np.cumsum(self.p), rng.rand(n))
        return self.options[i]
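A usage sketch with matrix-valued options (all values are illustrative):

import numpy as np

options = np.array([[0.0, 0.0],
                    [1.0, 0.0],
                    [0.0, 1.0]])
dist = Choice(options, weights=[1.0, 2.0, 1.0])  # middle row twice as likely
samples = dist.sample(5, d=2, rng=np.random.RandomState(0))
# Each row of samples is one of the three rows of options.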
Example #25
class PresentJitteredImages(Process):
    images = NdarrayParam('images', shape=('...', ))
    image_shape = ShapeParam('image_shape', length=3, low=1)
    output_shape = ShapeParam('output_shape', length=2, low=1)
    presentation_time = NumberParam('presentation_time', low=0, low_open=True)
    jitter_std = NumberParam('jitter_std', low=0, low_open=True, optional=True)
    jitter_tau = NumberParam('jitter_tau', low=0, low_open=True)

    def __init__(self,
                 images,
                 presentation_time,
                 output_shape,
                 jitter_std=None,
                 jitter_tau=None,
                 **kwargs):
        import scipy.ndimage.interpolation
        # ^ required for simulation, so check it here

        self.images = images
        self.presentation_time = presentation_time
        self.image_shape = images.shape[1:]
        self.output_shape = output_shape
        self.jitter_std = jitter_std
        self.jitter_tau = (presentation_time
                           if jitter_tau is None else jitter_tau)

        nc = self.image_shape[0]
        nyi, nyj = self.output_shape
        super(PresentJitteredImages,
              self).__init__(default_size_in=0,
                             default_size_out=nc * nyi * nyj,
                             **kwargs)

    def make_step(self, shape_in, shape_out, dt, rng):
        import scipy.ndimage.interpolation

        nc, nxi, nxj = self.image_shape
        nyi, nyj = self.output_shape
        ni, nj = nxi - nyi, nxj - nyj
        nij = np.array([ni, nj])
        assert shape_in == (0, )
        assert shape_out == (nc * nyi * nyj, )

        if self.jitter_std is None:
            si, sj = ni / 4., nj / 4.
        else:
            si = sj = self.jitter_std

        tau = self.jitter_tau

        n = len(self.images)
        images = self.images.reshape((n, nc, nxi, nxj))
        presentation_time = float(self.presentation_time)

        cij = (nij - 1) / 2.
        dt_over_tau = dt / tau
        sigma2 = np.sqrt(2. * dt / tau) * np.array([si, sj])
        ij = cij.copy()

        def step_presentjitteredimages(t):
            # Update the jitter position with an Ornstein-Uhlenbeck-style
            # random walk that decays back towards the image centre
            ij0 = dt_over_tau * (cij - ij) + sigma2 * rng.normal(size=2)
            ij[:] = (ij + ij0).clip((0, 0), (ni, nj))

            # select image
            k = int((t - dt) / presentation_time + 1e-7)
            image = images[k % n]

            # interpolate jittered sub-image
            i, j = ij
            image = scipy.ndimage.interpolation.shift(
                image, (0, ni - i, nj - j))[:, -nyi:, -nyj:]

            return image.ravel()

        return step_presentjitteredimages
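A usage sketch (illustrative shapes and values; scipy must be installed for the shift interpolation):

import numpy as np

# Ten single-channel 28x28 images, presented as jittered 24x24 crops.
images = np.random.RandomState(0).rand(10, 1, 28, 28)
process = PresentJitteredImages(images, presentation_time=0.1,
                                output_shape=(24, 24), jitter_std=1.0)
step = process.make_step((0,), (1 * 24 * 24,), dt=0.001, rng=np.random)
frame = step(0.001)  # flattened, jittered 24x24 view of the first image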
Example #26
class SparseMatrix(FrozenObject):
    """Represents a sparse matrix.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    indices : array_like of int
        An Nx2 array of integers indicating the (row,col) coordinates for the
        N non-zero elements in the matrix.
    data : array_like or `.Distribution`
        An Nx1 array defining the value of the nonzero elements in the matrix
        (corresponding to ``indices``), or a `.Distribution` that will be
        used to initialize the nonzero elements.
    shape : tuple of int
        Shape of the full matrix.
    """

    indices = NdarrayParam("indices", shape=("*", 2), dtype=np.int64)
    data = DistOrArrayParam("data", sample_shape=("*", ))
    shape = ShapeParam("shape", length=2)

    def __init__(self, indices, data, shape):
        super().__init__()

        self.indices = indices
        self.shape = shape

        # if data is not a distribution
        if is_array_like(data):
            data = np.asarray(data)

            # convert scalars to vectors
            if data.size == 1:
                data = data.item() * np.ones(self.indices.shape[0],
                                             dtype=data.dtype)

            if data.ndim != 1 or data.shape[0] != self.indices.shape[0]:
                raise ValidationError(
                    "Must be a vector of the same length as `indices`",
                    attr="data",
                    obj=self,
                )

        self.data = data
        self._allocated = None
        self._dense = None

    @property
    def dtype(self):
        return self.data.dtype

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def size(self):
        return self.indices.shape[0]

    def allocate(self):
        """Return a `scipy.sparse.csr_matrix` or dense matrix equivalent.

        We mark this data as readonly to be consistent with how other
        data associated with signals are allocated. If this allocated
        data is to be modified, it should be copied first.
        """

        if self._allocated is not None:
            return self._allocated

        if scipy_sparse is None:
            warnings.warn("Sparse operations require Scipy, which is not "
                          "installed. Using dense matrices instead.")
            self._allocated = self.toarray().view()
        else:
            self._allocated = scipy_sparse.csr_matrix(
                (self.data, self.indices.T), shape=self.shape)
            self._allocated.data.setflags(write=False)

        return self._allocated

    def sample(self, rng=np.random):
        """Convert `.Distribution` data to fixed array.

        Parameters
        ----------
        rng : `.numpy.random.mtrand.RandomState`
            Random number generator that will be used when
            sampling distribution.

        Returns
        -------
        matrix : `.SparseMatrix`
            A new `.SparseMatrix` instance with `.Distribution` converted to
            array if ``self.data`` is a `.Distribution`, otherwise simply
            returns ``self``.
        """
        if isinstance(self.data, Distribution):
            return SparseMatrix(
                self.indices,
                self.data.sample(self.indices.shape[0], rng=rng),
                self.shape,
            )
        else:
            return self

    def toarray(self):
        """Return the dense matrix equivalent of this matrix."""

        if self._dense is not None:
            return self._dense

        self._dense = np.zeros(self.shape, dtype=self.dtype)
        self._dense[self.indices[:, 0], self.indices[:, 1]] = self.data
        # Mark as readonly, if the user wants to modify they should copy first
        self._dense.setflags(write=False)
        return self._dense
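A usage sketch (illustrative values):

import numpy as np

# A 3x3 matrix with two nonzero entries.
indices = np.array([[0, 1],
                    [2, 0]])
mat = SparseMatrix(indices, data=[4.0, 5.0], shape=(3, 3))
dense = mat.toarray()
# dense == [[0., 4., 0.],
#           [0., 0., 0.],
#           [5., 0., 0.]]
sp = mat.allocate()  # scipy.sparse.csr_matrix when Scipy is installed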
Example #27
class SpatiallyConstrainedConnectivity(ConstrainedConnectivity):
    """
    Same as "ConstrainedConnectivity", but with a default callback for the
    "probabilities" callback that computes connection probabilities based on the
    location of the neurons.
    """

    sigma = NumberParam(
        name="sigma",
        low=0.0,
        default=0.25,
        low_open=True,
        readonly=True,
    )

    projection = NdarrayParam(
        name="projection",
        default=np.zeros((0, )),
        optional=True,
        shape=('*', '*'),
        readonly=True,
    )

    @property
    def _argreprs(self):
        return super()._argreprs + [
            "sigma={}".format(self.sigma),
            "projection={}".format(self.projection),
        ]

    def get_probabilities(self, n_pre, n_post, pre_obj, post_obj, data):
        # Fetch the neuron locations
        xs_pre, xs_post = None, None

        # If the "locations" attribute is set
        if (pre_obj in data) and hasattr(data[pre_obj], 'locations'):
            xs_pre = data[pre_obj].locations
        if (post_obj in data) and hasattr(data[post_obj], 'locations'):
            xs_post = data[post_obj].locations

        # We cannot compute connectivity constraints if the locations are not
        # defined -- just use uniform connection probabilities (by returning
        # "None")
        if (xs_pre is None) or (xs_post is None):
            return None

        # Make sure the number of pre-neurons and the number of post-neurons
        # are correct
        if xs_pre.ndim != 2:
            raise ValueError(
                "Pre-population neuron locations must be a 2D array, "
                "but got {}D array".format(xs_pre.ndim))
        if xs_post.ndim != 2:
            raise ValueError(
                "Post-population neuron locations must be a 2D array, "
                "but got {}D array".format(xs_post.ndim))
        if n_pre != xs_pre.shape[0]:
            raise ValueError(
                "Expected pre-population neuron location shape ({}, d_pre), "
                "but got ({}, d_pre)".format(n_pre, xs_pre.shape[0]))
        if n_post != xs_post.shape[0]:
            raise ValueError(
                "Expected post-population neuron location shape ({}, d_post), "
                "but got ({}, d_post)".format(n_post, xs_post.shape[0]))

        # Fetch the dimensionality of the neuron locations
        d_pre, d_post = xs_pre.shape[1], xs_post.shape[1]

        # Project the locations onto the minimum dimensionality
        d_min, d_max = min(d_pre, d_post), max(d_pre, d_post)
        P = (np.eye(d_min, d_max)
             if self.projection is None else self.projection)

        # Make sure the projection matrix has the correct size
        if (P.shape[0] != d_min and (d_min != d_max)) or (P.shape[1] != d_max):
            raise ValueError("Expected a projection matrix of size ({}, {}), "
                             "but got a projection matrix of shape {}".format(
                                 d_min, d_max, P.shape))

        # Apply the projection
        if xs_pre.shape[1] == d_max:
            xs_pre = xs_pre @ P.T
        if xs_post.shape[1] == d_max:
            xs_post = xs_post @ P.T

        # Compute the squared distance
        dists = np.sum(np.square(xs_pre[:, None] - xs_post[None, :]), axis=-1)

        # Apply exponential falloff
        return np.exp(-dists / np.square(self.sigma))

    def __init__(self,
                 convergence=None,
                 divergence=None,
                 probabilities=None,
                 sigma=0.25,
                 projection=None):

        # Call the inherited constructor
        super().__init__(convergence, divergence, probabilities)

        # Copy the sigma parameters
        self.sigma = sigma

        # Copy the projection parameter
        self.projection = projection

        # Copy the probabilities
        if probabilities is None:

            def get_probabilities_wrapper(*args, **kwargs):
                return self.get_probabilities(*args, **kwargs)

            self.probabilities = get_probabilities_wrapper
        else:
            self.probabilities = probabilities
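The default probabilities callback above reduces to a Gaussian kernel over squared pairwise distances, p_ij = exp(-||x_i - x_j||^2 / sigma^2); a standalone sketch of that computation (illustrative locations):

import numpy as np

sigma = 0.25
rng = np.random.RandomState(0)
xs_pre = rng.uniform(-1, 1, size=(4, 2))   # four pre-neurons in 2D
xs_post = rng.uniform(-1, 1, size=(3, 2))  # three post-neurons in 2D

# Squared pairwise distances, shape (4, 3)
dists = np.sum(np.square(xs_pre[:, None] - xs_post[None, :]), axis=-1)
probabilities = np.exp(-dists / np.square(sigma))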