Example #1
class Dense(Transform):
    """A dense matrix transformation between an input and output signal.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    shape : tuple of int
        The shape of the dense matrix: ``(size_out, size_in)``.
    init : `.Distribution` or array_like, optional
        A Distribution used to initialize the transform matrix, or a concrete
        instantiation for the matrix. If the matrix is square we also allow a
        scalar (equivalent to ``np.eye(n) * init``) or a vector (equivalent to
        ``np.diag(init)``) to represent the matrix more compactly.
    """

    shape = ShapeParam("shape", length=2, low=1)
    init = DistOrArrayParam("init")

    def __init__(self, shape, init=1.0):
        super().__init__()

        self.shape = shape

        if is_array_like(init):
            init = np.asarray(init, dtype=rc.float_dtype)

            # check that the shape of init is compatible with the given shape
            # for this transform
            expected_shape = None
            if shape[0] != shape[1]:
                # init must be 2D if transform is not square
                expected_shape = shape
            elif init.ndim == 1:
                expected_shape = (shape[0], )
            elif init.ndim >= 2:
                expected_shape = shape

            if expected_shape is not None and init.shape != expected_shape:
                raise ValidationError(
                    "Shape of initial value %s does not match expected "
                    "shape %s" % (init.shape, expected_shape),
                    attr="init",
                )

        self.init = init

    @property
    def _argreprs(self):
        return ["shape=%r" % (self.shape, )]

    def sample(self, rng=np.random):
        if isinstance(self.init, Distribution):
            return self.init.sample(*self.shape, rng=rng)

        return self.init

    @property
    def init_shape(self):
        """The shape of the initial value."""
        return self.shape if isinstance(self.init,
                                        Distribution) else self.init.shape

    @property
    def size_in(self):
        return self.shape[1]

    @property
    def size_out(self):
        return self.shape[0]
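
A minimal usage sketch (an assumption on my part, relying on the public ``nengo.Dense`` alias from Nengo 3.x rather than anything shown above): connect two ensembles with an explicit weight matrix whose shape is ``(size_out, size_in)``.

import numpy as np
import nengo

with nengo.Network() as net:
    a = nengo.Ensemble(50, dimensions=2)
    b = nengo.Ensemble(50, dimensions=3)

    # Explicit 3x2 matrix: rows index the output (b), columns the input (a).
    weights = np.array([[1.0, 0.0],
                        [0.0, 1.0],
                        [0.5, 0.5]])
    nengo.Connection(a, b, transform=nengo.Dense(shape=(3, 2), init=weights))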
Example #2
class SparseMatrix(FrozenObject):
    """Represents a sparse matrix.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    indices : array_like of int
        An Nx2 array of integers indicating the (row,col) coordinates for the
        N non-zero elements in the matrix.
    data : array_like or `.Distribution`
        An Nx1 array defining the value of the nonzero elements in the matrix
        (corresponding to ``indices``), or a `.Distribution` that will be
        used to initialize the nonzero elements.
    shape : tuple of int
        Shape of the full matrix.
    """

    indices = NdarrayParam("indices", shape=("*", 2), dtype=np.int64)
    data = DistOrArrayParam("data", sample_shape=("*", ))
    shape = ShapeParam("shape", length=2)

    def __init__(self, indices, data, shape):
        super().__init__()

        self.indices = indices
        self.shape = shape

        # if data is not a distribution
        if is_array_like(data):
            data = np.asarray(data)

            # convert scalars to vectors
            if data.size == 1:
                data = data.item() * np.ones(self.indices.shape[0],
                                             dtype=data.dtype)

            if data.ndim != 1 or data.shape[0] != self.indices.shape[0]:
                raise ValidationError(
                    "Must be a vector of the same length as `indices`",
                    attr="data",
                    obj=self,
                )

        self.data = data
        self._allocated = None
        self._dense = None

    @property
    def dtype(self):
        return self.data.dtype

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def size(self):
        return self.indices.shape[0]

    def allocate(self):
        """Return a `scipy.sparse.csr_matrix` or dense matrix equivalent.

        We mark this data as readonly to be consistent with how other
        data associated with signals are allocated. If this allocated
        data is to be modified, it should be copied first.
        """

        if self._allocated is not None:
            return self._allocated

        if scipy_sparse is None:
            warnings.warn("Sparse operations require Scipy, which is not "
                          "installed. Using dense matrices instead.")
            self._allocated = self.toarray().view()
        else:
            self._allocated = scipy_sparse.csr_matrix(
                (self.data, self.indices.T), shape=self.shape)
            self._allocated.data.setflags(write=False)

        return self._allocated

    def sample(self, rng=np.random):
        """Convert `.Distribution` data to fixed array.

        Parameters
        ----------
        rng : `.numpy.random.mtrand.RandomState`
            Random number generator that will be used when sampling the
            distribution.

        Returns
        -------
        matrix : `.SparseMatrix`
            A new `.SparseMatrix` instance with `.Distribution` converted to
            array if ``self.data`` is a `.Distribution`, otherwise simply
            returns ``self``.
        """
        if isinstance(self.data, Distribution):
            return SparseMatrix(
                self.indices,
                self.data.sample(self.indices.shape[0], rng=rng),
                self.shape,
            )
        else:
            return self

    def toarray(self):
        """Return the dense matrix equivalent of this matrix."""

        if self._dense is not None:
            return self._dense

        self._dense = np.zeros(self.shape, dtype=self.dtype)
        self._dense[self.indices[:, 0], self.indices[:, 1]] = self.data
        # Mark as readonly, if the user wants to modify they should copy first
        self._dense.setflags(write=False)
        return self._dense
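
A short sketch of how this class behaves, using only the methods shown above (the ``nengo.transforms`` import path is an assumption):

import numpy as np
from nengo.transforms import SparseMatrix

# Two nonzero entries in a 3x4 matrix, given as (row, col) coordinates.
indices = np.array([[0, 1],
                    [2, 3]])
data = np.array([0.5, -1.0])  # values paired row-for-row with ``indices``
mat = SparseMatrix(indices, data, shape=(3, 4))

dense = mat.toarray()    # read-only 3x4 ndarray; zeros except the two entries
sparse = mat.allocate()  # scipy.sparse.csr_matrix if SciPy is installed
assert dense[0, 1] == 0.5 and dense[2, 3] == -1.0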
Example #3
class Convolution(Transform):
    """An N-dimensional convolutional transform.

    The dimensionality of the convolution is determined by the input shape.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    n_filters : int
        The number of convolutional filters to apply.
    input_shape : tuple of int or `.ChannelShape`
        Shape of the input signal to the convolution; e.g.,
        ``(height, width, channels)`` for a 2D convolution with
        ``channels_last=True``.
    kernel_size : tuple of int, optional
        Size of the convolutional kernels (1 element for a 1D convolution,
        2 for a 2D convolution, etc.).
    strides : tuple of int, optional
        Stride of the convolution (1 element for a 1D convolution, 2 for
        a 2D convolution, etc.).
    padding : ``"same"`` or ``"valid"``, optional
        Padding method for input signal. "Valid" means no padding, and
        convolution will only be applied to the fully-overlapping areas of the
        input signal (meaning the output will be smaller). "Same" means that
        the input signal is zero-padded so that the output is the same shape
        as the input.
    channels_last : bool, optional
        If ``True`` (default), the channels are the last dimension in the input
        signal (e.g., a 28x28 image with 3 channels would have shape
        ``(28, 28, 3)``).  ``False`` means that channels are the first
        dimension (e.g., ``(3, 28, 28)``).
    init : `.Distribution` or `~numpy:numpy.ndarray`, optional
        A predefined kernel with shape
        ``kernel_size + (input_channels, n_filters)``, or a ``Distribution``
        that will be used to initialize the kernel.

    Notes
    -----
    As is typical in neural networks, this is technically correlation rather
    than convolution (because the kernel is not flipped).
    """

    n_filters = IntParam("n_filters", low=1)
    input_shape = ChannelShapeParam("input_shape", low=1)
    kernel_size = ShapeParam("kernel_size", low=1)
    strides = ShapeParam("strides", low=1)
    padding = EnumParam("padding", values=("same", "valid"))
    channels_last = BoolParam("channels_last")
    init = DistOrArrayParam("init")

    _param_init_order = ["channels_last", "input_shape"]

    def __init__(
            self,
            n_filters,
            input_shape,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding="valid",
            channels_last=True,
            init=Uniform(-1, 1),
    ):
        super().__init__()

        self.n_filters = n_filters
        self.channels_last = channels_last  # must be set before input_shape
        self.input_shape = input_shape
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.init = init

        if len(kernel_size) != self.dimensions:
            raise ValidationError(
                "Kernel dimensions (%d) do not match input dimensions (%d)" %
                (len(kernel_size), self.dimensions),
                attr="kernel_size",
            )
        if len(strides) != self.dimensions:
            raise ValidationError(
                "Stride dimensions (%d) do not match input dimensions (%d)" %
                (len(strides), self.dimensions),
                attr="strides",
            )
        if not isinstance(init, Distribution):
            if init.shape != self.kernel_shape:
                raise ValidationError(
                    "Kernel shape %s does not match expected shape %s" %
                    (init.shape, self.kernel_shape),
                    attr="init",
                )

    @property
    def _argreprs(self):
        argreprs = [
            "n_filters=%r" % (self.n_filters, ),
            "input_shape=%s" % (self.input_shape.shape, ),
        ]
        if self.kernel_size != (3, 3):
            argreprs.append("kernel_size=%r" % (self.kernel_size, ))
        if self.strides != (1, 1):
            argreprs.append("strides=%r" % (self.strides, ))
        if self.padding != "valid":
            argreprs.append("padding=%r" % (self.padding, ))
        if self.channels_last is not True:
            argreprs.append("channels_last=%r" % (self.channels_last, ))
        return argreprs

    def sample(self, rng=np.random):
        if isinstance(self.init, Distribution):
            # we sample this way so that any VarianceScaling distribution based
            # on n/d is scaled appropriately
            kernel = [
                self.init.sample(self.input_shape.n_channels,
                                 self.n_filters,
                                 rng=rng)
                for _ in range(np.prod(self.kernel_size))
            ]
            kernel = np.reshape(kernel, self.kernel_shape)
        else:
            kernel = np.array(self.init, dtype=rc.float_dtype)
        return kernel

    @property
    def kernel_shape(self):
        """Full shape of kernel."""
        return self.kernel_size + (self.input_shape.n_channels, self.n_filters)

    @property
    def size_in(self):
        return self.input_shape.size

    @property
    def size_out(self):
        return self.output_shape.size

    @property
    def dimensions(self):
        """Dimensionality of convolution."""
        return self.input_shape.dimensions

    @property
    def output_shape(self):
        """Output shape after applying convolution to input."""
        output_shape = np.array(self.input_shape.spatial_shape,
                                dtype=rc.float_dtype)
        if self.padding == "valid":
            output_shape -= self.kernel_size
            output_shape += 1
        output_shape /= self.strides
        output_shape = tuple(np.ceil(output_shape).astype(rc.int_dtype))
        output_shape = (
            output_shape + (self.n_filters,)
            if self.channels_last
            else (self.n_filters,) + output_shape
        )

        return ChannelShape(output_shape, channels_last=self.channels_last)
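
As a sketch of the shape bookkeeping above (assuming the top-level ``nengo.Convolution`` alias is available), a 3x3 kernel with "valid" padding and unit strides shrinks each spatial dimension by 2:

import nengo

conv = nengo.Convolution(
    n_filters=8,
    input_shape=(28, 28, 1),  # (height, width, channels), channels_last=True
    kernel_size=(3, 3),
    strides=(1, 1),
    padding="valid",
)
assert conv.kernel_shape == (3, 3, 1, 8)       # kernel_size + (in_channels, n_filters)
assert conv.output_shape.shape == (26, 26, 8)  # (28 - 3 + 1, 28 - 3 + 1, n_filters)
assert conv.size_in == 28 * 28 * 1
assert conv.size_out == 26 * 26 * 8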
Example #4
class _ConvolutionBase(Transform):
    """Abstract base class for Convolution and ConvolutionTranspose."""

    n_filters = IntParam("n_filters", low=1)
    input_shape = ChannelShapeParam("input_shape", low=1)
    kernel_size = ShapeParam("kernel_size", low=1)
    strides = ShapeParam("strides", low=1)
    padding = EnumParam("padding", values=("same", "valid"))
    channels_last = BoolParam("channels_last")
    init = DistOrArrayParam("init")
    groups = IntParam("groups", low=1)

    _param_init_order = ["channels_last", "input_shape"]

    def __init__(
        self,
        n_filters,
        input_shape,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding="valid",
        channels_last=True,
        init=Uniform(-1, 1),
        groups=1,
    ):
        super().__init__()

        self.n_filters = n_filters
        self.channels_last = channels_last  # must be set before input_shape
        self.input_shape = input_shape
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.init = init
        self.groups = groups

        if len(kernel_size) != self.dimensions:
            raise ValidationError(
                f"Kernel dimensions ({len(kernel_size)}) does not match "
                f"input dimensions ({self.dimensions})",
                attr="kernel_size",
            )
        if len(strides) != self.dimensions:
            raise ValidationError(
                f"Stride dimensions ({len(strides)}) does not match "
                f"input dimensions ({self.dimensions})",
                attr="strides",
            )
        if not isinstance(init, Distribution):
            if init.shape != self.kernel_shape:
                raise ValidationError(
                    f"Kernel shape {init.shape} does not match "
                    f"expected shape {self.kernel_shape}",
                    attr="init",
                )

        in_channels = self.input_shape.n_channels
        if groups > in_channels:
            raise ValidationError(
                f"Groups ({groups}) cannot be greater than "
                f"the number of input channels ({in_channels})",
                attr="groups",
            )
        if in_channels % groups != 0 or self.n_filters % groups != 0:
            raise ValidationError(
                f"Both the number of input channels ({in_channels}) and filters "
                f"({self.n_filters}) must be evenly divisible by ``groups`` ({groups})",
                attr="groups",
            )

    @property
    def _argreprs(self):
        argreprs = [
            f"n_filters={self.n_filters!r}",
            f"input_shape={self.input_shape.shape}",
        ]
        if self.kernel_size != (3, 3):
            argreprs.append(f"kernel_size={self.kernel_size!r}")
        if self.strides != (1, 1):
            argreprs.append(f"strides={self.strides!r}")
        if self.padding != "valid":
            argreprs.append(f"padding={self.padding!r}")
        if self.channels_last is not True:
            argreprs.append(f"channels_last={self.channels_last!r}")
        if self.groups != 1:
            argreprs.append(f"groups={self.groups!r}")
        return argreprs

    def sample(self, rng=np.random):
        if isinstance(self.init, Distribution):
            # we sample this way so that any VarianceScaling distribution based
            # on n/d is scaled appropriately
            kernel = [
                self.init.sample(
                    self.input_shape.n_channels // self.groups, self.n_filters, rng=rng
                )
                for _ in range(np.prod(self.kernel_size))
            ]
            kernel = np.reshape(kernel, self.kernel_shape)
        else:
            kernel = np.array(self.init, dtype=rc.float_dtype)
        return kernel

    @property
    def kernel_shape(self):
        """Full shape of kernel."""
        return self.kernel_size + (
            self.input_shape.n_channels // self.groups,
            self.n_filters,
        )

    @property
    def size_in(self):
        return self.input_shape.size

    @property
    def size_out(self):
        return self.output_shape.size

    @property
    def dimensions(self):
        """Dimensionality of convolution."""
        return self.input_shape.dimensions

    def _forward_shape(self, input_spatial_shape, n_filters):
        output_shape = np.array(input_spatial_shape, dtype=rc.float_dtype)
        if self.padding == "valid":
            output_shape -= self.kernel_size
            output_shape += 1
        output_shape /= self.strides
        output_shape = tuple(np.ceil(output_shape).astype(rc.int_dtype))

        return ChannelShape.from_space_and_channels(
            output_shape, n_filters, channels_last=self.channels_last
        )
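
A hedged sketch of how ``groups`` changes the kernel shape, assuming a Nengo version (3.2+) where the concrete ``nengo.Convolution`` subclasses this base and exposes the ``groups`` argument: with ``groups=2``, each filter only sees ``in_channels // groups`` input channels.

import nengo

conv = nengo.Convolution(
    n_filters=8,
    input_shape=(16, 16, 4),  # 4 input channels
    kernel_size=(3, 3),
    groups=2,                 # channels are split into 2 groups of 2
)
# kernel_size + (in_channels // groups, n_filters)
assert conv.kernel_shape == (3, 3, 2, 8)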
Example #5
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.

    radius : int, optional (Default: 1.0)
        The representational radius of the ensemble.
    encoders : Distribution or (n_neurons, dimensions) array_like, optional \
               (Default: UniformHypersphere(surface=True))
        The encoders used to transform from representational space
        to neuron space. Each row is a neuron's encoder; each column is a
        representational dimension.
    intercepts : Distribution or (n_neurons,) array_like, optional \
                 (Default: ``nengo.dists.Uniform(-1.0, 1.0)``)
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    max_rates : Distribution or (n_neurons,) array_like, optional \
                (Default: ``nengo.dists.Uniform(200, 400)``)
        The activity of each neuron when the input signal ``x`` is magnitude 1
        and aligned with that neuron's encoder ``e``;
        i.e., when ``dot(x, e) = 1``.
    eval_points : Distribution or (n_eval_points, dims) array_like, optional \
                  (Default: ``nengo.dists.UniformHypersphere()``)
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    n_eval_points : int, optional (Default: None)
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    neuron_type : `~nengo.neurons.NeuronType`, optional \
                  (Default: ``nengo.LIF()``)
        The model that simulates all neurons in the ensemble
        (see `~nengo.neurons.NeuronType`).
    gain : Distribution or (n_neurons,) array_like (Default: None)
        The gains associated with each neuron in the ensemble. If None, then
        the gain will be solved for using ``max_rates`` and ``intercepts``.
    bias : Distribution or (n_neurons,) array_like (Default: None)
        The biases associated with each neuron in the ensemble. If None, then
        the bias will be solved for using ``max_rates`` and ``intercepts``.
    noise : Process, optional (Default: None)
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    normalize_encoders : bool, optional (Default: True)
        Indicates whether the encoders should be normalized.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    bias : Distribution or (n_neurons,) array_like or None
        The biases associated with each neuron in the ensemble.
    dimensions : int
        The number of representational dimensions.
    encoders : Distribution or (n_neurons, dimensions) array_like
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    eval_points : Distribution or (n_eval_points, dims) array_like
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    gain : Distribution or (n_neurons,) array_like or None
        The gains associated with each neuron in the ensemble.
    intercepts : Distribution or (n_neurons,) array_like or None
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    label : str or None
        A name for the ensemble. Used for debugging and visualization.
    max_rates : Distribution or (n_neurons,) array_like or None
        The activity of each neuron when ``dot(x, e) = 1``,
        where ``e`` is the neuron's encoder.
    n_eval_points : int or None
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    n_neurons : int or None
        The number of neurons.
    neuron_type : NeuronType
        The model that simulates all neurons in the ensemble
        (see ``nengo.neurons``).
    noise : Process or None
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    radius : int
        The representational radius of the ensemble.
    seed : int or None
        The seed used for random number generation.
    """

    probeable = ('decoded_output', 'input', 'scaled_encoders')

    n_neurons = IntParam('n_neurons', low=1)
    dimensions = IntParam('dimensions', low=1)
    radius = NumberParam('radius', default=1.0, low=1e-10)
    encoders = DistOrArrayParam('encoders',
                                default=UniformHypersphere(surface=True),
                                sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistOrArrayParam('intercepts',
                                  default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    max_rates = DistOrArrayParam('max_rates',
                                 default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_neurons', ))
    eval_points = DistOrArrayParam('eval_points',
                                   default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    n_eval_points = IntParam('n_eval_points', default=None, optional=True)
    neuron_type = NeuronTypeParam('neuron_type', default=LIF())
    gain = DistOrArrayParam('gain',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    bias = DistOrArrayParam('bias',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    noise = ProcessParam('noise', default=None, optional=True)
    normalize_encoders = BoolParam('normalize_encoders',
                                   default=True,
                                   optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 normalize_encoders=Default,
                 label=Default,
                 seed=Default):
        super(Ensemble, self).__init__(label=label, seed=seed)
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.normalize_encoders = normalize_encoders

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        """A direct interface to the neurons in the ensemble."""
        return Neurons(self)

    @neurons.setter
    def neurons(self, dummy):
        raise ReadonlyError(attr="neurons", obj=self)

    @property
    def size_in(self):
        """The dimensionality of the ensemble."""
        return self.dimensions

    @property
    def size_out(self):
        """The dimensionality of the ensemble."""
        return self.dimensions
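
A minimal usage sketch (assuming a standard Nengo install): build an ensemble, probe its decoded output, and run it briefly.

import nengo

with nengo.Network(seed=0) as model:
    ens = nengo.Ensemble(
        n_neurons=100,
        dimensions=2,
        radius=1.5,
        max_rates=nengo.dists.Uniform(100, 200),
    )
    # "decoded_output" is one of the probeable attributes listed above
    probe = nengo.Probe(ens, "decoded_output", synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.1)
print(sim.data[probe].shape)  # (n_steps, dimensions)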
Example #6
class Test:
    dp = DistOrArrayParam("dp", default=None, sample_shape=["d1", 10])
    d1 = 4
Example #7
class Test:
    dp = DistOrArrayParam("dp", default=None, sample_shape=["*", "*"])
Example #8
class Test:
    dp = DistOrArrayParam('dp', default=None, sample_shape=['d1', 10])
    d1 = 4
Example #9
class Test:
    dp = DistOrArrayParam('dp', default=None, sample_shape=['*', '*'])
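
These fixtures exercise how ``sample_shape`` entries are resolved when an array is assigned: a string such as ``"d1"`` is looked up as an attribute on the owning instance, an integer is a fixed length, and ``"*"`` matches any length. A hedged sketch of the expected validation behaviour for the ``("d1", 10)`` variant (assuming, as in Nengo's parameter system, that assignment triggers shape validation):

import numpy as np

t = Test()                 # the fixture with sample_shape=["d1", 10] and d1 = 4
t.dp = np.ones((4, 10))    # accepted: first axis matches t.d1 == 4, second is 10
# t.dp = np.ones((5, 10))  # would raise ValidationError: first axis must equal d1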
Example #10
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : int, optional
        The representational radius of the ensemble.
    encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    intercepts : Distribution or ndarray (`n_neurons`), optional
        The point along each neuron's encoder where its activity is zero. If
        e is the neuron's encoder, then the activity will be zero when
        dot(x, e) <= c, where c is the given intercept.
    max_rates : Distribution or ndarray (`n_neurons`), optional
        The activity of each neuron when dot(x, e) = 1, where e is the neuron's
        encoder.
    eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which to
        choose evaluation points. Default: ``UniformHypersphere``.
    n_eval_points : int, optional
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None (the default), then a heuristic is used to
        determine the number of evaluation points.
    neuron_type : Neurons, optional
        The model that simulates all neurons in the ensemble.
    noise : Process, optional
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    seed : int, optional
        The seed used for random number generation.
    label : str, optional
        A name for the ensemble. Used for debugging and visualization.
    """

    n_neurons = IntParam(default=None, low=1)
    dimensions = IntParam(default=None, low=1)
    radius = NumberParam(default=1.0, low=1e-10)
    neuron_type = NeuronTypeParam(default=LIF())
    encoders = DistOrArrayParam(default=UniformHypersphere(surface=True),
                                sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistOrArrayParam(default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    max_rates = DistOrArrayParam(default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_neurons', ))
    n_eval_points = IntParam(default=None, optional=True)
    eval_points = DistOrArrayParam(default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    bias = DistOrArrayParam(default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    gain = DistOrArrayParam(default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    noise = ProcessParam(default=None, optional=True)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 seed=Default,
                 label=Default):

        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.label = label
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.seed = seed
        self._neurons = Neurons(self)

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        return self._neurons

    @neurons.setter
    def neurons(self, dummy):
        raise AttributeError("neurons cannot be overwritten.")

    @property
    def probeable(self):
        return ["decoded_output", "input"]

    @property
    def size_in(self):
        return self.dimensions

    @property
    def size_out(self):
        return self.dimensions
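
Parameters left at ``Default`` are resolved from the enclosing network's config; a hedged sketch of that mechanism (assuming the standard ``nengo.Network`` config machinery):

import nengo

with nengo.Network() as net:
    # change the network-wide default instead of passing radius per ensemble
    net.config[nengo.Ensemble].radius = 2.0

    ens_a = nengo.Ensemble(50, dimensions=1)              # picks up radius=2.0
    ens_b = nengo.Ensemble(50, dimensions=1, radius=0.5)  # explicit value wins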