Example #1
    def extend(self, keys, unitary=False):
        """Extends the vocabulary with additional keys.

        Creates and adds the semantic pointers listed in keys to the
        vocabulary.

        Parameters
        ----------
        keys : list
            List of semantic pointer names to be added to the vocabulary.
        unitary : bool or list, optional (Default: False)
            If True, all generated pointers will be unitary. If a list of
            strings, any pointer whose name is on the list will be forced to
            be unitary when created.
        """
        if is_iterable(unitary):
            if is_iterable(self.unitary):
                self.unitary.extend(unitary)
            else:
                self.unitary = list(unitary)
        elif unitary:
            if is_iterable(self.unitary):
                self.unitary.extend(keys)
            else:
                self.unitary = list(keys)

        for key in keys:
            if key not in self.keys:
                self[key]  # a missing key is created on lookup (__getitem__)
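
A minimal usage sketch (assuming the legacy nengo.spa.Vocabulary this
method belongs to; the pointer names are illustrative):

from nengo.spa import Vocabulary

vocab = Vocabulary(16)
vocab.extend(['A', 'B'])             # plain semantic pointers
vocab.extend(['Rot'], unitary=True)  # 'Rot' is created as a unitary pointer
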
Example #2
    def __init__(self, shape_in, filters, biases=None, stride=1, padding=0, activation='linear'):  # noqa: C901
        from nengo.utils.compat import is_iterable, is_integer

        self.shape_in = tuple(shape_in)
        self.filters = filters
        self.stride = stride if is_iterable(stride) else [stride] * 2
        self.padding = padding if is_iterable(padding) else [padding] * 2
        self.activation = activation

        nf = self.filters.shape[0]
        nxi, nxj = self.shape_in[1:]
        si, sj = self.filters.shape[-2:]
        pi, pj = self.padding
        sti, stj = self.stride
        nyi = 1 + max(int(np.ceil((2*pi + nxi - si) / float(sti))), 0)
        nyj = 1 + max(int(np.ceil((2*pj + nxj - sj) / float(stj))), 0)
        self.shape_out = (nf, nyi, nyj)

        self.biases = biases
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1,) + self.shape_out[1:]

        super(Conv2d, self).__init__(
            default_size_in=np.prod(self.shape_in),
            default_size_out=np.prod(self.shape_out))
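
To make the output-shape arithmetic above concrete, a standalone worked
example of the same formula (plain numpy, illustrative sizes):

import numpy as np

nxi, si, pi, sti = 28, 5, 0, 2  # input size, filter size, padding, stride
nyi = 1 + max(int(np.ceil((2*pi + nxi - si) / float(sti))), 0)
print(nyi)  # 13: a 28-wide input, 5-wide filter, stride 2
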
Example #3
    def add_input(self, name, input_vectors, input_scale=1.0):
        # Handle different vocabulary types
        if is_iterable(input_vectors):
            input_vectors = np.matrix(input_vectors)

        # Handle possible different input_scale values for each
        # element in the associative memory
        if not is_iterable(input_scale):
            input_scale = np.matrix([input_scale] * input_vectors.shape[0])
        else:
            input_scale = np.matrix(input_scale)
        if input_scale.shape[1] != input_vectors.shape[0]:
            raise ValueError(
                'Number of input_scale values does not match number of '
                'input vectors. Got: %d, expected %d.' %
                (input_scale.shape[1], input_vectors.shape[0]))

        input = nengo.Node(size_in=input_vectors.shape[1], label=name)

        if hasattr(self, name):
            raise NameError('Name "%s" already exists as a node in the '
                            'associative memory.' % name)
        else:
            setattr(self, name, input)

        nengo.Connection(input, self.elem_input,
                         synapse=None,
                         transform=np.multiply(input_vectors, input_scale.T))
Example #4
    def __init__(self,
                 shape_in,
                 pool_size,
                 strides=None,
                 kind='avg',
                 mode='full'):
        self.shape_in = shape_in
        self.pool_size = (pool_size
                          if is_iterable(pool_size) else [pool_size] * 2)
        self.strides = (strides if is_iterable(strides) else
                        [strides] * 2 if strides is not None else
                        self.pool_size)
        self.kind = kind
        self.mode = mode
        if not all(st <= p for st, p in zip(self.strides, self.pool_size)):
            raise ValueError("Strides %s must be <= pool_size %s" %
                             (self.strides, self.pool_size))

        nc, nxi, nxj = self.shape_in
        nyi_float = float(nxi - self.pool_size[0]) / self.strides[0]
        nyj_float = float(nxj - self.pool_size[1]) / self.strides[1]
        if self.mode == 'full':
            nyi = 1 + int(np.ceil(nyi_float))
            nyj = 1 + int(np.ceil(nyj_float))
        elif self.mode == 'valid':
            nyi = 1 + int(np.floor(nyi_float))
            nyj = 1 + int(np.floor(nyj_float))
        self.shape_out = (nc, nyi, nyj)

        super(Pool2d, self).__init__(default_size_in=np.prod(self.shape_in),
                                     default_size_out=np.prod(self.shape_out))
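
The 'full' and 'valid' modes above differ only in how the fractional
quotient is rounded, which matters when pooling does not tile evenly; a
standalone check with illustrative sizes:

import numpy as np

nxi, pool, stride = 7, 2, 2
q = float(nxi - pool) / stride  # 2.5
print(1 + int(np.ceil(q)))      # 4, the 'full' output size
print(1 + int(np.floor(q)))     # 3, the 'valid' output size
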
Example #5
    def add_input(self, name, input_vectors, input_scales=1.0):
        # Handle different vocabulary types
        if is_iterable(input_vectors):
            input_vectors = np.matrix(input_vectors)

        # Handle possible different input_scale values for each
        # element in the associative memory
        if not is_iterable(input_scales):
            input_scales = np.matrix([input_scales] * input_vectors.shape[0])
        else:
            input_scales = np.matrix(input_scales)
        if input_scales.shape[1] != input_vectors.shape[0]:
            raise ValueError(
                'Number of input_scale values does not match number of '
                'input vectors. Got: %d, expected %d.' %
                (input_scales.shape[1], input_vectors.shape[0]))

        input = nengo.Node(size_in=input_vectors.shape[1], label=name)

        if hasattr(self, name):
            raise NameError('Name "%s" already exists as a node in the '
                            'associative memory.' % name)
        else:
            setattr(self, name, input)

        nengo.Connection(input, self.elem_input,
                         synapse=None,
                         transform=np.multiply(input_vectors, input_scales.T))
Example #6
    def __init__(self, shape_in, pool_size, strides=None,
                 kind='avg', mode='full'):
        self.shape_in = shape_in
        self.pool_size = (pool_size if is_iterable(pool_size) else
                          [pool_size] * 2)
        self.strides = (strides if is_iterable(strides) else
                        [strides] * 2 if strides is not None else
                        self.pool_size)
        self.kind = kind
        self.mode = mode
        if not all(st <= p for st, p in zip(self.strides, self.pool_size)):
            raise ValueError("Strides %s must be <= pool_size %s" %
                             (self.strides, self.pool_size))

        nc, nxi, nxj = self.shape_in
        nyi_float = float(nxi - self.pool_size[0]) / self.strides[0]
        nyj_float = float(nxj - self.pool_size[1]) / self.strides[1]
        if self.mode == 'full':
            nyi = 1 + int(np.ceil(nyi_float))
            nyj = 1 + int(np.ceil(nyj_float))
        elif self.mode == 'valid':
            nyi = 1 + int(np.floor(nyi_float))
            nyj = 1 + int(np.floor(nyj_float))
        self.shape_out = (nc, nyi, nyj)

        super(Pool2d, self).__init__(
            default_size_in=np.prod(self.shape_in),
            default_size_out=np.prod(self.shape_out))
Example #7
    def __init__(self,
                 shape_in,
                 filters,
                 biases=None,
                 strides=1,
                 padding=0,
                 border='ceil'):  # noqa: C901
        self.shape_in = shape_in
        self.filters = filters
        if self.filters.ndim not in [4, 6]:
            raise ValueError(
                "`filters` must have four or six dimensions "
                "(filters, [height, width,] channels, f_height, f_width)")
        if self.filters.shape[-3] != self.shape_in[0]:
            raise ValueError(
                "Filter channels (%d) and input channels (%d) must match" %
                (self.filters.shape[-3], self.shape_in[0]))
        if not all(s % 2 == 1 for s in self.filters.shape[-2:]):
            raise ValueError("Filter shapes must be odd (got %r)" %
                             (self.filters.shape[-2:], ))

        self.strides = strides if is_iterable(strides) else [strides] * 2
        self.padding = padding if is_iterable(padding) else [padding] * 2
        self.border = border

        nf = self.filters.shape[0]
        nxi, nxj = self.shape_in[1:]
        si, sj = self.filters.shape[-2:]
        pi, pj = self.padding
        sti, stj = self.strides
        rounder = np.ceil if self.border == 'ceil' else np.floor
        nyi = 1 + max(int(rounder(float(2 * pi + nxi - si) / sti)), 0)
        nyj = 1 + max(int(rounder(float(2 * pj + nxj - sj) / stj)), 0)
        self.shape_out = (nf, nyi, nyj)
        if self.filters.ndim == 6 and self.filters.shape[1:3] != (nyi, nyj):
            raise ValueError("Number of local filters %r must match out shape "
                             "%r" % (self.filters.shape[1:3], (nyi, nyj)))

        self.biases = biases
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1, ) + self.shape_out[1:]
            else:
                raise ValueError(
                    "Biases size (%d) does not match output shape %s" %
                    (self.biases.size, self.shape_out))

        super(Conv2d, self).__init__(default_size_in=np.prod(self.shape_in),
                                     default_size_out=np.prod(self.shape_out))
Example #8
 def simplify(self):
     is_num = lambda x: isinstance(x, NumExp)
     if isinstance(self.fn, str):
         return self  # cannot simplify
     elif all(map(is_num, self.args)):
         # simplify scalar function
         return NumExp(self.fn(*[a.value for a in self.args]))
      elif all(is_num(a) or (is_iterable(a) and all(map(is_num, a)))
               for a in self.args):
         # simplify vector function
         return NumExp(self.fn(
             [[aa.value for aa in a] if is_iterable(a) else a.value
              for a in self.args]))
     else:
         return self  # cannot simplify
Example #9
    def __init__(self, shape_in, filters, biases=None, strides=1, padding=0):  # noqa: C901
        self.shape_in = shape_in
        self.filters = filters
        if self.filters.ndim not in [4, 6]:
            raise ValueError(
                "`filters` must have four or six dimensions "
                "(filters, [height, width,] channels, f_height, f_width)")
        if self.filters.shape[-3] != self.shape_in[0]:
            raise ValueError(
                "Filter channels (%d) and input channels (%d) must match"
                % (self.filters.shape[-3], self.shape_in[0]))
        if not all(s % 2 == 1 for s in self.filters.shape[-2:]):
            raise ValueError("Filter shapes must be odd (got %r)"
                             % (self.filters.shape[-2:],))

        self.strides = strides if is_iterable(strides) else [strides] * 2
        self.padding = padding if is_iterable(padding) else [padding] * 2

        nf = self.filters.shape[0]
        nxi, nxj = self.shape_in[1:]
        si, sj = self.filters.shape[-2:]
        pi, pj = self.padding
        sti, stj = self.strides
        nyi = 1 + max(int(np.ceil(float(2*pi + nxi - si) / sti)), 0)
        nyj = 1 + max(int(np.ceil(float(2*pj + nxj - sj) / stj)), 0)
        self.shape_out = (nf, nyi, nyj)
        if self.filters.ndim == 6 and self.filters.shape[1:3] != (nyi, nyj):
            raise ValueError("Number of local filters %r must match out shape "
                             "%r" % (self.filters.shape[1:3], (nyi, nyj)))

        self.biases = biases
        if self.biases is not None:
            if self.biases.size == 1:
                self.biases.shape = (1, 1, 1)
            elif self.biases.size == np.prod(self.shape_out):
                self.biases.shape = self.shape_out
            elif self.biases.size == self.shape_out[0]:
                self.biases.shape = (self.shape_out[0], 1, 1)
            elif self.biases.size == np.prod(self.shape_out[1:]):
                self.biases.shape = (1,) + self.shape_out[1:]
            else:
                raise ValueError(
                    "Biases size (%d) does not match output shape %s"
                    % (self.biases.size, self.shape_out))

        super(Conv2d, self).__init__(
            default_size_in=np.prod(self.shape_in),
            default_size_out=np.prod(self.shape_out))
Example #10
 def validate(self, instance, rule):
     if is_iterable(rule):
         for r in (itervalues(rule) if isinstance(rule, dict) else rule):
             self.validate_rule(instance, r)
     elif rule is not None:
         self.validate_rule(instance, rule)
     super(LearningRuleTypeParam, self).validate(instance, rule)
Example #12
    def __getitem__(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            rval = self.__class__.__new__(self.__class__)
            rval.starts = [self.starts[i] for i in item]
            rval.shape0s = [self.shape0s[i] for i in item]
            rval.shape1s = [self.shape1s[i] for i in item]
            rval.stride0s = [self.stride0s[i] for i in item]
            rval.stride1s = [self.stride1s[i] for i in item]
            rval.buf = self.buf
            rval.names = [self.names[i] for i in item]
            return rval
        else:
            if isinstance(item, np.ndarray):
                item.shape = ()  # avoid numpy DeprecationWarning

            itemsize = self.dtype.itemsize
            shape = (self.shape0s[item], self.shape1s[item])
            byteoffset = itemsize * self.starts[item]
            bytestrides = (itemsize * self.stride0s[item],
                           itemsize * self.stride1s[item])
            return np.ndarray(shape=shape,
                              dtype=self.dtype,
                              buffer=self.buf.data,
                              offset=byteoffset,
                              strides=bytestrides)
Example #13
 def coerce(self, instance, rule):
     if is_iterable(rule):
         for r in (itervalues(rule) if isinstance(rule, dict) else rule):
             self.check_rule(instance, r)
     elif rule is not None:
         self.check_rule(instance, rule)
     return super(LearningRuleTypeParam, self).coerce(instance, rule)
Example #14
    def __setitem__(self, item, new_value):
        if isinstance(item, slice) or is_iterable(item):
            raise NotImplementedError('TODO')
        else:
            m, n = self.shape0s[item], self.shape1s[item]
            sm, sn = self.stride0s[item], self.stride1s[item]

            if (sm, sn) in [(1, m), (n, 1)]:
                # contiguous
                clarray = self.getitem_device(item)
                if isinstance(new_value, np.ndarray):
                    array = new_value.astype(self.dtype)
                else:
                    array = np.zeros(clarray.shape, dtype=clarray.dtype)
                    array[...] = new_value

                array.shape = clarray.shape  # reshape to avoid warning
                clarray.set(array)
            else:
                # discontiguous
                #   Copy a contiguous region off the device that surrounds
                #   the discontiguous region, set the appropriate values,
                #   and copy back
                s = self.starts[item]
                array = to_host(self.queue, self.cl_buf.data, self.dtype,
                                s, (m, n), (sm, sn), is_blocking=True)
                array[...] = new_value

                buf = array.base if array.base is not None else array
                bytestart = self.dtype.itemsize * s
                cl.enqueue_copy(self.queue, self.cl_buf.data, buf,
                                device_offset=bytestart, is_blocking=True)
Example #15
 def validate(self, instance, rule):
     if is_iterable(rule):
         for lr in rule:
             self.validate_rule(instance, lr)
     elif rule is not None:
         self.validate_rule(instance, rule)
     super(LearningRuleParam, self).validate(instance, rule)
Example #16
    def __getitem__(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            rval = self.__class__.__new__(self.__class__)
            rval.starts = [self.starts[i] for i in item]
            rval.shape0s = [self.shape0s[i] for i in item]
            rval.shape1s = [self.shape1s[i] for i in item]
            rval.stride0s = [self.stride0s[i] for i in item]
            rval.stride1s = [self.stride1s[i] for i in item]
            rval.buf = self.buf
            rval.names = [self.names[i] for i in item]
            return rval
        else:
            if isinstance(item, np.ndarray):
                item.shape = ()  # avoid numpy DeprecationWarning

            itemsize = self.dtype.itemsize
            shape = (self.shape0s[item], self.shape1s[item])
            byteoffset = itemsize * self.starts[item]
            bytestrides = (itemsize * self.stride0s[item],
                           itemsize * self.stride1s[item])
            return np.ndarray(
                shape=shape, dtype=self.dtype, buffer=self.buf.data,
                offset=byteoffset, strides=bytestrides)
Example #17
    def __setitem__(self, item, new_value):
        if isinstance(item, slice) or is_iterable(item):
            raise NotImplementedError('TODO')
        else:
            m, n = self.shape0s[item], self.shape1s[item]
            sm, sn = self.stride0s[item], self.stride1s[item]

            if (sm, sn) in [(1, m), (n, 1)]:
                # contiguous
                clarray = self.getitem_device(item)
                if isinstance(new_value, np.ndarray):
                    array = np.asarray(new_value, order='C', dtype=self.dtype)
                else:
                    array = np.zeros(clarray.shape, dtype=clarray.dtype)
                    array[...] = new_value

                array.shape = clarray.shape  # reshape to avoid warning
                assert equal_strides(
                    array.strides, clarray.strides, clarray.shape)
                clarray.set(array)
            else:
                # discontiguous
                #   Copy a contiguous region off the device that surrounds
                #   the discontiguous region, set the appropriate values,
                #   and copy back
                s = self.starts[item]
                array = to_host(self.queue, self.cl_buf.data, self.dtype,
                                s, (m, n), (sm, sn), is_blocking=True)
                array[...] = new_value

                buf = array.base if array.base is not None else array
                bytestart = self.dtype.itemsize * s
                cl.enqueue_copy(self.queue, self.cl_buf.data, buf,
                                device_offset=bytestart, is_blocking=True)
Example #18
    def __init__(
            self, dimensions, strict=True, max_similarity=0.1,
            pointer_gen=None, name=None, algebra=None):
        if algebra is None:
            algebra = HrrAlgebra()
        self.algebra = algebra

        if not is_integer(dimensions) or dimensions < 1:
            raise ValidationError("dimensions must be a positive integer",
                                  attr='dimensions', obj=self)

        if pointer_gen is None:
            pointer_gen = UnitLengthVectors(dimensions)
        elif isinstance(pointer_gen, np.random.RandomState):
            pointer_gen = UnitLengthVectors(dimensions, pointer_gen)

        if not is_iterable(pointer_gen) or is_string(pointer_gen):
            raise ValidationError(
                "pointer_gen must be iterable or RandomState",
                attr='pointer_gen', obj=self)

        self.dimensions = dimensions
        self.strict = strict
        self.max_similarity = max_similarity
        self._key2idx = {}
        self._keys = []
        self._vectors = np.zeros((0, dimensions), dtype=float)
        self.pointer_gen = pointer_gen
        self.name = name
Example #19
    def add_output(self, name, function, synapse=None, **conn_kwargs):
        dims_per_ens = self.dimensions_per_ensemble

        # get output size for each ensemble
        sizes = np.zeros(self.n_ensembles, dtype=int)

        if is_iterable(function) and all(callable(f) for f in function):
            if len(list(function)) != self.n_ensembles:
                raise ValueError("Must have one function per ensemble")

            for i, func in enumerate(function):
                sizes[i] = np.asarray(func(np.zeros(dims_per_ens))).size
        elif callable(function):
            sizes[:] = np.asarray(function(np.zeros(dims_per_ens))).size
            function = [function] * self.n_ensembles
        elif function is None:
            sizes[:] = dims_per_ens
            function = [None] * self.n_ensembles
        else:
            raise ValueError(
                "'function' must be a callable, list of callables, or 'None'")

        output = nengo.Node(output=None, size_in=sizes.sum(), label=name)
        setattr(self, name, output)

        indices = np.zeros(len(sizes) + 1, dtype=int)
        indices[1:] = np.cumsum(sizes)
        for i, e in enumerate(self.ea_ensembles):
            nengo.Connection(e,
                             output[indices[i]:indices[i + 1]],
                             function=function[i],
                             synapse=synapse,
                             **conn_kwargs)

        return output
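
A usage sketch for this method (assuming nengo 2.x's EnsembleArray, where
add_output is defined; the output name 'squared' is illustrative):

import nengo

with nengo.Network():
    ea = nengo.networks.EnsembleArray(n_neurons=50, n_ensembles=3)
    sq = ea.add_output('squared', lambda x: x ** 2)
    # ea.squared (== sq) is a 3-D Node carrying each ensemble's squared value
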
Example #20
    def add_output_mapping(self, name, output_vectors):
        """Adds another output to the associative memory network.

        Creates a transform with the given output vectors between the
        associative memory element output and a named output node to enable the
        selection of output vectors by the associative memory.

        Parameters
        ----------
        name: str
            Name to use for the output node. This name will be used as
            the name of the attribute for the associative memory network.
        output_vectors: array_like
            The list of vectors to be produced for each match.
        """
        # --- Put arguments in canonical form
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        # --- Check preconditions
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name, attr='name')

        # --- Make the output node and connect it
        output = nengo.Node(size_in=output_vectors.shape[1], label=name)
        setattr(self, name, output)

        if self.thresh_ens is not None:
            c = nengo.Connection(self.thresh_ens.output, output,
                                 synapse=None, transform=output_vectors.T)
        else:
            c = nengo.Connection(self.elem_output, output,
                                 synapse=None, transform=output_vectors.T)
        self.out_conns.append(c)
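
A usage sketch (assuming nengo 2.x's nengo.networks.AssociativeMemory,
where this method is defined; the pattern and output matrices are
illustrative):

import numpy as np
import nengo

patterns = np.eye(4)[:2]  # two stored 4-D patterns
with nengo.Network():
    am = nengo.networks.AssociativeMemory(patterns)
    am.add_output_mapping('recalled', np.array([[1., 0., 0.],
                                                [0., 1., 0.]]))
    # am.recalled is now a 3-D Node driven by the element outputs
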
Example #21
    def add_output(self, name, function, synapse=None, **conn_kwargs):
        dims_per_ens = self.dimensions_per_ensemble

        # get output size for each ensemble
        sizes = np.zeros(self.n_ensembles, dtype=int)

        if is_iterable(function) and all(callable(f) for f in function):
            if len(list(function)) != self.n_ensembles:
                raise ValidationError(
                    "Must have one function per ensemble", attr='function')

            for i, func in enumerate(function):
                sizes[i] = np.asarray(func(np.zeros(dims_per_ens))).size
        elif callable(function):
            sizes[:] = np.asarray(function(np.zeros(dims_per_ens))).size
            function = [function] * self.n_ensembles
        elif function is None:
            sizes[:] = dims_per_ens
            function = [None] * self.n_ensembles
        else:
            raise ValidationError("'function' must be a callable, list of "
                                  "callables, or None", attr='function')

        output = nengo.Node(output=None, size_in=sizes.sum(), label=name)
        setattr(self, name, output)

        indices = np.zeros(len(sizes) + 1, dtype=int)
        indices[1:] = np.cumsum(sizes)
        for i, e in enumerate(self.ea_ensembles):
            nengo.Connection(
                e, output[indices[i]:indices[i+1]], function=function[i],
                synapse=synapse, **conn_kwargs)

        return output
Example #23
    def __init__(self, fn, in_dims=None, out_dim=None):
        if in_dims is not None and not is_iterable(in_dims):
            in_dims = [in_dims]

        self.fn = fn
        self.in_dims = in_dims
        self.out_dim = out_dim
        self._translator = None
Example #25
    def add_input_mapping(self, name, input_vectors, input_scales=1.0):
        """Adds a set of input vectors to the associative memory network.

        Creates a transform with the given input vectors between a named
        input node and the associative memory element input to enable the
        inputs to be mapped onto ensembles of the associative memory.

        Parameters
        ----------
        name: string
            Name to use for the input node. This name will be used as the name
            of the attribute for the associative memory network.

        input_vectors: array_like
            The list of vectors to be compared against.
        input_scales: array_like, optional
            Scaling factor to apply on each of the input vectors. Note that it
            is possible to scale each vector independently.
        """
        # --- Put arguments in canonical form
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        n_vectors, d_vectors = input_vectors.shape
        if not is_iterable(input_scales):
            input_scales = input_scales * np.ones((1, n_vectors))
        else:
            input_scales = np.array(input_scales, ndmin=2)

        # --- Check some preconditions
        if input_scales.shape[1] != n_vectors:
            raise ValidationError("Number of input_scale values (%d) does not "
                                  "match number of input vectors (%d)." %
                                  (input_scales.shape[1], n_vectors),
                                  attr='input_scales')
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name,
                                  attr='name')

        # --- Finally, make the input node and connect it
        in_node = nengo.Node(size_in=d_vectors, label=name)
        setattr(self, name, in_node)
        nengo.Connection(in_node,
                         self.elem_input,
                         synapse=None,
                         transform=input_vectors * input_scales.T)
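
A matching sketch for the input side (same assumptions as the
add_output_mapping sketch above; the second pattern is down-weighted):

import numpy as np
import nengo

patterns = np.eye(4)[:2]
with nengo.Network():
    am = nengo.networks.AssociativeMemory(patterns)
    am.add_input_mapping('cue', patterns, input_scales=[1.0, 0.5])
    # am.cue is a 4-D input Node compared against the stored patterns
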
Example #26
    def __init__(self,
                 n_neurons,
                 n_ensembles,
                 ens_dimensions=1,
                 neuron_nodes=False,
                 label=None,
                 seed=None,
                 add_to_container=None,
                 **ens_kwargs):
        if "dimensions" in ens_kwargs:
            raise ValidationError(
                "'dimensions' is not a valid argument to EnsembleArray. "
                "To set the number of ensembles, use 'n_ensembles'. To set "
                "the number of dimensions per ensemble, use 'ens_dimensions'.",
                attr='dimensions',
                obj=self)

        super(EnsembleArray, self).__init__(label, seed, add_to_container)

        for param in ens_kwargs:
            if is_iterable(ens_kwargs[param]):
                ens_kwargs[param] = nengo.dists.Samples(ens_kwargs[param])

        self.config[nengo.Ensemble].update(ens_kwargs)

        label_prefix = "" if label is None else label + "_"

        self.n_neurons = n_neurons
        self.n_ensembles = n_ensembles
        self.dimensions_per_ensemble = ens_dimensions

        # These may be set in add_neuron_input and add_neuron_output
        self.neuron_input, self.neuron_output = None, None

        self.ea_ensembles = []

        with self:
            self.input = nengo.Node(size_in=self.dimensions, label="input")

            for i in range(n_ensembles):
                e = nengo.Ensemble(n_neurons,
                                   self.dimensions_per_ensemble,
                                   label="%s%d" % (label_prefix, i))
                nengo.Connection(
                    self.input[i * ens_dimensions:(i + 1) * ens_dimensions],
                    e, synapse=None)
                self.ea_ensembles.append(e)

        if neuron_nodes:
            self.add_neuron_input()
            self.add_neuron_output()
            warnings.warn(
                "'neuron_nodes' argument will be removed in Nengo 2.2. Use "
                "'add_neuron_input' and 'add_neuron_output' methods instead.",
                DeprecationWarning)

        self.add_output('output', function=None)
Example #27
    def add_input_mapping(self, name, input_vectors, input_scales=1.0):
        """Adds a set of input vectors to the associative memory network.

        Creates a transform with the given input vectors between a named
        input node and the associative memory element input to enable the
        inputs to be mapped onto ensembles of the associative memory.

        Parameters
        ----------
        name: string
            Name to use for the input node. This name will be used as the name
            of the attribute for the associative memory network.

        input_vectors: array_like
            The list of vectors to be compared against.
        input_scales: array_like, optional
            Scaling factor to apply on each of the input vectors. Note that it
            is possible to scale each vector independently.
        """
        # --- Put arguments in canonical form
        if is_iterable(input_vectors):
            input_vectors = np.array(input_vectors, ndmin=2)
        n_vectors, d_vectors = input_vectors.shape
        if not is_iterable(input_scales):
            input_scales = input_scales * np.ones((1, n_vectors))
        else:
            input_scales = np.array(input_scales, ndmin=2)

        # --- Check some preconditions
        if input_scales.shape[1] != n_vectors:
            raise ValidationError("Number of input_scale values (%d) does not "
                                  "match number of input vectors (%d)."
                                  % (input_scales.shape[1], n_vectors),
                                  attr='input_scales')
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name, attr='name')

        # --- Finally, make the input node and connect it
        in_node = nengo.Node(size_in=d_vectors, label=name)
        setattr(self, name, in_node)
        nengo.Connection(in_node, self.elem_input,
                         synapse=None,
                         transform=input_vectors * input_scales.T)
Example #28
    def make_step(self, shape_in, shape_out, dt, rng):
        size_out = shape_out[0] if is_iterable(shape_out) else shape_out

        if self.base_output is None:
            f = self.passthrough
        elif isinstance(self.base_output, Process):
            f = self.base_output.make_step(shape_in, shape_out, dt, rng)
        else:
            f = self.base_output
        return self.Step(size_out, f, self.fast_client)
Example #29
    def function(self, _function):
        if _function is not None:
            self._check_pre_ensemble('function')
            x = (self.eval_points[0] if is_iterable(self.eval_points) else
                 np.zeros(self._pre.dimensions))
            size = np.asarray(_function(x)).size
        else:
            size = 0

        self._function = (_function, size)
        self._check_shapes()
Example #30
    def make_step(self, shape_in, shape_out, dt, rng):
        size_out = shape_out[0] if is_iterable(shape_out) else shape_out

        if self.base_output is None:
            f = self.passthrough
        elif isinstance(self.base_output, Process):
            f = self.base_output.make_step(shape_in, shape_out, dt, rng)
        else:
            f = self.base_output
        return self.Step(size_out, f,
                         to_client=self.to_client,
                         from_client=self.from_client)
Example #31
    def _broadcast_args(self, func, args):
        """Apply 'func' element-wise to lists of args"""
        as_list = lambda x: list(x) if is_iterable(x) else [x]
        args = list(map(as_list, args))
        arg_lens = list(map(len, args))
        max_len = max(arg_lens)
        assert all(n in [0, 1, max_len] for n in arg_lens), (
            "Could not broadcast arguments with lengths %s" % arg_lens)

        result = [func(*[a[i] if len(a) > 1 else a[0] for a in args])
                  for i in range(max_len)]
        result = [r.simplify() for r in result]
        return result[0] if len(result) == 1 else result
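
The broadcasting rule here (length-1 arguments are repeated out to the
longest argument) can be illustrated without the NumExp machinery:

def broadcast(func, args):
    as_list = lambda x: list(x) if hasattr(x, '__iter__') else [x]
    args = [as_list(a) for a in args]
    max_len = max(len(a) for a in args)
    return [func(*[a[i] if len(a) > 1 else a[0] for a in args])
            for i in range(max_len)]

print(broadcast(lambda x, y: x + y, [[1, 2, 3], 10]))  # [11, 12, 13]
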
Example #32
File: utils.py Project: hunse/phd
def lsuv(X, ws, fs, **kwargs):
    """Layer-sequential unit-variance initialization [1]_

    References
    ----------
    .. [1] Mishkin, D., & Matas, J. (2016). All you need is a good init.
       In ICLR 2016 (pp. 1-13).
    """
    fs = ([fs] * (len(ws) - 1) + [None]) if not is_iterable(fs) else fs
    assert len(ws) >= 2
    assert len(fs) == len(ws)

    for i, (w, f) in enumerate(zip(ws, fs)):
        X = lsuv_layer(X, w, f, layer_i=i, **kwargs)
    return X  # presumably intended: without it the transformed X is discarded
Example #33
 def visit_Return(self, expr):
     value = self.visit(expr.value)
     if is_iterable(value):
         self._check_vector_length(len(value))
         if not all(isinstance(v, Expression) for v in value):
             raise ValueError(
                 "Can only return a list of mathematical expressions")
         return ["%s[%d] = %s;" % (OUTPUT_NAME, i, v.to_ocl())
                 for i, v in enumerate(value)] + ["return;"]
     elif isinstance(value, Expression):
         return ["%s[0] = %s;" % (OUTPUT_NAME, value.to_ocl()), "return;"]
     else:
         raise ValueError("Can only return mathematical expressions, "
                          "or lists of expressions")
Example #34
 def learning_rule(self):
     if self.learning_rule_type is not None and self._learning_rule is None:
         types = self.learning_rule_type
         if isinstance(types, dict):
             self._learning_rule = types.__class__()  # dict of same type
             for k, v in iteritems(types):
                 self._learning_rule[k] = LearningRule(self, v)
         elif is_iterable(types):
             self._learning_rule = [LearningRule(self, v) for v in types]
         elif isinstance(types, LearningRuleType):
             self._learning_rule = LearningRule(self, types)
         else:
             raise ValueError("Invalid type for `learning_rule_type`: %s" %
                              (types.__class__.__name__))
     return self._learning_rule
Example #35
 def __getitem__(self, item):
     """
     Getting one item returns a numpy array (on the host).
     Getting multiple items returns a view into the device.
     """
     if is_iterable(item):
         return self.getitem_device(item)
     else:
         buf = to_host(
             self.queue, self.cl_buf.data, self.dtype, self.starts[item],
             (self.shape0s[item], self.shape1s[item]),
             (self.stride0s[item], self.stride1s[item]),
         )
         buf.setflags(write=False)
         return buf
Example #36
 def learning_rule(self):
     if self.learning_rule_type is not None and self._learning_rule is None:
         types = self.learning_rule_type
         if isinstance(types, dict):
             self._learning_rule = types.__class__()  # dict of same type
             for k, v in iteritems(types):
                 self._learning_rule[k] = LearningRule(self, v)
         elif is_iterable(types):
             self._learning_rule = [LearningRule(self, v) for v in types]
         elif isinstance(types, LearningRuleType):
             self._learning_rule = LearningRule(self, types)
         else:
             raise ValueError("Invalid type for `learning_rule_type`: %s"
                              % (types.__class__.__name__))
     return self._learning_rule
Example #38
def similarity(data, vocab, normalize=False):
    """Return the similarity between simulation data and Semantic Pointers.

    Computes the dot products between all Semantic Pointers in the Vocabulary
    and the simulation data for each timestep. If ``normalize=True``,
    normalizes all vectors to compute the cosine similarity.

    Parameters
    ----------
    data: (D,) or (T, D) array_like
        The *D*-dimensional data for *T* timesteps used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) used to calculate the similarity
        values.
    normalize : bool, optional
        Whether to normalize all vectors, to compute the cosine similarity.
    """

    if isinstance(data, SemanticPointer):
        data = data.v

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        if isinstance(next(iter(vocab)), SemanticPointer):
            vocab = [p.v for p in vocab]
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (type(vocab).__name__),
                              attr='vocab')

    dots = np.dot(vectors, data.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data.T, axis=0, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        if len(dots.shape) == 1:
            vnorm = np.squeeze(vnorm)

        dots /= dnorm
        dots /= vnorm

    return dots.T
Example #39
    def getitem_device(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            return CLRaggedArray.from_buffer(
                self.queue, self.cl_buf, self.starts[item],
                self.shape0s[item], self.shape1s[item],
                self.stride0s[item], self.stride1s[item],
                names=[self.names[i] for i in item])
        else:
            s = self.dtype.itemsize
            return Array(
                self.queue,
                (self.shape0s[item], self.shape1s[item]), self.dtype,
                strides=(self.stride0s[item] * s, self.stride1s[item] * s),
                data=self.cl_buf.data, offset=self.starts[item] * s)
Example #40
    def __getitem__(self, key):
        """Return the semantic pointer with the requested name.

        If one does not exist, automatically create one.  The key must be
        a valid semantic pointer name, which is any Python identifier starting
        with a capital letter.
        """
        if not key[0].isupper():
            raise KeyError("Semantic pointers must begin with a capital")
        value = self.pointers.get(key, None)
        if value is None:
            if is_iterable(self.unitary):
                unitary = key in self.unitary
            else:
                unitary = self.unitary
            value = self.create_pointer(unitary=unitary)
            self.add(key, value)
        return value
Example #41
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is not None and self._learning_rule is None:
            types = self.learning_rule_type
            if isinstance(types, dict):
                self._learning_rule = types.__class__()  # dict of same type
                for k, v in iteritems(types):
                    self._learning_rule[k] = LearningRule(self, v)
            elif is_iterable(types):
                self._learning_rule = [LearningRule(self, v) for v in types]
            elif isinstance(types, LearningRuleType):
                self._learning_rule = LearningRule(self, types)
            else:
                raise ValidationError(
                    "Invalid type %r" % types.__class__.__name__,
                    attr='learning_rule_type', obj=self)

        return self._learning_rule
Example #42
    def __getitem__(self, key):
        """Return the semantic pointer with the requested name.

        If one does not exist, automatically create one.  The key must be
        a valid semantic pointer name, which is any Python identifier starting
        with a capital letter.
        """
        if not key[0].isupper():
            raise KeyError('Semantic pointers must begin with a capital')
        value = self.pointers.get(key, None)
        if value is None:
            if is_iterable(self.unitary):
                unitary = key in self.unitary
            else:
                unitary = self.unitary
            value = self.create_pointer(unitary=unitary)
            self.add(key, value)
        return value
Example #43
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is not None and self._learning_rule is None:
            types = self.learning_rule_type
            if isinstance(types, dict):
                self._learning_rule = types.__class__()  # dict of same type
                for k, v in iteritems(types):
                    self._learning_rule[k] = LearningRule(self, v)
            elif is_iterable(types):
                self._learning_rule = [LearningRule(self, v) for v in types]
            elif isinstance(types, LearningRuleType):
                self._learning_rule = LearningRule(self, types)
            else:
                raise ValidationError("Invalid type %r" %
                                      types.__class__.__name__,
                                      attr='learning_rule_type',
                                      obj=self)

        return self._learning_rule
Example #44
    def add_output(self, name, output_vectors):
        # Handle different vocabulary types
        if is_iterable(output_vectors):
            output_vectors = np.matrix(output_vectors)

        output = nengo.Node(size_in=output_vectors.shape[1], label=name)

        if hasattr(self, name):
            raise NameError('Name "%s" already exists as a node in the '
                            'associative memory.' % name)
        else:
            setattr(self, name, output)

        if self.threshold_output:
            nengo.Connection(self.thresh_ens.output, output, synapse=None,
                             transform=output_vectors.T)
        else:
            nengo.Connection(self.elem_output, output, synapse=None,
                             transform=output_vectors.T)
Example #46
    def __init__(self, systems, dt=None, elementwise=False, method='zoh'):
        if not is_iterable(systems) or isinstance(systems, LinearSystem):
            systems = [systems]
        self.systems = systems
        self.dt = dt
        self.elementwise = elementwise

        self.A = []
        self.B = []
        self.C = []
        self.D = []
        for sys in systems:
            sys = LinearSystem(sys)
            if dt is not None:
                sys = cont2discrete(sys, dt, method=method)
            elif sys.analog:
                raise ValueError(
                    "system (%s) must be digital if not given dt" % sys)

            A, B, C, D = sys.ss
            self.A.append(A)
            self.B.append(B)
            self.C.append(C)
            self.D.append(D)

        # TODO: If all of the synapses are single order, then A is diagonal
        # and so np.dot(self.A, self._x) is trivial. But perhaps
        # block_diag is already optimized for this.

        # Note: ideally we could put this into CCF to reduce the A mapping
        # to a single dot product and a shift operation. But in general
        # since this is MIMO it is not controllable from a single input.
        # Instead we might want to consider balanced reduction to
        # improve efficiency.
        self.A = block_diag(*self.A)
        self.B = block_diag(*self.B) if elementwise else np.vstack(self.B)
        self.C = block_diag(*self.C)
        self.D = block_diag(*self.D) if elementwise else np.vstack(self.D)
        # TODO: shape validation

        self._x = np.zeros(len(self.A))[:, None]
Example #47
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If `normalize=True`, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: spa.Vocabulary, array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values
    normalize : boolean (optional)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary" %
                              (vocab.__class__.__name__),
                              attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
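
A minimal call (assuming this version is importable as
nengo.spa.similarity; note it also accepts a plain list of vectors):

import numpy as np
from nengo.spa import similarity

vocab_vectors = [[1., 0.], [0., 1.]]     # two unit "pointers"
data = np.array([[1., 0.], [0.7, 0.7]])  # two timesteps of 2-D data
print(similarity(data, vocab_vectors, normalize=True))
# row 0 -> [1, 0]; row 1 -> about [0.707, 0.707] once normalized
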
Example #48
    def __init__(self, systems, dt=None, elementwise=False, method='zoh'):
        if not is_iterable(systems):
            systems = [systems]
        self.systems = systems
        self.dt = dt
        self.elementwise = elementwise

        self.A = []
        self.B = []
        self.C = []
        self.D = []
        for sys in systems:
            sys = LinearSystem(sys)
            if dt is not None:
                sys = cont2discrete(sys, dt, method=method)
            elif sys.analog:
                raise ValueError(
                    "system (%s) must be digital if not given dt" % sys)

            A, B, C, D = sys.ss
            self.A.append(A)
            self.B.append(B)
            self.C.append(C)
            self.D.append(D)

        # TODO: If all of the synapses are single order, then A is diagonal
        # and so np.dot(self.A, self._x) is trivial. But perhaps
        # block_diag is already optimized for this.

        # Note: ideally we could put this into CCF to reduce the A mapping
        # to a single dot product and a shift operation. But in general
        # since this is MIMO it is not controllable from a single input.
        # Instead we might want to consider balanced reduction to
        # improve efficiency.
        self.A = block_diag(*self.A)
        self.B = block_diag(*self.B) if elementwise else np.vstack(self.B)
        self.C = block_diag(*self.C)
        self.D = block_diag(*self.D) if elementwise else np.vstack(self.D)
        # TODO: shape validation

        self._x = np.zeros(len(self.A))[:, None]
Example #49
    def add_output_mapping(self, name, output_vectors):
        """Adds another output to the associative memory network.

        Creates a transform with the given output vectors between the
        associative memory element output and a named output node to enable the
        selection of output vectors by the associative memory.

        Parameters
        ----------
        name: string
            Name to use for the output node. This name will be used as
            the name of the attribute for the associative memory network.

        output_vectors: array_like
            The list of vectors to be produced for each match.
        """
        # --- Put arguments in canonical form
        if is_iterable(output_vectors):
            output_vectors = np.array(output_vectors, ndmin=2)

        # --- Check preconditions
        if hasattr(self, name):
            raise ValidationError("Name '%s' already exists as a node in the "
                                  "associative memory." % name,
                                  attr='name')

        # --- Make the output node and connect it
        output = nengo.Node(size_in=output_vectors.shape[1], label=name)
        setattr(self, name, output)

        if self.thresh_ens is not None:
            c = nengo.Connection(self.thresh_ens.output,
                                 output,
                                 synapse=None,
                                 transform=output_vectors.T)
        else:
            c = nengo.Connection(self.elem_output,
                                 output,
                                 synapse=None,
                                 transform=output_vectors.T)
        self.out_conns.append(c)
Example #50
def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If `normalize=True`, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: spa.Vocabulary, array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values
    normalize : boolean (optional)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (vocab.__class__.__name__), attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
Example #51
    def getitem_device(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            return CLRaggedArray.from_buffer(
                self.queue,
                self.cl_buf,
                self.starts[item],
                self.shape0s[item],
                self.shape1s[item],
                self.stride0s[item],
                self.stride1s[item],
                names=[self.names[i] for i in item])
        else:
            s = self.dtype.itemsize
            return Array(self.queue, (self.shape0s[item], self.shape1s[item]),
                         self.dtype,
                         strides=(self.stride0s[item] * s,
                                  self.stride1s[item] * s),
                         data=self.cl_buf.data,
                         offset=self.starts[item] * s)
Example #52
    def getitem_device(self, item):
        if isinstance(item, slice):
            item = np.arange(len(self))[item]

        if is_iterable(item):
            rval = self.__class__.__new__(self.__class__)
            rval.queue = self.queue
            rval.starts = self.starts[item]
            rval.shape0s = self.shape0s[item]
            rval.shape1s = self.shape1s[item]
            rval.stride0s = self.stride0s[item]
            rval.stride1s = self.stride1s[item]
            rval.cl_buf = self.cl_buf
            rval.names = [self.names[i] for i in item]
            return rval
        else:
            s = self.dtype.itemsize
            return Array(
                self.queue,
                (self.shape0s[item], self.shape1s[item]), self.dtype,
                strides=(self.stride0s[item] * s, self.stride1s[item] * s),
                data=self.cl_buf.data, offset=self.starts[item] * s)
Example #53
File: utils.py Project: Ocode/nengo
def similarity(data, probe, vocab=None):
    """Return the similarity between the probed data and the vocabulary.

    Parameters
    ----------
    data: ProbeDict
        Collection of simulation data returned by sim.run() function call.
    probe: Probe
        Probe with desired data.
    vocab: spa.Vocabulary, list, np.ndarray, np.matrix, optional
        Optional vocabulary (or list of vectors) to use to calculate
        the similarity values

    """
    if vocab is None:
        probe_vectors = probe.target.vocab.vectors.T
    elif isinstance(vocab, Vocabulary):
        probe_vectors = vocab.vectors.T
    elif is_iterable(vocab):
        probe_vectors = np.matrix(vocab).T
    else:
        probe_vectors = vocab.T

    return np.dot(data[probe], probe_vectors)
Example #54
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
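
The five build steps above are what nengo.Simulator runs when handed a
network; a minimal end-to-end check:

import nengo

with nengo.Network() as net:
    a = nengo.Ensemble(10, dimensions=1)
    b = nengo.Ensemble(10, dimensions=1)
    nengo.Connection(a, b)
    nengo.Probe(b)

sim = nengo.Simulator(net)  # builds ensembles, then connections, then probes
sim.run(0.01)
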
Example #55
 def function_args(self, conn, function):
     x = (conn.eval_points[0] if is_iterable(conn.eval_points)
          else np.zeros(conn.size_in))
     return (x,)