Example #1
    def transform_to(self, other, keys=None):
        """Create a linear transform from one Vocabulary to another.

        This is simply the sum of the outer products of the corresponding
        terms in each Vocabulary.

        Parameters
        ----------
        other : Vocabulary
            The other vocabulary to translate into
        keys : list of strings
            If None, any term that exists in just one of the Vocabularies
            will be created in the other Vocabulary and included.  Otherwise,
            the transformation will only consider terms in this list.  Any
            terms in this list that do not exist in the Vocabularies will
            be created.
        """
        if keys is None:
            keys = list(self.keys)
            for k in other.keys:
                if k not in keys:
                    keys.append(k)

        t = np.zeros((other.dimensions, self.dimensions),
                     dtype=rc.get('precision', 'dtype'))
        for k in keys:
            a = self[k].v
            b = other[k].v
            t += np.outer(b, a)
        return t
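The transform is just a sum of outer products: for each shared key k with unit vectors a_k in this vocabulary and b_k in the other, t accumulates np.outer(b_k, a_k), so t maps a_k approximately onto b_k when the source vectors are close to orthogonal. A minimal NumPy sketch of that idea (the two toy "vocabularies" here are hypothetical stand-ins):

import numpy as np

rng = np.random.RandomState(0)

# Two toy 16-D "vocabularies": unit vectors for the keys A and B.
src = {k: v / np.linalg.norm(v) for k, v in zip('AB', rng.randn(2, 16))}
dst = {k: v / np.linalg.norm(v) for k, v in zip('AB', rng.randn(2, 16))}

# Sum of outer products, as in transform_to above.
t = sum(np.outer(dst[k], src[k]) for k in 'AB')

# High-dimensional random vectors are nearly orthogonal, so t maps each
# source vector close to its destination vector.
print(np.dot(np.dot(t, src['A']), dst['A']))   # close to 1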
Example #2
    def validate(self, instance, ndarray):
        ndim = len(self.shape)
        try:
            ndarray = np.asarray(ndarray, dtype=rc.get('precision', 'dtype'))
        except TypeError:
            raise ValueError("Must be a float NumPy array (got type '%s')" %
                             ndarray.__class__.__name__)

        if ndarray.ndim != ndim:
            raise ValueError("ndarray must be %dD (got %dD)" %
                             (ndim, ndarray.ndim))
        for i, attr in enumerate(self.shape):
            assert is_integer(attr) or is_string(attr), (
                "shape can only be an int or str representing an attribute")
            if attr == '*':
                continue

            desired = attr if is_integer(attr) else getattr(instance, attr)

            if not is_integer(desired):
                raise ValueError("%s not yet initialized; cannot determine "
                                 "if shape is correct. Consider using a "
                                 "distribution instead." % attr)

            if ndarray.shape[i] != desired:
                raise ValueError("shape[%d] should be %d (got %d)" %
                                 (i, desired, ndarray.shape[i]))
        return ndarray
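The shape specification mixes literal ints, the wildcard '*', and attribute names resolved on the configured instance. A self-contained sketch of that logic (check_shape and Obj are illustrative names, not the library's API, and asserts stand in for the ValueErrors above):

import numpy as np

def check_shape(instance, ndarray, shape):
    assert ndarray.ndim == len(shape)
    for i, attr in enumerate(shape):
        if attr == '*':
            continue  # wildcard: any extent is fine
        desired = attr if isinstance(attr, int) else getattr(instance, attr)
        assert ndarray.shape[i] == desired, (i, desired, ndarray.shape[i])

class Obj:
    size_out = 4

check_shape(Obj(), np.zeros((4, 7)), ('size_out', '*'))  # passes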
Example #3
    def shrink(self, limit=None):
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if is_string(limit):
            limit = human2bytes(limit)

        fileinfo = []
        excess = -limit
        for path in self.get_files():
            stat = safe_stat(path)
            if stat is not None:
                aligned_size = byte_align(stat.st_size, self._fragment_size)
                excess += aligned_size
                fileinfo.append((stat.st_atime, aligned_size, path))

        # Remove the least recently accessed first
        fileinfo.sort()

        for _, size, path in fileinfo:
            if excess <= 0:
                break

            excess -= size
            safe_remove(path)
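The accounting here is compact: excess starts at -limit and each file's aligned size is added, so after the scan excess equals the total cache size minus the limit; files are then dropped oldest-access-first until excess is no longer positive. A worked trace with illustrative numbers:

limit = 100
sizes = [40, 50, 30]            # aligned sizes of three cached files
excess = -limit + sum(sizes)    # 120 - 100 = 20 bytes over the limit

# Files sorted by access time, oldest first; remove until excess <= 0.
for size in sizes:
    if excess <= 0:
        break
    excess -= size              # removing the 40-byte file leaves -20
print(excess)                   # -20: one removal was enough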
Example #4
    def shrink(self, limit=None):
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if is_string(limit):
            limit = human2bytes(limit)

        fileinfo = []
        for filename in self.get_files():
            path = os.path.join(self.cache_dir, filename)
            stat = safe_stat(path)
            if stat is not None:
                fileinfo.append((stat.st_atime, stat.st_size, path))

        # Remove the least recently accessed first
        fileinfo.sort()

        excess = self.get_size_in_bytes() - limit
        for _, size, path in fileinfo:
            if excess <= 0:
                break

            excess -= size
            safe_remove(path)

        # We may have removed a decoder file but not solver_info file
        # or vice versa, so we'll remove all orphans
        self.remove_orphans()
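human2bytes and byte_align come from the surrounding cache utilities; the sketches below are only plausible stand-ins for their intent, not the package's implementations (the real human2bytes accepts more size formats than this):

def human2bytes(s):
    """Parse sizes like '512 MB' into a byte count (sketch)."""
    units = {'B': 1, 'KB': 1024, 'MB': 1024**2, 'GB': 1024**3}
    number, unit = s.split()
    return int(float(number) * units[unit.upper()])

def byte_align(size, fragment_size):
    """Round `size` up to the next multiple of the filesystem fragment."""
    return ((size + fragment_size - 1) // fragment_size) * fragment_size

print(human2bytes('512 MB'))   # 536870912
print(byte_align(1, 4096))     # 4096: even a 1-byte file occupies a fragment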
Example #6
    def validate(self, conn, transform):
        transform = np.asarray(transform, dtype=rc.get('precision', 'dtype'))

        if transform.ndim == 0:
            self.shape = ()
        elif transform.ndim == 1:
            self.shape = ('size_out', )
        elif transform.ndim == 2:
            # Actually (size_out, size_mid) but Function handles size_mid
            self.shape = ('size_out', '*')
        else:
            raise ValueError("Cannot handle transforms with dimensions > 2")

        # Check the shapes
        super(TransformParam, self).validate(conn, transform)

        if transform.ndim == 2:
            # check for repeated dimensions in lists, as these don't work
            # for two-dimensional transforms
            repeated_inds = lambda x: (
                not isinstance(x, slice) and np.unique(x).size != len(x))
            if repeated_inds(conn.pre_slice):
                raise ValueError("Input object selection has repeated indices")
            if repeated_inds(conn.post_slice):
                raise ValueError(
                    "Output object selection has repeated indices")

        return transform
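The repeated-index check matters because a two-dimensional transform assigns one row or column per selected dimension, and a list slice such as [0, 0, 1] would need the same dimension twice. The detection logic on its own:

import numpy as np

def repeated_inds(x):
    return not isinstance(x, slice) and np.unique(x).size != len(x)

print(repeated_inds(slice(None)))   # False: plain slices never repeat
print(repeated_inds([0, 2, 1]))     # False: all indices distinct
print(repeated_inds([0, 0, 1]))     # True: index 0 appears twice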
Example #7
    def get_default_dir():
        """Returns the default location of the cache.

        Returns
        -------
        str
        """
        return rc.get("decoder_cache", "path")
Example #8
    def get_default_dir():
        """Returns the default location of the cache.

        Returns
        -------
        str
        """
        return rc.get('decoder_cache', 'path')
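rc is Nengo's ini-backed settings object, so rc.get(section, option) behaves like a configparser lookup; a self-contained analogue (the values shown here are made up):

import configparser

rc = configparser.ConfigParser()
rc.read_string("""
[decoder_cache]
path = /tmp/nengo_cache
size = 512 MB
""")
print(rc.get('decoder_cache', 'path'))   # /tmp/nengo_cache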
Example #10
    def __init__(self, value, name=None, dtype=rc.get('precision', 'dtype')):
        # Make sure we use a C-contiguous array
        self._value = np.array(npext.castDecimal(value),
                               copy=False,
                               order='C',
                               dtype=dtype)
        if name is not None:
            self._name = name
        if Signal.assert_named_signals:
            assert name
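The order='C' argument guarantees a row-major, contiguous buffer for downstream operators; with copy=False, NumPy 1.x only copies when the input is not already contiguous (NumPy 2 renamed that behaviour to copy=None). A quick check of what contiguity means:

import numpy as np

a = np.arange(6.0).reshape(2, 3).T        # transposes are not C-contiguous
print(a.flags['C_CONTIGUOUS'])            # False
b = np.ascontiguousarray(a)               # same "copy only if needed" intent
print(b.flags['C_CONTIGUOUS'])            # True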
Example #11
def load_ipython_extension(ipython):
    if IPython.version_info[0] >= 5:
        warnings.warn(
            "Loading the nengo.ipynb notebook extension is no longer "
            "required. Progress bars are automatically activated for IPython "
            "version 5 and later.")
    elif has_ipynb_widgets() and rc.get('progress', 'progress_bar') == 'auto':
        warnings.warn(
            "The nengo.ipynb notebook extension is deprecated. Please upgrade "
            "to IPython version 5 or later.")

        IPythonProgressWidget.load_frontend(ipython)
        rc.set('progress', 'progress_bar', '.'.join(
            (__name__, IPython2ProgressBar.__name__)))
Example #12
def load_ipython_extension(ipython):
    if IPython.version_info[0] >= 5:
        warnings.warn(
            "Loading the nengo.ipynb notebook extension is no longer "
            "required. Progress bars are automatically activated for IPython "
            "version 5 and later.")
    elif has_ipynb_widgets() and rc.get('progress', 'progress_bar') == 'auto':
        warnings.warn(
            "The nengo.ipynb notebook extension is deprecated. Please upgrade "
            "to IPython version 5 or later.")

        IPythonProgressWidget.load_frontend(ipython)
        rc.set('progress', 'progress_bar', '.'.join((
            __name__, IPython2ProgressBar.__name__)))
Example #13
    def __init__(self,
                 dimensions,
                 vocab=None,
                 neurons_per_multiply=200,
                 output_scaling=1.0,
                 radius=1.0,
                 direct=False,
                 label=None,
                 seed=None,
                 add_to_container=None):
        super(Compare, self).__init__(label, seed, add_to_container)
        if vocab is None:
            # use the default vocab for this number of dimensions
            vocab = dimensions

        self.output_scaling = output_scaling

        with self:
            self.compare = nengo.networks.EnsembleArray(
                neurons_per_multiply,
                dimensions,
                ens_dimensions=2,
                neuron_type=nengo.Direct() if direct else nengo.LIF(),
                encoders=Choice([[1, 1], [1, -1], [-1, 1], [-1, -1]]),
                radius=radius * np.sqrt(2),
                label='compare')

            self.inputA = nengo.Node(size_in=dimensions, label='inputA')
            self.inputB = nengo.Node(size_in=dimensions, label='inputB')
            self.output = nengo.Node(size_in=dimensions, label='output')

        self.inputs = dict(A=(self.inputA, vocab), B=(self.inputB, vocab))
        self.outputs = dict(default=(self.output, vocab))

        dtype = rc.get('precision', 'dtype')
        t1 = np.zeros((dimensions * 2, dimensions), dtype=dtype)
        t2 = np.zeros((dimensions * 2, dimensions), dtype=dtype)
        for i in range(dimensions):
            t1[i * 2, i] = 1
            t2[i * 2 + 1, i] = 1

        with self:
            nengo.Connection(self.inputA, self.compare.input, transform=t1)
            nengo.Connection(self.inputB, self.compare.input, transform=t2)

        def multiply(x):
            return [x[0] * x[1]]

        self.compare.add_output('product', function=multiply)
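The two transforms interleave the inputs: t1 routes A[i] to row 2i and t2 routes B[i] to row 2i + 1, so each 2-D sub-ensemble of the EnsembleArray sees the pair (A[i], B[i]) and the product output multiplies them elementwise. A worked check for dimensions = 2:

import numpy as np

d = 2
t1 = np.zeros((2 * d, d))
t2 = np.zeros((2 * d, d))
for i in range(d):
    t1[i * 2, i] = 1
    t2[i * 2 + 1, i] = 1

A = np.array([1.0, 2.0])
B = np.array([3.0, 4.0])
stacked = np.dot(t1, A) + np.dot(t2, B)
print(stacked)                        # [1. 3. 2. 4.]: pairs (A0,B0), (A1,B1)
print(stacked[0::2] * stacked[1::2])  # [3. 8.]: the elementwise product A*B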
Example #14
def gen_eval_points(ens, eval_points, rng, scale_eval_points=True):
    if isinstance(eval_points, Distribution):
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        eval_points = eval_points.sample(n_points, ens.dimensions, rng)
    else:
        if (ens.n_eval_points is not None
                and eval_points.shape[0] != ens.n_eval_points):
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(eval_points, dtype=rc.get('precision', 'dtype'))

    if scale_eval_points:
        eval_points *= ens.radius  # scale by ensemble radius
    return eval_points
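Both branches end with the points scaled by the ensemble's radius so they cover the represented range; a minimal sketch of the two paths, with a uniform sample standing in for an arbitrary Distribution:

import numpy as np

rng = np.random.RandomState(42)
n_points, dimensions, radius = 500, 2, 1.5

sampled = rng.uniform(-1, 1, size=(n_points, dimensions))   # Distribution path
explicit = np.asarray([[0.1, 0.2], [0.3, 0.4]], dtype=np.float64)  # array path

for eval_points in (sampled, explicit):
    eval_points = eval_points * radius           # scale by ensemble radius
    print(np.abs(eval_points).max() <= radius)   # True: points stay in range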
Example #15
    def shrink(self, limit=None):  # noqa: C901
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if self.readonly:
            logger.info("Tried to shrink a readonly cache.")
            return

        if self._index is None:
            warnings.warn("Cannot shrink outside of a `with cache` block.")
            return

        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if is_string(limit):
            limit = human2bytes(limit)

        self._close_fd()

        fileinfo = []
        excess = -limit
        for path in self.get_files():
            stat = safe_stat(path)
            if stat is not None:
                aligned_size = byte_align(stat.st_size, self._fragment_size)
                excess += aligned_size
                fileinfo.append((stat.st_atime, aligned_size, path))

        # Remove the least recently accessed first
        fileinfo.sort()

        for _, size, path in fileinfo:
            if excess <= 0:
                break

            excess -= size
            self._index.remove_file_entry(path)
            safe_remove(path)

        self._index.sync()
Example #16
    def shrink(self, limit=None):  # noqa: C901
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if self.readonly:
            logger.info("Tried to shrink a readonly cache.")
            return

        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if isinstance(limit, str):
            limit = human2bytes(limit)

        self._close_fd()

        fileinfo = []
        excess = -limit
        for path in self.get_files():
            stat = safe_stat(path)
            if stat is not None:
                aligned_size = byte_align(stat.st_size, self._fragment_size)
                excess += aligned_size
                fileinfo.append((stat.st_atime, aligned_size, path))

        # Remove the least recently accessed first
        fileinfo.sort()

        try:
            with self._index:
                for _, size, path in fileinfo:
                    if excess <= 0:
                        break

                    excess -= size
                    self.remove_file(path)
        except TimeoutError:
            logger.debug("Not shrinking cache. Lock could not be acquired.")
Example #17
    def shrink(self, limit=None):  # noqa: C901
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if self.readonly:
            logger.info("Tried to shrink a readonly cache.")
            return

        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if is_string(limit):
            limit = human2bytes(limit)

        self._close_fd()

        fileinfo = []
        excess = -limit
        for path in self.get_files():
            stat = safe_stat(path)
            if stat is not None:
                aligned_size = byte_align(stat.st_size, self._fragment_size)
                excess += aligned_size
                fileinfo.append((stat.st_atime, aligned_size, path))

        # Remove the least recently accessed first
        fileinfo.sort()

        try:
            with self._index:
                for _, size, path in fileinfo:
                    if excess <= 0:
                        break

                    excess -= size
                    self.remove_file(path)
        except TimeoutError:
            logger.debug("Not shrinking cache. Lock could not be acquired.")
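The with self._index: block acquires an inter-process lock on the cache index that raises TimeoutError when another process holds it; shrinking is simply skipped in that case. The pattern in isolation (IndexLock is a hypothetical stand-in for the real index object):

class IndexLock:
    def __enter__(self):
        # Simulate a lock held by another process.
        raise TimeoutError("lock held by another process")
    def __exit__(self, *exc):
        return False

try:
    with IndexLock():
        pass  # remove cache files here
except TimeoutError:
    print("Not shrinking cache. Lock could not be acquired.")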
Example #18
    def __init__(self, dimensions, randomize=True, unitary=False,
                 max_similarity=0.1, include_pairs=False, rng=None):

        if not is_integer(dimensions):
            raise TypeError('dimensions must be an integer')
        if dimensions < 1:
            raise ValueError('dimensions must be positive')
        self.dimensions = dimensions
        self.randomize = randomize
        self.unitary = unitary
        self.max_similarity = max_similarity
        self.pointers = {}
        self.keys = []
        self.key_pairs = None
        self.vectors = np.zeros((0, dimensions),
                                dtype=rc.get('precision', 'dtype'))
        self.vector_pairs = None
        self._include_pairs = None
        self.include_pairs = include_pairs
        self._identity = None
        self.rng = rng
Example #19
    def __init__(self,
                 dt=0.001,
                 label=None,
                 decoder_cache=NoDecoderCache(),
                 dtype=rc.get('precision', 'dtype')):
        self.dt = dt
        self.label = label
        self.decoder_cache = decoder_cache
        self.dtype = dtype

        # We want to keep track of the toplevel network
        self.toplevel = None
        # Builders can set a config object to affect sub-builders
        self.config = None

        # Resources used by the build process.
        self.operators = []
        self.params = {}
        self.seeds = {}
        self.probes = []
        self.sig = collections.defaultdict(dict)
Example #20
    def include_pairs(self, value):
        """Adjusts whether key pairs are kept track of by the Vocabulary.

        If this is turned on, we need to compute all the pairs of terms
        already existing.
        """
        if value == self._include_pairs:
            return
        self._include_pairs = value
        if self._include_pairs:
            self.key_pairs = []
            self.vector_pairs = np.zeros((0, self.dimensions),
                                         dtype=rc.get('precision', 'dtype'))
            for i in range(1, len(self.keys)):
                for k in self.keys[:i]:
                    key = self.keys[i]
                    self.key_pairs.append('%s*%s' % (k, key))
                    v = (self.pointers[k] * self.pointers[key]).v
                    self.vector_pairs = np.vstack((self.vector_pairs, v))
        else:
            self.key_pairs = None
            self.vector_pairs = None
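The nested loop enumerates each unordered key pair exactly once, pairing every new key keys[i] with all earlier keys:

keys = ['A', 'B', 'C']
pairs = []
for i in range(1, len(keys)):
    for k in keys[:i]:
        pairs.append('%s*%s' % (k, keys[i]))
print(pairs)   # ['A*B', 'A*C', 'B*C']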
Example #21
    def shrink(self, limit=None):
        """Reduces the size of the cache to meet a limit.

        Parameters
        ----------
        limit : int, optional
            Maximum size of the cache in bytes.
        """
        if limit is None:
            limit = rc.get('decoder_cache', 'size')
        if is_string(limit):
            limit = human2bytes(limit)

        filelist = []
        for filename in os.listdir(self.cache_dir):
            key, ext = os.path.splitext(filename)
            if ext == self._SOLVER_INFO_EXT:
                continue
            path = os.path.join(self.cache_dir, filename)
            stat = os.stat(path)
            filelist.append((stat.st_atime, key))
        filelist.sort()

        excess = self.get_size_in_bytes() - limit
        for _, key in filelist:
            if excess <= 0:
                break

            decoder_path = os.path.join(
                self.cache_dir, key + self._DECODER_EXT)
            solver_info_path = os.path.join(
                self.cache_dir, key + self._SOLVER_INFO_EXT)

            excess -= os.stat(decoder_path).st_size
            os.remove(decoder_path)
            if os.path.exists(solver_info_path):
                excess -= os.stat(solver_info_path).st_size
                os.remove(solver_info_path)
Example #22
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    decoders = None
    eval_points = None
    solver_info = None
    transform = full_transform(conn, slice_pre=False)

    # Figure out the signal going across this connection
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node, or decoded connection in direct mode
        if (conn.function is None and isinstance(conn.pre_slice, slice)
                and (conn.pre_slice.step is None or conn.pre_slice.step == 1)):
            signal = model.sig[conn]['in'][conn.pre_slice]
        else:
            sig_in, signal = build_pyfunc(
                fn=(lambda x: x[conn.pre_slice]) if conn.function is None else
                (lambda x: conn.function(x[conn.pre_slice])),
                t_in=False,
                n_in=model.sig[conn]['in'].size,
                n_out=conn.size_mid,
                label=str(conn),
                model=model)
            model.add_op(
                DotInc(model.sig[conn]['in'],
                       model.sig['common'][1],
                       sig_in,
                       tag="%s input" % conn))
    elif isinstance(conn.pre_obj, Ensemble):
        # Normal decoded connection
        eval_points, activities, targets = build_linear_system(
            model, conn, rng)

        # Use cached solver, if configured
        solver = model.decoder_cache.wrap_solver(conn.solver)
        if conn.solver.weights:
            # account for transform
            targets = np.dot(targets, transform.T)
            transform = np.array(1, dtype=rc.get('precision', 'dtype'))

            decoders, solver_info = solver(
                activities,
                targets,
                rng=rng,
                E=model.params[conn.post_obj].scaled_encoders.T)
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = model.sig[conn]['out'].size
        else:
            decoders, solver_info = solver(activities, targets, rng=rng)
            signal_size = conn.size_mid

        # Add operator for decoders
        decoders = decoders.T

        model.sig[conn]['decoders'] = model.Signal(decoders,
                                                   name="%s.decoders" % conn)
        signal = model.Signal(npext.castDecimal(np.zeros(signal_size)),
                              name=str(conn))
        model.add_op(Reset(signal))
        model.add_op(
            DotInc(model.sig[conn]['decoders'],
                   model.sig[conn]['in'],
                   signal,
                   tag="%s decoding" % conn))
    else:
        # Direct connection
        signal = model.sig[conn]['in']

    # Add operator for filtering
    if conn.synapse is not None:
        signal = filtered_signal(model, conn, signal, conn.synapse)

    if conn.modulatory:
        # Make a new signal, effectively detaching from post
        model.sig[conn]['out'] = model.Signal(
            npext.castDecimal(np.zeros(model.sig[conn]['out'].size)),
            name="%s.mod_output" % conn)
        model.add_op(Reset(model.sig[conn]['out']))

    # Add operator for transform
    if isinstance(conn.post_obj, Neurons):
        if not model.has_built(conn.post_obj.ensemble):
            # Since it hasn't been built, it wasn't added to the Network,
            # which is most likely because the Neurons weren't associated
            # with an Ensemble.
            raise RuntimeError("Connection '%s' refers to Neurons '%s' "
                               "that are not a part of any Ensemble." %
                               (conn, conn.post_obj))

        if conn.post_slice != slice(None):
            raise NotImplementedError(
                "Post-slices on connections to neurons are not implemented")

        gain = model.params[conn.post_obj.ensemble].gain[conn.post_slice]
        if transform.ndim < 2:
            transform = transform * gain
        else:
            transform *= gain[:, np.newaxis]

    model.sig[conn]['transform'] = model.Signal(transform,
                                                name="%s.transform" % conn)
    if transform.ndim < 2:
        model.add_op(
            ElementwiseInc(model.sig[conn]['transform'],
                           signal,
                           model.sig[conn]['out'],
                           tag=str(conn)))
    else:
        model.add_op(
            DotInc(model.sig[conn]['transform'],
                   signal,
                   model.sig[conn]['out'],
                   tag=str(conn)))

    if conn.learning_rule_type:
        # Forcing update of signal that is modified by learning rules.
        # Learning rules themselves apply DotIncs.

        if isinstance(conn.pre_obj, Neurons):
            modified_signal = model.sig[conn]['transform']
        elif isinstance(conn.pre_obj, Ensemble):
            if conn.solver.weights:
                # TODO: make less hacky.
                # Have to do this because when a weight solver
                # is provided, learning rules should operate on
                # "decoders", which is really the weight matrix.
                model.sig[conn]['transform'] = model.sig[conn]['decoders']
                modified_signal = model.sig[conn]['transform']
            else:
                modified_signal = model.sig[conn]['decoders']
        else:
            raise TypeError(
                "Can't apply learning rules to connections of "
                "this type. pre type: %s, post type: %s" %
                (type(conn.pre_obj).__name__, type(conn.post_obj).__name__))

        model.add_op(PreserveValue(modified_signal))

    model.params[conn] = BuiltConnection(decoders=decoders,
                                         eval_points=eval_points,
                                         transform=transform,
                                         solver_info=solver_info)
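At its core, the decoder solve above fits a matrix D so that activities.dot(D) approximates targets; the configured solver adds regularization and caching, but a plain least-squares sketch captures the step:

import numpy as np

rng = np.random.RandomState(1)
activities = rng.rand(100, 50)     # (n_eval_points, n_neurons)
targets = rng.rand(100, 3)         # (n_eval_points, size_mid)

decoders = np.linalg.lstsq(activities, targets, rcond=None)[0]
print(decoders.shape)              # (50, 3); the builder then uses the
                                   # transpose, shape (3, 50), for DotInc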
Example #23
    def text(self, v, minimum_count=1, maximum_count=None,  # noqa: C901
             threshold=0.1, join=';', terms=None, normalize=False):
        """Return a human-readable text version of the provided vector.

        This is meant to give a quick text version of a vector for display
        purposes.  To do this, compute the dot product between the vector
        and all the terms in the vocabulary.  The top few vectors are
        chosen for inclusion in the text.  It will try to only return
        terms with a match above the threshold, but will always return
        at least minimum_count and at most maximum_count terms.  Terms
        are sorted from most to least similar.

        Parameters
        ----------
        v : SemanticPointer or array
            The vector to convert into text
        minimum_count : int, optional
            Always return at least this many terms in the text
        maximum_count : int, optional
            Never return more than this many terms in the text
        threshold : float, optional
            The similarity below which a term is ignored
        join : string, optional
            The text separator to use between terms
        terms : list of strings, optional
            Only consider terms in this list
        normalize : bool, optional
            Whether to normalize the vector before computing similarity

        """
        if isinstance(v, pointer.SemanticPointer):
            v = v.v
        else:
            v = np.array(v, dtype=rc.get('precision', 'dtype'))

        if normalize:
            nrm = np.linalg.norm(v)
            if nrm > 0:
                v /= nrm

        m = np.dot(self.vectors, v)
        matches = [(mm, self.keys[i]) for i, mm in enumerate(m)]
        if self.include_pairs:
            m2 = np.dot(self.vector_pairs, v)
            matches2 = [(mm2, self.key_pairs[i]) for i, mm2 in enumerate(m2)]
            matches.extend(matches2)
        if terms is not None:
            # TODO: handle the terms parameter more efficiently, so we don't
            # compute a whole bunch of dot products and then throw them out
            matches = [mm for mm in matches if mm[1] in terms]
        matches.sort()
        matches.reverse()

        r = []
        for m in matches:
            if minimum_count is not None and len(r) < minimum_count:
                r.append(m)
            elif maximum_count is not None and len(r) == maximum_count:
                break
            elif threshold is None or m[0] > threshold:
                r.append(m)
            else:
                break

        return join.join(['%0.2f%s' % (sim, key) for (sim, key) in r])
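A worked sketch of the ranking and formatting: dot products against all stored vectors, sorted most-similar first, thresholded, and joined:

import numpy as np

vectors = np.array([[1.0, 0.0], [0.6, 0.8], [0.0, 1.0]])
keys = ['A', 'B', 'C']
v = np.array([0.9, 0.1])

matches = sorted(zip(np.dot(vectors, v), keys), reverse=True)
print(';'.join('%0.2f%s' % (sim, key)
               for sim, key in matches if sim > 0.1))   # 0.90A;0.62B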
Example #24
def load_ipython_extension(ipython):
    if has_ipynb_widgets() and rc.get('progress', 'progress_bar') == 'auto':
        IPythonProgressWidget.load_frontend(ipython)
        rc.set('progress', 'progress_bar', '.'.join(
            (__name__, IPython2ProgressBar.__name__)))
Example #25
def load_ipython_extension(ipython):
    if has_ipynb_widgets() and rc.get('progress', 'progress_bar') == 'auto':
        IPythonProgressWidget.load_frontend(ipython)
        rc.set('progress', 'progress_bar', '.'.join((
            __name__, IPython2ProgressBar.__name__)))
Example #26
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])
    dtype = rc.get('precision', 'dtype')

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = model.Signal(
        npext.castDecimal(np.zeros(ens.dimensions)), name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=dtype)
    encoders = np.array([[dc.Decimal(p) for p in row] for row in encoders])
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    gain = np.array([dc.Decimal(p) for p in gain])
    bias = np.array([dc.Decimal(p) for p in bias])
    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = model.Signal(
            npext.castDecimal(np.zeros(ens.dimensions)),
            name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = model.Signal(
            npext.castDecimal(np.zeros(ens.n_neurons)),
            name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = model.Signal(
            npext.castDecimal(np.zeros(ens.n_neurons)),
            name="%s.neuron_out" % ens)
        model.add_op(
            Copy(src=model.Signal(bias, name="%s.bias" % ens),
                 dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain /
                                      dc.Decimal(ens.radius))[:, np.newaxis]

    model.sig[ens]['encoders'] = model.Signal(scaled_encoders,
                                              name="%s.scaled_encoders" % ens)

    # Inject noise if specified
    if ens.noise is not None:
        model.add_op(SimNoise(model.sig[ens.neurons]['in'], ens.noise))

    # Create output signal, using built Neurons
    model.add_op(
        DotInc(model.sig[ens]['encoders'],
               model.sig[ens]['in'],
               model.sig[ens.neurons]['in'],
               tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
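The essential encoder arithmetic above, minus the Decimal casting this fork adds: rows are normalized to unit length, then scaled by gain / radius so that a single DotInc both encodes the input and applies the neuron gains:

import numpy as np

rng = np.random.RandomState(3)
encoders = rng.randn(4, 2)
encoders /= np.linalg.norm(encoders, axis=1, keepdims=True)

gain = rng.uniform(1.0, 2.0, size=4)
radius = 1.5
scaled_encoders = encoders * (gain / radius)[:, np.newaxis]
print(np.allclose(np.linalg.norm(scaled_encoders, axis=1),
                  gain / radius))   # True: each row scaled by gain / radius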
Example #27
    def __init__(self,
                 network,
                 dt=0.001,
                 seed=None,
                 model=None,
                 dtype=rc.get('precision', 'dtype')):
        """Initialize the simulator with a network and (optionally) a model.

        Most of the time, you will pass in a network and sometimes a dt::

            sim1 = nengo.Simulator(my_network)  # Uses default 0.001s dt
            sim2 = nengo.Simulator(my_network, dt=0.01)  # Uses 0.01s dt

        For more advanced use cases, you can initialize the model yourself,
        and also pass in a network that will be built into the same model
        that you pass in::

            sim = nengo.Simulator(my_network, model=my_model)

        If you want full control over the build process, then you can build
        your network into the model manually. If you do this, then you must
        explicitly pass in ``None`` for the network::

            sim = nengo.Simulator(None, model=my_model)

        Parameters
        ----------
        network : nengo.Network instance or None
            A network object to be built and then simulated.
            If a fully built ``model`` is passed in, then you can skip
            building the network by passing in network=None.
        dt : float
            The length of a simulator timestep, in seconds.
        seed : int
            A seed for all stochastic operators used in this simulator.
            Note that no stochastic operators are currently implemented,
            so this parameter does nothing.
        model : nengo.builder.Model instance or None
            A model object that contains build artifacts to be simulated.
            Usually the simulator will build this model for you; however,
            if you want to build the network manually, or to inject some
            build artifacts in the Model before building the network,
            then you can pass in a ``nengo.builder.Model`` instance.
        """
        dt = float(dt)  # make sure it's a float (for division purposes)
        if model is None:
            self.model = Model(dt=dt,
                               label="%s, dt=%f" % (network, dt),
                               decoder_cache=get_default_decoder_cache(),
                               dtype=dtype)
        else:
            self.model = model

        if network is not None:
            # Build the network into the model
            self.model.build(network)

        self.model.decoder_cache.shrink()

        self.seed = np.random.randint(npext.maxint) if seed is None else seed
        self.rng = np.random.RandomState(self.seed)

        # -- map from Signal.base -> ndarray
        self.signals = SignalDict(
            __time__=np.asarray(npext.castDecimal(0), dtype=self.model.dtype))
        for op in self.model.operators:
            op.init_signals(self.signals)
        self.dg = operator_depencency_graph(self.model.operators)
        self._step_order = [
            node for node in toposort(self.dg) if hasattr(node, 'make_step')
        ]
        self._steps = [
            node.make_step(self.signals, dt, self.rng)
            for node in self._step_order
        ]

        # Add built states to the probe dictionary
        self._probe_outputs = self.model.params

        # Provide a nicer interface to probe outputs
        self.data = ProbeDict(self._probe_outputs)

        self.reset()
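Step order matters: each operator's reads must follow the writes it depends on, which is what the topological sort over the operator dependency graph guarantees. A minimal sketch of such a sort over a successor dict, analogous to toposort(self.dg) above (the three node names are illustrative):

def toposort(dg):
    # Kahn's algorithm: repeatedly emit nodes with no unprocessed
    # predecessors.
    indegree = {n: 0 for n in dg}
    for succs in dg.values():
        for s in succs:
            indegree[s] += 1
    ready = [n for n, d in indegree.items() if d == 0]
    order = []
    while ready:
        n = ready.pop()
        order.append(n)
        for s in dg[n]:
            indegree[s] -= 1
            if indegree[s] == 0:
                ready.append(s)
    return order

print(toposort({'encode': {'neurons'}, 'neurons': {'decode'}, 'decode': set()}))
# ['encode', 'neurons', 'decode']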