Example #1
def build_linear_system(model, conn):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = conn.eval_points
    if eval_points is None:
        eval_points = npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        eval_points = npext.array(eval_points, min_dims=2)

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    activities = conn.pre_obj.neuron_type.rates(x, gain, bias)
    if np.count_nonzero(activities) == 0:
        raise RuntimeError(
            "Building %s: 'activites' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    if conn.function is None:
        targets = eval_points[:, conn.pre_slice]
    else:
        targets = np.zeros((len(eval_points), conn.size_mid))
        for i, ep in enumerate(eval_points[:, conn.pre_slice]):
            targets[i] = conn.function(ep)

    return eval_points, activities, targets
Example #2
        def build_decoder(function, evals, solver):
            """Internal function for building a single decoder."""
            if evals is None:
                evals = npext.array(eval_points, min_dims=2)
            else:
                evals = npext.array(evals, min_dims=2)

            assert solver is None or not solver.weights

            x = np.dot(evals, encoders.T / ens.radius)
            activities = ens.neuron_type.rates(x, gain, bias)

            if function is None:
                targets = evals
            else:
                (value, _) = checked_call(function, evals[0])
                function_size = np.asarray(value).size
                targets = np.zeros((len(evals), function_size))

                for i, ep in enumerate(evals):
                    targets[i] = function(ep)

            if solver is None:
                solver = nengo.solvers.LstsqL2()

            return solver(activities, targets, rng=rng)[0]
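For readers unfamiliar with the solver call on the last line: a Nengo solver maps an activity matrix (evaluation points by neurons) to decoding weights. A minimal standalone sketch with synthetic data; the array shapes here are illustrative assumptions, not values taken from the builder:

import numpy as np
import nengo

rng = np.random.RandomState(0)
activities = rng.uniform(0, 100, size=(500, 50))  # (n_eval_points, n_neurons)
targets = rng.uniform(-1, 1, size=(500, 1))       # desired decoded values
decoders, info = nengo.solvers.LstsqL2()(activities, targets, rng=rng)
print(decoders.shape)  # (50, 1): one weight per neuron per output dimension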
Example #3
def target_function(eval_points, targets):
    """Get a function that maps evaluation points to target points.

    Use this when making a nengo connection using a sequence
    of evaluation points and targets, instead of passing a
    callable as the connection function.

    Parameters
    ----------
    eval_points: iterable
        A sequence of evaluation points.
    targets: iterable
        A sequence of targets with the same length as ``eval_points``.

    Returns
    -------
    dict:
       A dictionary with two keys: ``function`` and ``eval_points``.
       ``function`` is the mapping between the evaluation points and
       the targets. ``eval_points`` are the evaluation points that
       will be passed to the connection.

    Examples
    --------
    ens1 = nengo.Ensemble(n_neurons=100, dimensions=1)
    ens2 = nengo.Ensemble(n_neurons=100, dimensions=1)
    eval_points = numpy.arange(-1, 1, 0.01)
    targets = numpy.sin(eval_points)
    # the transformation on this connection approximates a sine function
    nengo.Connection(ens1, ens2,
                     **target_function(eval_points, targets))
    """

    eval_points = npext.array(eval_points, dtype=np.float64, min_dims=2)
    targets = npext.array(targets, dtype=np.float64, min_dims=2)

    if len(eval_points) != len(targets):
        raise ValueError("Number of evaluation points %s "
                         "is not equal to number of targets "
                         "%s" % (len(eval_points), len(targets)))

    func_dict = {}
    for eval_point, target in zip(eval_points, targets):
        func_dict[tuple(eval_point)] = target

    def function(x):
        x = tuple(x)
        return func_dict[x]

    return {'function': function, 'eval_points': eval_points}
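Note that the returned function is an exact lookup table keyed on tuples, so it only accepts inputs that are themselves evaluation points; that is what the decoder solver passes during the build. A quick sketch of this behaviour (assuming ``target_function`` as defined above is in scope; in some Nengo versions it lives in nengo.utils.connection):

import numpy as np

eval_points = np.arange(-1, 1, 0.01)
targets = np.sin(eval_points)
mapping = target_function(eval_points, targets)

x = mapping['eval_points'][0]  # an exact evaluation point
print(mapping['function'](x))  # the matching target, sin(-1.0)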
Example #4
def build_linear_system(model, conn, rng):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    if conn.eval_points is None:
        eval_points = npext.array(model.params[conn.pre_obj].eval_points,
                                  min_dims=2)
    else:
        eval_points = gen_eval_points(conn.pre_obj, conn.eval_points, rng,
                                      conn.scale_eval_points)
    z = encoders.T / dc.Decimal(conn.pre_obj.radius)
    # z = np.array([dc.Decimal(p) for p in z])
    x = np.dot(eval_points, z)
    activities = conn.pre_obj.neuron_type.rates(x, gain, bias)
    if np.count_nonzero(activities) == 0:
        raise RuntimeError(
            "Building %s: 'activites' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    if conn.function is None:
        targets = eval_points[:, conn.pre_slice]
    else:
        targets = npext.castDecimal(np.zeros(
            (len(eval_points), conn.size_mid)))
        for i, ep in enumerate(eval_points[:, conn.pre_slice]):
            targets[i] = conn.function(ep)

    return eval_points, activities, targets
Example #5
    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif callable(output) and node.size_out is not None:
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            pass
        elif callable(output):
            result = self.validate_callable(node, output)
            node.size_out = 0 if result is None else result.size
        else:
            # Make into correctly shaped numpy array before validation
            output = npext.array(output,
                                 min_dims=1,
                                 copy=False,
                                 dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size

        # --- Set output
        self.data[node] = output
Example #6
    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif callable(output) and node.size_out is not None:
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            pass
        elif callable(output):
            result = self.validate_callable(node, output)
            node.size_out = 0 if result is None else result.size
        else:
            # Make into correctly shaped numpy array before validation
            output = npext.array(
                output, min_dims=1, copy=False, dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size

        # --- Set output
        self.data[node] = output
Example #7
    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                result = self.validate_callable(node, output)
                node.size_out = 0 if result is None else result.size
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(output, min_dims=1, copy=False, dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size
        else:
            raise ValidationError("Invalid node output type %r" % type(output).__name__, attr=self.name, obj=node)

        # --- Set output
        self.data[node] = output
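A minimal sketch of the Node-level behaviour this setter produces, using only the public nengo API:

import nengo

with nengo.Network():
    a = nengo.Node(output=[0.5, -0.5])      # array-like: size_out becomes 2
    b = nengo.Node(output=lambda t: t)      # callable: output is probed once
    c = nengo.Node(output=None, size_in=3)  # passthrough: size_out = size_in
print(a.size_out, b.size_out, c.size_out)   # 2 1 3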
Example #8
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        return npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        return gen_eval_points(
            conn.pre_obj, conn.eval_points, rng, conn.scale_eval_points)
Example #9
def build_ensemble(model, ens):

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up encoders
    if isinstance(ens.neuron_type, nengo.Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders,
                               ens.n_neurons,
                               ens.dimensions,
                               rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng,
                                                      model.intercept_limit)

    group = CxGroup(ens.n_neurons, label='%s' % ens)
    group.bias[:] = bias
    model.build(ens.neuron_type, ens.neurons, group)

    # set default filter just in case no other filter gets set
    group.configure_default_filter(model.inter_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")
        # scaled_encoders = encoders
    else:
        # to keep scaling reasonable, we don't include the radius
        # scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]
        scaled_encoders = encoders * gain[:, np.newaxis]

    model.add_group(group)

    model.objs[ens]['in'] = group
    model.objs[ens]['out'] = group
    model.objs[ens.neurons]['in'] = group
    model.objs[ens.neurons]['out'] = group
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
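The npext.norm call above behaves like np.linalg.norm with keepdims; a plain-NumPy sketch of the unit-length normalization step, with an illustrative encoder shape:

import numpy as np

encoders = np.random.randn(100, 3)  # (n_neurons, dimensions)
encoders /= np.linalg.norm(encoders, axis=1, keepdims=True)
assert np.allclose(np.linalg.norm(encoders, axis=1), 1.0)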
Example #10
def spikes2events(t, spikes):
    """Return an event-based representation of spikes (i.e. spike times)"""
    spikes = npext.array(spikes, copy=False, min_dims=2)
    if spikes.ndim > 2:
        raise ValueError("Cannot handle %d-dimensional arrays" % spikes.ndim)
    if spikes.shape[-1] != len(t):
        raise ValueError("Last dimension of `spikes` must equal `len(t)`")

    # find nonzero elements (spikes) in each row, and translate to times
    return [t[spike != 0] for spike in spikes]
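A minimal usage sketch, assuming spikes2events from the example above is in scope; spikes has one row per neuron and one column per timestep:

import numpy as np

t = np.array([0.001, 0.002, 0.003, 0.004])
spikes = np.array([[0., 1., 0., 1.],   # neuron 0 spikes at t=0.002, 0.004
                   [1., 0., 0., 0.]])  # neuron 1 spikes at t=0.001
print(spikes2events(t, spikes))  # [array([0.002, 0.004]), array([0.001])]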
Example #12
def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
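The gain_bias step above can be checked in isolation with the public neuron-type API: gains and biases are chosen so that each neuron crosses threshold at its intercept and fires at its maximum rate when x = 1. A standalone sketch:

import numpy as np
import nengo

lif = nengo.LIF()
max_rates = np.array([200.0, 300.0])
intercepts = np.array([-0.5, 0.0])
gain, bias = lif.gain_bias(max_rates, intercepts)
print(lif.rates(np.ones(2), gain, bias))  # approximately [200., 300.]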
Example #13
def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders")
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=n_dimensions,
                             encoders=encoders,
                             label="A")
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Example #14
    def __init__(self, output=None, size_in=0, size_out=None, label="Node"):
        if output is not None and not is_callable(output):
            output = npext.array(output, min_dims=1, copy=False)
        self.output = output
        self.label = label
        self.size_in = size_in

        if output is not None:
            if isinstance(output, np.ndarray):
                shape_out = output.shape
            elif size_out is None and is_callable(output):
                t, x = np.asarray(0.0), np.zeros(size_in)
                args = [t, x] if size_in > 0 else [t]
                try:
                    result = output(*args)
                except TypeError:
                    raise TypeError(
                        "The function '%s' provided to '%s' takes %d "
                        "argument(s), where a function for this type "
                        "of node is expected to take %d argument(s)" % (
                            output.__name__, self,
                            output.__code__.co_argcount, len(args)))

                shape_out = (0,) if result is None \
                    else np.asarray(result).shape
            else:
                shape_out = (size_out,)  # assume `size_out` is correct

            if len(shape_out) > 1:
                raise ValueError(
                    "Node output must be a vector (got array shape %s)" %
                    (shape_out,))

            size_out_new = shape_out[0] if len(shape_out) == 1 else 1

            if size_out is not None and size_out != size_out_new:
                raise ValueError(
                    "Size of Node output (%d) does not match `size_out` (%d)" %
                    (size_out_new, size_out))

            size_out = size_out_new
        else:  # output is None
            size_out = size_in

        self.size_out = size_out

        # Set up probes
        self.probes = {'output': []}
Example #15
    def coerce(self, node, output):
        output = super().coerce(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            self.check_callable_args_list(node, output)
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                node.size_out = self.check_callable_output(node, output)
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(output,
                                 min_dims=1,
                                 copy=False,
                                 dtype=rc.float_dtype)
            self.check_ndarray(node, output)
            if not np.all(np.isfinite(output)):
                raise ValidationError("Output value must be finite.",
                                      attr=self.name,
                                      obj=node)
            node.size_out = output.size
        else:
            raise ValidationError(
                "Invalid node output type %r" % type(output).__name__,
                attr=self.name,
                obj=node,
            )

        return output
Example #16
def rates_kernel(t, spikes, kind='gauss', tau=0.04):
    """Estimate firing rates from spikes using a kernel.

    Parameters
    ----------
    t : (M,) array_like
        The times at which raw spike data (spikes) is defined.
    spikes : (M, N) array_like
        The raw spike data from N neurons.
    kind : str {'expon', 'gauss', 'expogauss', 'alpha'}, optional
        The type of kernel to use. 'expon' is an exponential kernel, 'gauss' is
        a Gaussian (normal) kernel, 'expogauss' is an exponential followed by
        a Gaussian, and 'alpha' is an alpha function kernel.
    tau : float
        The time constant for the kernel. The optimal value will depend on the
        firing rate of the neurons, with a longer tau preferred for lower
        firing rates. The default value of 0.04 works well across a wide range
        of firing rates.
    """
    spikes = spikes.T
    spikes = npext.array(spikes, copy=False, min_dims=2)
    if spikes.ndim > 2:
        raise ValidationError("Cannot handle %d-dimensional arrays"
                              % spikes.ndim, attr='spikes')
    if spikes.shape[-1] != len(t):
        raise ValidationError("Last dimension of 'spikes' must equal 'len(t)'",
                              attr='spikes')

    n, nt = spikes.shape
    dt = t[1] - t[0]

    tau_i = tau / dt
    kind = kind.lower()
    if kind == 'expogauss':
        rates = lowpass_filter(spikes, tau_i, kind='expon')
        rates = lowpass_filter(rates, tau_i / 4, kind='gauss')
    else:
        rates = lowpass_filter(spikes, tau_i, kind=kind)

    return rates.T
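A usage sketch, assuming the function is importable from nengo.utils.neurons and that spike trains use Nengo's 1/dt amplitude convention:

import numpy as np
from nengo.utils.neurons import rates_kernel  # assumed import location

dt = 0.001
t = np.arange(1000) * dt
spikes = (np.random.rand(len(t), 5) < 0.02) / dt  # ~20 Hz trains, shape (M, N)
rates = rates_kernel(t, spikes, kind='gauss', tau=0.04)
print(rates.shape)  # (1000, 5): smoothed rate estimate per neuron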
Example #17
    def coerce(self, node, output):
        output = super().coerce(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            self.check_callable_args_list(node, output)
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                node.size_out = self.check_callable_output(node, output)
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(
                output, min_dims=1, copy=False, dtype=np.float64)
            self.check_ndarray(node, output)
            if not np.all(np.isfinite(output)):
                raise ValidationError("Output value must be finite.",
                                      attr=self.name, obj=node)
            node.size_out = output.size
        else:
            raise ValidationError("Invalid node output type %r" %
                                  type(output).__name__,
                                  attr=self.name, obj=node)

        return output
Example #18
    def make_pool(self, ens):
        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)


        if self.config[ens].compact:
            p = pool.CompactPool(ens.n_neurons)
        elif self.config[ens].fixed:
            p = pool.FixedPool(ens.n_neurons,
                               bits_soma=self.config[ens].fixed_bits_soma,
                               bits_syn=self.config[ens].fixed_bits_syn)
        else:
            p = pool.StdPool(ens.n_neurons)
        intercepts = nengo.builder.sample(ens.intercepts, ens.n_neurons,
                                          rng=self.rng)
        max_rates = nengo.builder.sample(ens.max_rates, ens.n_neurons,
                                         rng=self.rng)
        gain, bias = self.find_gain_bias(p.soma, intercepts, max_rates)
        p.set_bias(bias)
        print('bias', p.get_bias())

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.pools[ens] = p

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=None,
                                               )
Example #19
    def before_simulation(self, netlist, simulator, n_steps):
        """Generate the values to output for the next set of simulation steps.
        """
        # Write out the system region to deal with the current run-time
        self.system_region.n_steps = n_steps

        # Evaluate the node for this period of time
        if self.period is not None:
            max_n = min(n_steps, int(np.ceil(self.period / simulator.dt)))
        else:
            max_n = n_steps

        ts = np.arange(simulator.steps, simulator.steps + max_n) * simulator.dt
        if callable(self.function):
            values = np.array([self.function(t) for t in ts])
        elif isinstance(self.function, Process):
            values = self.function.run_steps(max_n, d=self.size_out,
                                             dt=simulator.dt)
        else:
            values = np.array([self.function for t in ts])

        # Ensure that the values can be sliced, regardless of how they were
        # generated.
        values = npext.array(values, min_dims=2)

        # Compute the output for each connection
        outputs = []
        for conn, transform in self.conns_transforms:
            output = []

            # For each f(t) for the next set of simulations we calculate the
            # output at the end of the connection.  To do this we first apply
            # the pre-slice, then the function and then the post-slice.
            for v in values:
                # Apply the pre-slice
                v = v[conn.pre_slice]

                # Apply the function on the connection, if there is one.
                if conn.function is not None:
                    v = np.asarray(conn.function(v), dtype=float)

                output.append(np.dot(transform, v.T))
            outputs.append(np.array(output).reshape(max_n, -1))

        # Combine all of the output values to form a large matrix which we can
        # dump into memory.
        output_matrix = np.hstack(outputs)

        new_output_region = regions.MatrixRegion(
            np_to_fix(output_matrix),
            sliced_dimension=regions.MatrixPartitioning.columns
        )

        # Write the simulation values into memory
        for vertex in self.vertices:
            self.vertices_region_memory[vertex][self.system_region].seek(0)
            self.system_region.n_steps = max_n
            self.system_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.system_region],
                vertex.slice
            )

            self.vertices_region_memory[vertex][self.output_region].seek(0)
            new_output_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.output_region],
                vertex.slice
            )
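The inner loop applies the pre-slice, then the connection function, then the transform, once per timestep. A standalone sketch of that pipeline for a single value of f(t); the slice, function, and transform here are illustrative assumptions:

import numpy as np

v = np.array([0.2, 0.5, 0.8, 0.1])      # f(t) at one timestep
v = v[slice(0, 2)]                       # pre-slice -> [0.2, 0.5]
v = np.asarray([v[0] + v[1], v[0] - v[1]], dtype=float)  # connection function
transform = 2.0 * np.eye(2)
print(np.dot(transform, v.T))            # [ 1.4 -0.6]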
Example #20
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed
                if not hasattr(obj, 'seed') or obj.seed is None else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(npext.array(0.0, readonly=True),
                                        name='Common: Zero')
        model.sig['common'][1] = Signal(npext.array(1.0, readonly=True),
                                        name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
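The comment inside get_seed is worth unpacking: a value is drawn from the RNG even when the object overrides it, so fixing one object's seed never shifts the seeds assigned to its siblings. A toy illustration of that invariant:

import numpy as np

class Obj(object):
    def __init__(self, seed=None):
        self.seed = seed

def get_seed(obj, rng):
    seed = rng.randint(2**31 - 1)  # draw unconditionally
    return seed if obj.seed is None else obj.seed

rng = np.random.RandomState(0)
a = [get_seed(o, rng) for o in (Obj(), Obj(seed=7), Obj())]
rng = np.random.RandomState(0)
b = [get_seed(o, rng) for o in (Obj(), Obj(), Obj())]
assert a[0] == b[0] and a[2] == b[2]  # siblings' seeds are unaffected
assert a[1] == 7                      # the explicit seed wins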
Example #21
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        # NB: we do these in the order in which they're defined, and build the
        # learning rule in the connection builder. Because learning rules are
        # attached to connections, the connection that contains the learning
        # rule (and the learning rule) are always built *before* a connection
        # that attaches to that learning rule. Therefore, we don't have to
        # worry about connection ordering here.
        # TODO: Except perhaps if the connection being learned
        # is in a subnetwork?
        model.build(conn)

    logger.debug("Network step 4: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Example #22
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
        max_rates, intercepts = None, None  # TODO: determine from gain & bias
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
        intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Example #23
    def init(self, signal):
        """Set up a permanent mapping from signal -> ndarray."""
        # Make a copy of base.value to start
        val = npext.array(signal.base.value, readonly=signal.readonly)
        dict.__setitem__(self, signal.base, val)
Example #25
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Example #26
def build_ensemble(model, ens):
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(
        ens, ens.eval_points, rng=rng, dtype=nengo.rc.float_dtype
    )

    # Set up encoders
    if isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=nengo.rc.float_dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=nengo.rc.float_dtype)

    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    if np.any(np.isnan(encoders)):
        raise BuildError(
            f"{ens}: NaNs detected in encoders. This usually means that you have "
            "zero-length encoders; when normalized, these result in NaNs. Ensure all "
            "encoders have non-zero length, or set `normalize_encoders=False`."
        )

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(
        ens, rng, intercept_limit=model.intercept_limit, dtype=nengo.rc.float_dtype
    )

    block = LoihiBlock(ens.n_neurons, label="%s" % ens)
    block.compartment.bias[:] = bias

    # build the neuron_type (see builders below)
    model.build(ens.neuron_type, ens.neurons, block)

    # set default filter just in case no other filter gets set
    block.compartment.configure_default_filter(model.decode_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    # we exclude the radius to keep scaling reasonable for decode neurons
    scaled_encoders = encoders * gain[:, np.newaxis]

    # add instructions for splitting
    model.block_shapes[block] = model.config[ens].block_shape

    model.add_block(block)

    model.objs[ens]["in"] = block
    model.objs[ens]["out"] = block
    model.objs[ens.neurons]["in"] = block
    model.objs[ens.neurons]["out"] = block
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        intercepts=intercepts,
        max_rates=max_rates,
        scaled_encoders=scaled_encoders,
        gain=gain,
        bias=bias,
    )
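The NaN check above guards against zero-length encoders, which the normalization step turns into NaN rows. A plain-NumPy illustration:

import numpy as np

encoders = np.array([[1.0, 0.0],
                     [0.0, 0.0]])  # second encoder has zero length
with np.errstate(invalid='ignore'):
    encoders /= np.linalg.norm(encoders, axis=1, keepdims=True)
print(encoders[1])  # [nan nan] -> would trigger the BuildError above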
Example #27
    def make_ensemble(self, ens):
        """Build an Ensemble."""
        self.model.ensembles.append(ens)

        if isinstance(ens.neuron_type, nengo.Direct):
            self.make_direct_ensemble(ens)
            return

        p = self.make_pool(ens)

        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        intercepts = nengo.builder.ensemble.sample(ens.intercepts,
                                                   ens.n_neurons, rng=self.rng)
        max_rates = nengo.builder.ensemble.sample(ens.max_rates,
                                                  ens.n_neurons, rng=self.rng)

        if ens.gain is not None and ens.bias is not None:
            gain = nengo.builder.ensemble.sample(ens.gain, ens.n_neurons,
                                                 rng=self.rng)
            bias = nengo.builder.ensemble.sample(ens.bias, ens.n_neurons,
                                                 rng=self.rng)
        elif ens.gain is not None or ens.bias is not None:
            raise NotImplementedError("gain or bias set for %s, but not both. "
                                      "Solving for one given the other is not "
                                      "implemented yet." % ens)
        else:
            gain, bias = neuron_tuning.find_gain_bias(p, intercepts, max_rates)

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.model.pools[ens] = p

        self.model.decoders[ens] = {}

        if isinstance(ens.eval_points, Distribution):
            n_points = ens.n_eval_points
            if n_points is None:
                n_points = nengo.utils.builder.default_n_eval_points(
                    ens.n_neurons, ens.dimensions)
            eval_points = ens.eval_points.sample(n_points, ens.dimensions,
                                                 self.rng)
            eval_points *= ens.radius
        else:
            if (ens.n_eval_points is not None
                    and ens.eval_points.shape[0] != ens.n_eval_points):
                warnings.warn("Number of eval_points doesn't match "
                              "n_eval_points. Ignoring n_eval_points.")
            eval_points = np.array(ens.eval_points, dtype=np.float64)

            eval_points *= ens.radius

        J = gain * np.dot(eval_points, encoders.T / ens.radius) + bias
        activity = self.compute_activity(ens, J)

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=eval_points,
                                               activity=activity,
                                               neurons=ens.neurons,
                                               radius=ens.radius
                                               )
        self.model.outputs[ens] = np.zeros(ens.n_neurons, dtype=float)
        self.model.input_filters[ens] = {}
        self.model.input_filters[ens.neurons] = {}
Example #28
    def build_ensemble(self, ens):
        # Create random number generator
        seed = self.next_seed() if ens.seed is None else ens.seed
        rng = np.random.RandomState(seed)

        # Generate eval points
        if ens.eval_points is None or is_integer(ens.eval_points):
            eval_points = self.generate_eval_points(
                ens=ens, n_points=ens.eval_points, rng=rng)
        else:
            eval_points = npext.array(
                ens.eval_points, dtype=np.float64, min_dims=2)

        # Set up signal
        self.model.sig_in[ens] = Signal(np.zeros(ens.dimensions),
                                        name="%s.signal" % ens.label)
        self.model.operators.append(Reset(self.model.sig_in[ens]))

        # Set up encoders
        if ens.encoders is None:
            if isinstance(ens.neurons, nengo.Direct):
                encoders = np.identity(ens.dimensions)
            else:
                sphere = dists.UniformHypersphere(ens.dimensions, surface=True)
                encoders = sphere.sample(ens.neurons.n_neurons, rng=rng)
        else:
            encoders = np.array(ens.encoders, dtype=np.float64)
            enc_shape = (ens.neurons.n_neurons, ens.dimensions)
            if encoders.shape != enc_shape:
                raise ShapeMismatch(
                    "Encoder shape is %s. Should be (n_neurons, dimensions); "
                    "in this case %s." % (encoders.shape, enc_shape))
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        # Determine max_rates and intercepts
        if isinstance(ens.max_rates, dists.Distribution):
            max_rates = ens.max_rates.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            max_rates = np.array(ens.max_rates)
        if isinstance(ens.intercepts, dists.Distribution):
            intercepts = ens.intercepts.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            intercepts = np.array(ens.intercepts)

        # Build the neurons
        if isinstance(ens.neurons, nengo.Direct):
            bn = self.build(ens.neurons, ens.dimensions)
        else:
            bn = self.build(ens.neurons, max_rates, intercepts)

        # Scale the encoders
        if isinstance(ens.neurons, nengo.Direct):
            scaled_encoders = encoders
        else:
            scaled_encoders = encoders * (bn.gain / ens.radius)[:, np.newaxis]

        # Create output signal, using built Neurons
        self.model.operators.append(DotInc(
            Signal(scaled_encoders, name="%s.scaled_encoders" % ens.label),
            self.model.sig_in[ens],
            self.model.sig_in[ens.neurons],
            tag="%s encoding" % ens.label))

        # Output is neural output
        self.model.sig_out[ens] = self.model.sig_out[ens.neurons]

        for probe in ens.probes["decoded_output"]:
            self.build(probe, dimensions=ens.dimensions)
        for probe in ens.probes["spikes"] + ens.probes["voltages"]:
            self.build(probe, dimensions=ens.neurons.n_neurons)

        return BuiltEnsemble(eval_points=eval_points,
                             encoders=encoders,
                             intercepts=intercepts,
                             max_rates=max_rates,
                             scaled_encoders=scaled_encoders)
Example #29
    def __init__(self, nengo_ensemble, random_number_generator):
        """ goes through the nengo_spinnaker_gfe ensemble object and extracts 
        the connection_parameters for the lif neurons

        :param nengo_ensemble: the ensemble handed down by nengo_spinnaker_gfe
        :param random_number_generator: the random number generator 
        controlling all random in this nengo_spinnaker_gfe run
        :return: dict of params with names.
        """

        eval_points = nengo_builder.ensemble.gen_eval_points(
            nengo_ensemble,
            nengo_ensemble.eval_points,
            rng=random_number_generator)

        # Get the encoders
        if isinstance(nengo_ensemble.encoders, nengo.dists.Distribution):
            encoders = nengo_ensemble.encoders.sample(
                nengo_ensemble.n_neurons,
                nengo_ensemble.dimensions,
                rng=random_number_generator)
            encoders = numpy.asarray(encoders, dtype=numpy.float64)
        else:
            encoders = nengo_numpy.array(nengo_ensemble.encoders,
                                         min_dims=2,
                                         dtype=numpy.float64)
        encoders /= nengo_numpy.norm(encoders, axis=1, keepdims=True)

        # Get the correct sample function (dists.get_samples is not in
        # nengo.dists in some versions, so this has to be an if/else)
        # TODO figure out which one we're following; having both is crazy
        if hasattr(ensemble, 'sample'):
            sample_function = ensemble.sample
        else:
            sample_function = nengo.dists.get_samples

        # Get maximum rates and intercepts
        max_rates = sample_function(nengo_ensemble.max_rates,
                                    nengo_ensemble.n_neurons,
                                    rng=random_number_generator)
        intercepts = sample_function(nengo_ensemble.intercepts,
                                     nengo_ensemble.n_neurons,
                                     rng=random_number_generator)

        # Build the neurons
        if nengo_ensemble.gain is None and nengo_ensemble.bias is None:
            gain, bias = nengo_ensemble.neuron_type.gain_bias(
                max_rates, intercepts)
        elif (nengo_ensemble.gain is not None
              and nengo_ensemble.bias is not None):
            gain = sample_function(nengo_ensemble.gain,
                                   nengo_ensemble.n_neurons,
                                   rng=random_number_generator)
            bias = sample_function(nengo_ensemble.bias,
                                   nengo_ensemble.n_neurons,
                                   rng=random_number_generator)
        else:
            raise NotImplementedError(
                "gain or bias set for {!s}, but not both. Solving for one "
                "given the other is not yet implemented.".format(
                    nengo_ensemble))

        # Scale the encoders
        scaled_encoders = \
            encoders * (gain / nengo_ensemble.radius)[:, numpy.newaxis]

        self._intercepts = intercepts
        self._gain = gain
        self._bias = bias
        self._max_rates = max_rates
        self._encoders = encoders
        self._scaled_encoders = scaled_encoders
        self._eval_points = eval_points
Example #30
    def from_object(cls, ens, out_conns, dt, rng):
        assert isinstance(ens.neuron_type, nengo.neurons.LIF)
        assert isinstance(ens, nengo.Ensemble)

        if ens.seed is None:
            rng = np.random.RandomState(rng.tomaxint())
        else:
            rng = np.random.RandomState(ens.seed)

        # Generate evaluation points
        if isinstance(ens.eval_points, dists.Distribution):
            n_points = ens.n_eval_points
            if n_points is None:
                n_points = nengo.utils.builder.default_n_eval_points(
                    ens.n_neurons, ens.dimensions)
            eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
            eval_points *= ens.radius
        else:
            if (ens.n_eval_points is not None and
                    ens.eval_points.shape[0] != ens.n_eval_points):
                warnings.warn("Number of eval points doesn't match "
                              "n_eval_points.  Ignoring n_eval_points.")
            eval_points = np.array(ens.eval_points, dtype=np.float64)

        # Determine max_rates and intercepts
        if isinstance(ens.max_rates, dists.Distribution):
            max_rates = ens.max_rates.sample(ens.n_neurons, rng=rng)
        else:
            max_rates = np.array(ens.max_rates)
        if isinstance(ens.intercepts, dists.Distribution):
            intercepts = ens.intercepts.sample(ens.n_neurons, rng=rng)
        else:
            intercepts = np.array(ens.intercepts)

        # Generate gains, bias
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

        # Generate encoders
        if isinstance(ens.encoders, dists.Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        # Generate decoders for outgoing connections
        decoders = list()
        tfses = utils.connections.OutgoingEnsembleConnections(out_conns)

        def build_decoder(function, evals, solver):
            """Internal function for building a single decoder."""
            if evals is None:
                evals = npext.array(eval_points, min_dims=2)
            else:
                evals = npext.array(evals, min_dims=2)

            assert solver is None or not solver.weights

            x = np.dot(evals, encoders.T / ens.radius)
            activities = ens.neuron_type.rates(x, gain, bias)

            if function is None:
                targets = evals
            else:
                (value, _) = checked_call(function, evals[0])
                function_size = np.asarray(value).size
                targets = np.zeros((len(evals), function_size))

                for i, ep in enumerate(evals):
                    targets[i] = function(ep)

            if solver is None:
                solver = nengo.solvers.LstsqL2()

            return solver(activities, targets, rng=rng)[0]

        decoder_builder = utils.decoders.DecoderBuilder(build_decoder)

        # Build each of the decoders in turn
        for tfse in tfses.transforms_functions:
            decoders.append(decoder_builder.get_transformed_decoder(
                tfse.function, tfse.transform, tfse.eval_points, tfse.solver))

        # Build list of learning rule, connection-index tuples
        learning_rules = list()
        for c in tfses:
            for l in utils.connections.get_learning_rules(c):
                learning_rules.append((l, tfses[c]))

        # By default compress all decoders
        decoders_to_compress = [True for d in decoders]

        # Turn off compression for all decoders associated with learning rules
        for l in learning_rules:
            decoders_to_compress[l[1]] = False

        # Compress and merge the decoders
        (decoder_headers, decoders) =\
            utils.decoders.get_combined_compressed_decoders(
                decoders, compress=decoders_to_compress)
        decoders /= dt

        return cls(ens.n_neurons, gain, bias, encoders, decoders,
                   ens.neuron_type.tau_rc, ens.neuron_type.tau_ref,
                   eval_points, decoder_headers, learning_rules)
Example #31
    def before_simulation(self, netlist, simulator, n_steps):
        """Generate the values to output for the next set of simulation steps.
        """
        # Write out the system region to deal with the current run-time
        self.system_region.n_steps = n_steps

        # Evaluate the node for this period of time
        if self.period is not None:
            max_n = min(n_steps, int(np.ceil(self.period / simulator.dt)))
        else:
            max_n = n_steps

        ts = np.arange(simulator.steps, simulator.steps + max_n) * simulator.dt
        if callable(self.function):
            values = np.array([self.function(t) for t in ts])
        elif isinstance(self.function, Process):
            values = self.function.run_steps(max_n, d=self.size_out,
                                             dt=simulator.dt)
        else:
            values = np.array([self.function for t in ts])

        # Ensure that the values can be sliced, regardless of how they were
        # generated.
        values = npext.array(values, min_dims=2)

        # Compute the output for each connection
        outputs = []
        for transmission_params, transform in self.transmission_parameters:
            output = []

            # For each f(t) for the next set of simulations we calculate the
            # output at the end of the connection.  To do this we first apply
            # the pre-slice, then the function and then the post-slice.
            for v in values:
                # Apply the pre-slice
                v = v[transmission_params.pre_slice]

                # Apply the function on the connection, if there is one.
                if transmission_params.function is not None:
                    v = np.asarray(transmission_params.function(v),
                                   dtype=float)

                output.append(np.dot(transform, v.T))
            outputs.append(np.array(output).reshape(max_n, -1))

        # Combine all of the output values to form a large matrix which we can
        # dump into memory.
        output_matrix = np.hstack(outputs)

        new_output_region = regions.MatrixRegion(
            np_to_fix(output_matrix),
            sliced_dimension=regions.MatrixPartitioning.columns
        )

        # Write the simulation values into memory
        for vertex in self.vertices:
            self.vertices_region_memory[vertex][self.system_region].seek(0)
            self.system_region.n_steps = max_n
            self.system_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.system_region],
                vertex.slice
            )

            self.vertices_region_memory[vertex][self.output_region].seek(0)
            new_output_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.output_region],
                vertex.slice
            )
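
As a standalone illustration of the time-stamp handling above, with assumed values for dt, the step counter, and a sine stand-in for the node's output function:

import numpy as np

dt = 0.001                    # assumed simulator time step
steps, max_n = 100, 5         # assumed current step count and window length
ts = np.arange(steps, steps + max_n) * dt
values = np.array([np.sin(2 * np.pi * t) for t in ts])  # callable f(t)
print(ts.shape, values.shape)  # (5,) (5,): one output row per step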
Example #32
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Generate eval points
    if isinstance(ens.eval_points, Distribution):
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
        # eval_points should be in the ensemble's representational range
        eval_points *= ens.radius
    else:
        if (ens.n_eval_points is not None
                and ens.eval_points.shape[0] != ens.n_eval_points):
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(ens.eval_points, dtype=np.float64)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
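
The encoder handling above (normalise each encoder to unit length, then fold gain and radius in to produce scaled_encoders) can be checked in isolation; the gains and radius below are made-up toy values:

import numpy as np

rng = np.random.RandomState(1)
encoders = rng.randn(4, 2)                                   # 4 neurons, 2 dims
encoders /= np.linalg.norm(encoders, axis=1, keepdims=True)  # unit-length rows

gain = np.array([1.0, 2.0, 0.5, 1.5])                        # assumed gains
radius = 2.0                                                 # assumed radius
scaled_encoders = encoders * (gain / radius)[:, np.newaxis]

print(np.linalg.norm(encoders, axis=1))  # all 1.0: unit-length encoders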
Example #33
def build_ensemble(model, ens):
    """Builds an `.Ensemble` object into a model.

    A brief summary of what happens in the ensemble build process, in order:

    1. Generate evaluation points and encoders.
    2. Normalize encoders to unit length.
    3. Determine bias and gain.
    4. Create neuron input signal.
    5. Add operator for injecting bias.
    6. Call build function for neuron type.
    7. Scale encoders by gain and radius.
    8. Add operators for multiplying decoded input signal by encoders and
       incrementing the result in the neuron input signal.
    9. Call build function for injected noise.

    Some of these steps may be altered or omitted depending on the parameters
    of the ensemble, in particular the neuron type. For example, most steps are
    omitted for the `.Direct` neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    ens : Ensemble
        The ensemble to build.

    Notes
    -----
    Sets ``model.params[ens]`` to a `.BuiltEnsemble` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(
            ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.sig[ens.neurons]['bias'] = Signal(
            bias, name="%s.bias" % ens, readonly=True)
        model.add_op(Copy(model.sig[ens.neurons]['bias'],
                          model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens, readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
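
The bias Copy and the encoding DotInc added above together implement the standard NEF input current J = scaled_encoders . x + bias; a toy sketch with assumed numbers:

import numpy as np

x = np.array([0.5, -0.3])                 # decoded input (2 dimensions)
scaled_encoders = np.array([[1.2, 0.0],   # gain / radius already folded in
                            [0.0, -0.8]])
bias = np.array([1.0, 1.0])

# Copy writes the bias into the neuron input; DotInc then increments it
# with the encoded input, giving the current driving each neuron.
J = scaled_encoders.dot(x) + bias
print(J)  # [1.6  1.24]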
Example #34
    def _generate_output_data(self, app_graph, n_machine_time_steps,
                              machine_time_step, current_time_step):

        if self._update_period is not None:
            max_n = min(
                n_machine_time_steps,
                int(numpy.ceil(self._update_period / machine_time_step)))
        else:
            max_n = n_machine_time_steps

        ts = (numpy.arange(current_time_step, current_time_step + max_n) *
              machine_time_step)
        if callable(self._nengo_output_function):
            values = numpy.array(
                [self._nengo_output_function(t) for t in ts])
        elif isinstance(self._nengo_output_function, Process):
            values = self._nengo_output_function.run_steps(
                max_n, d=self.size_out, dt=machine_time_step)
        else:
            values = numpy.array([self._nengo_output_function for t in ts])

        # Ensure that the values can be sliced, regardless of how they were
        # generated.
        values = nengo_numpy.array(values, min_dims=2)

        # Compute the output for each connection
        outputs = []
        for outgoing_partition in app_graph.\
                get_outgoing_edge_partitions_starting_at_vertex(self):
            if (outgoing_partition.identifier.source_port ==
                    constants.OUTPUT_PORT.STANDARD):
                output = []
                transmission_parameter = \
                    outgoing_partition.identifier.transmission_parameter
                transform = transmission_parameter.full_transform(
                    slice_in=False, slice_out=False)
                keep = numpy.any(transform != 0.0, axis=1)
                transform = transform[keep]

                # For each value of f(t) over the next set of simulation
                # steps, calculate the output at the end of the connection:
                # first apply the pre-slice, then the function, then the
                # post-slice.
                for out_value in values:

                    # Apply the pre-slice
                    out_value = out_value[transmission_parameter.pre_slice]

                    # Apply the function on the connection, if there is one.
                    if transmission_parameter.parameter_function is not None:
                        out_value = numpy.asarray(
                            transmission_parameter.parameter_function(
                                out_value),
                            dtype=float)

                    output.append(numpy.dot(transform, out_value.T))
                outputs.append(numpy.array(output).reshape(max_n, -1))

        # Combine all of the output values to form a large matrix which we can
        # dump into memory.
        output_matrix = numpy.hstack(outputs)
        return output_matrix
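
The keep mask above drops output dimensions whose transform row is entirely zero, so no dot products are computed for dimensions the connection never drives; a tiny sketch:

import numpy

transform = numpy.array([[1.0, 0.0],
                         [0.0, 0.0],    # this row contributes nothing
                         [0.5, -0.5]])
keep = numpy.any(transform != 0.0, axis=1)
print(keep)              # [ True False  True]
print(transform[keep])   # the zero row is removed before the dot products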
Example #35
    def build_connection(self, conn):
        dt = self.model.dt
        rng = np.random.RandomState(self.next_seed())

        self.model.sig_in[conn] = self.model.sig_out[conn.pre]
        self.model.sig_out[conn] = self.model.sig_in[conn.post]

        decoders = None
        eval_points = None
        transform = np.array(conn.transform_full, dtype=np.float64)

        # Figure out the signal going across this connection
        if (isinstance(conn.pre, nengo.Ensemble)
                and isinstance(conn.pre.neurons, nengo.Direct)):
            # Decoded connection in direct mode
            if conn.function is None:
                signal = self.model.sig_in[conn]
            else:
                sig_in, signal = self.build_pyfunc(
                    fn=conn.function,
                    t_in=False,
                    n_in=self.model.sig_in[conn].size,
                    n_out=conn.dimensions,
                    label=conn.label)
                self.model.operators.append(DotInc(
                    self.model.sig_in[conn],
                    Signal(1.0, name="1"),
                    sig_in,
                    tag="%s input" % conn.label))
        elif isinstance(conn.pre, nengo.Ensemble):
            # Normal decoded connection
            encoders = self.built[conn.pre].encoders
            gain = self.built[conn.pre.neurons].gain
            bias = self.built[conn.pre.neurons].bias

            eval_points = conn.eval_points
            if eval_points is None:
                eval_points = npext.array(
                    self.built[conn.pre].eval_points, min_dims=2)
            elif is_integer(eval_points):
                eval_points = self.generate_eval_points(
                    ens=conn.pre, n_points=eval_points, rng=rng)
            else:
                eval_points = npext.array(eval_points, min_dims=2)

            # Project eval points onto the encoders to get neuron inputs
            x = np.dot(eval_points, encoders.T / conn.pre.radius)
            activities = dt * conn.pre.neurons.rates(x, gain, bias)
            if np.count_nonzero(activities) == 0:
                raise RuntimeError(
                    "In '%s', for '%s', 'activities' matrix is all zero. "
                    "This is because no evaluation points fall in the firing "
                    "ranges of any neurons." % (str(conn), str(conn.pre)))

            if conn.function is None:
                targets = eval_points
            else:
                targets = npext.array(
                    [conn.function(ep) for ep in eval_points], min_dims=2)

            if conn.weight_solver is not None:
                if conn.decoder_solver is not None:
                    raise ValueError("Cannot specify both 'weight_solver' "
                                     "and 'decoder_solver'.")

                # account for transform
                targets = np.dot(targets, transform.T)
                transform = np.array(1., dtype=np.float64)

                decoders = conn.weight_solver(
                    activities, targets, rng=rng,
                    E=self.built[conn.post].scaled_encoders.T)
                self.model.sig_out[conn] = self.model.sig_in[
                    conn.post.neurons]
                signal_size = self.model.sig_out[conn].size
            else:
                solver = (conn.decoder_solver if conn.decoder_solver is
                          not None else nengo.decoders.lstsq_L2nz)
                decoders = solver(activities, targets, rng=rng)
                signal_size = conn.dimensions

            # Add operator for decoders and filtering
            decoders = decoders.T
            if conn.filter is not None and conn.filter > dt:
                o_coef, n_coef = self.filter_coefs(pstc=conn.filter, dt=dt)
                decoder_signal = Signal(
                    decoders * n_coef,
                    name="%s.decoders * n_coef" % conn.label)
            else:
                decoder_signal = Signal(decoders,
                                        name="%s.decoders" % conn.label)
                o_coef = 0

            signal = Signal(np.zeros(signal_size), name=conn.label)
            self.model.operators.append(ProdUpdate(
                decoder_signal,
                self.model.sig_in[conn],
                Signal(o_coef, name="o_coef"),
                signal,
                tag="%s decoding" % conn.label))
        else:
            # Direct connection
            signal = self.model.sig_in[conn]

        # Add operator for filtering (in the case filter wasn't already
        # added, when pre.neurons is a non-direct Ensemble)
        if decoders is None and conn.filter is not None:
            # Note: we add a filter here even if filter < dt,
            # in order to avoid cycles in the op graph. If the filter
            # is explicitly set to None (e.g. for a passthrough node)
            # then cycles can still occur.
            signal = self.filtered_signal(signal, conn.filter)

        if conn.modulatory:
            # Make a new signal, effectively detaching from post
            self.model.sig_out[conn] = Signal(
                np.zeros(self.model.sig_out[conn].size),
                name="%s.mod_output" % conn.label)
            # Add reset operator?
            # TODO: add unit test

        # Add operator for transform
        if isinstance(conn.post, nengo.objects.Neurons):
            if not self.has_built(conn.post):
                # Since it hasn't been built, it wasn't added to the Network,
                # which is most likely because the Neurons weren't associated
                # with an Ensemble.
                raise RuntimeError("Connection '%s' refers to Neurons '%s' "
                                   "that are not a part of any Ensemble." % (
                                       conn, conn.post))
            transform *= self.built[conn.post].gain[:, np.newaxis]

        self.model.operators.append(
            DotInc(Signal(transform, name="%s.transform" % conn.label),
                   signal,
                   self.model.sig_out[conn],
                   tag=conn.label))

        # Set up probes
        for probe in conn.probes["signal"]:
            self.build(probe, dimensions=self.model.sig_out[conn].size)

        return BuiltConnection(decoders=decoders,
                               eval_points=eval_points,
                               transform=transform)
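
The filter_coefs helper used for the synaptic filter is not shown in this example. A plausible sketch of the first-order lowpass discretisation it presumably computes (the exact form here is an assumption):

import numpy as np

def filter_coefs(pstc, dt):
    """Coefficients for x[n+1] = o_coef * x[n] + n_coef * u[n],
    a zero-order-hold discretisation of dx/dt = (u - x) / pstc."""
    o_coef = np.exp(-dt / pstc)
    return o_coef, 1.0 - o_coef

o_coef, n_coef = filter_coefs(pstc=0.005, dt=0.001)
print(o_coef + n_coef)  # 1.0: unit DC gain, so steady-state values pass through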
Example #36
def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        bias_sig = Signal(bias, name="%s.bias" % ens, readonly=True)
        model.add_op(Copy(src=bias_sig, dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens, readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
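
For reference, get_gain_bias solves for a per-neuron gain and bias such that each neuron starts responding at its intercept and fires at max_rate when x = 1. For LIF neurons that solve looks roughly like the sketch below (a reconstruction of the standard NEF derivation, not the exact Nengo source):

import numpy as np

def lif_gain_bias(max_rates, intercepts, tau_rc=0.02, tau_ref=0.002):
    # Invert the LIF rate equation r = 1 / (tau_ref - tau_rc * log(1 - 1/J))
    # to find the current J_max that produces max_rate.
    J_max = 1.0 / (1.0 - np.exp((tau_ref - 1.0 / max_rates) / tau_rc))
    # Solve gain * intercept + bias = 1 (threshold current) and
    # gain * 1 + bias = J_max (current at x = 1) simultaneously.
    gain = (J_max - 1.0) / (1.0 - intercepts)
    bias = 1.0 - gain * intercepts
    return gain, bias

gain, bias = lif_gain_bias(np.array([200.0, 400.0]), np.array([-0.5, 0.0]))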