Example #1
def validate(self, instance, rule):
    if is_iterable(rule):
        for r in (itervalues(rule) if isinstance(rule, dict) else rule):
            self.validate_rule(instance, r)
    elif rule is not None:
        self.validate_rule(instance, rule)
    super(LearningRuleTypeParam, self).validate(instance, rule)
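Every snippet on this page leans on the same `itervalues` compatibility helper (presumably `nengo.utils.compat` or `six.itervalues`). For reference, a minimal sketch of what such a helper does, as an assumption rather than the exact upstream code:

import sys

if sys.version_info[0] >= 3:
    def itervalues(d):
        # Python 3: dict.values() is already a lazy view.
        return iter(d.values())
else:
    def itervalues(d):
        # Python 2: avoid materializing an intermediate list.
        return d.itervalues()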
Example #2
    def __init__(self, data, interpolation='zero', **kwargs):
        self.data = data

        needs_scipy = ('linear', 'nearest', 'slinear', 'quadratic', 'cubic')
        if interpolation in needs_scipy:
            self.sp_interpolate = None
            if any(callable(val) for val in itervalues(self.data)):
                warnings.warn("%r interpolation cannot be applied because "
                              "a callable was supplied for some piece of the "
                              "function. Using 'zero' interpolation instead." %
                              (interpolation, ))
                interpolation = 'zero'
            else:
                try:
                    import scipy.interpolate
                    self.sp_interpolate = scipy.interpolate
                except ImportError:
                    warnings.warn("%r interpolation cannot be applied because "
                                  "scipy is not installed. Using 'zero' "
                                  "interpolation instead." % (interpolation, ))
                    interpolation = 'zero'
        self.interpolation = interpolation

        super(Piecewise, self).__init__(default_size_in=0,
                                        default_size_out=self.size_out,
                                        **kwargs)
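A hypothetical call that exercises this fallback, assuming the class above is `nengo.processes.Piecewise`: asking for a scipy-backed interpolation while one of the pieces is a callable should emit the warning and drop back to 'zero'.

import numpy as np
import nengo

# One piece is a callable, so 'cubic' interpolation cannot be applied;
# the process is expected to warn and fall back to 'zero'.
process = nengo.processes.Piecewise(
    {0.0: 0.0, 0.5: np.sin, 1.0: 1.0},
    interpolation='cubic')
print(process.interpolation)  # expected: 'zero'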
Example #3
def coerce(self, instance, rule):
    if is_iterable(rule):
        for r in (itervalues(rule) if isinstance(rule, dict) else rule):
            self.check_rule(instance, r)
    elif rule is not None:
        self.check_rule(instance, rule)
    return super(LearningRuleTypeParam, self).coerce(instance, rule)
Example #4
def test_commonsig_readonly(RefSimulator):
    """Test that the common signals cannot be modified."""
    net = nengo.Network(label="test_commonsig")
    sim = RefSimulator(net)
    for sig in itervalues(sim.model.sig['common']):
        sim.signals.init(sig)
        with pytest.raises((ValueError, RuntimeError)):
            sim.signals[sig] = np.array([-1])
        with pytest.raises((ValueError, RuntimeError)):
            sim.signals[sig][...] = np.array([-1])
Example #5
def test_commonsig_readonly():
    """Test that the common signals cannot be modified."""
    net = nengo.Network(label="test_commonsig")
    model = Model()
    model.build(net)
    signals = SignalDict()

    for sig in itervalues(model.sig['common']):
        signals.init(sig)
        with pytest.raises((ValueError, RuntimeError)):
            signals[sig] = np.array([-1])
        with pytest.raises((ValueError, RuntimeError)):
            signals[sig][...] = np.array([-1])
Example #6
    def perform_merges_for_subset(self, subset):
        """Performs operator merges for a subset of operators.

        Parameters
        ----------
        subset : list
            Subset of operators.
        """
        by_view = groupby(subset, lambda op: self.opinfo[op].v_base)
        if self.only_merge_ops_with_view:
            if None in by_view:
                # If an op has no views, v_base will be None.
                # If we're only merging views, then we get rid of this subset.
                del by_view[None]

            for view_subset in itervalues(by_view):
                if len(view_subset) > 1:
                    self.perform_merges_for_view_subset(view_subset)
        elif None in by_view and len(by_view[None]) > 1:
            self.perform_merges_for_view_subset(by_view[None])
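Note that the `groupby` used here is not `itertools.groupby`: the surrounding code indexes and deletes keys on the result, so it must return a dict keyed by `v_base`. A rough equivalent, written as an assumption about that helper:

from collections import defaultdict

def groupby(items, key):
    # Group items into {key(item): [items]}; unlike itertools.groupby this
    # needs no prior sorting and returns every group at once.
    groups = defaultdict(list)
    for item in items:
        groups[key(item)].append(item)
    return dict(groups)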
Example #7
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    weights = None
    eval_points = None
    solver_info = None
    signal_size = conn.size_out
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)

        if conn.function is not None:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(
                SimPyFunc(output=in_signal,
                          fn=conn.function,
                          t_in=False,
                          x=sliced_in))
        else:
            in_signal = sliced_in

    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = build_decoders(model, conn, rng)

        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = conn.post_obj.neurons.size_in
            post_slice = Ellipsis  # don't apply slice later
            weights = decoders.T
        else:
            weights = multiply(conn.transform, decoders.T)
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Add operator for applying weights
    if weights is None:
        weights = np.array(conn.transform)

    if isinstance(conn.post_obj, Neurons):
        gain = model.params[conn.post_obj.ensemble].gain[post_slice]
        weights = multiply(gain, weights)

    if conn.learning_rule is not None and weights.ndim < 2:
        raise ValueError("Learning connection must have full transform matrix")

    model.sig[conn]['weights'] = Signal(weights,
                                        name="%s.weights" % conn,
                                        readonly=True)
    signal = Signal(np.zeros(signal_size), name="%s.weighted" % conn)
    model.add_op(Reset(signal))
    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(model.sig[conn]['weights'],
           in_signal,
           signal,
           tag="%s.weights_elementwiseinc" % conn))

    # Add operator for filtering
    if conn.synapse is not None:
        signal = model.build(conn.synapse, signal)

    # Copy to the proper slice
    model.add_op(
        SlicedCopy(signal,
                   model.sig[conn]['out'],
                   b_slice=post_slice,
                   inc=True,
                   tag="%s.gain" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        model.sig[conn]['weights'].readonly = False
        model.add_op(PreserveValue(model.sig[conn]['weights']))

        rule = conn.learning_rule
        if is_iterable(rule):
            for r in itervalues(rule) if isinstance(rule, dict) else rule:
                model.build(r)
        elif rule is not None:
            model.build(rule)

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         weights=weights)
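The `is_iterable`/`itervalues` branch at the end exists because `conn.learning_rule` mirrors whatever was passed as `learning_rule_type`: a single rule object, a list of rules, or a dict keyed by name. An illustrative sketch of those three shapes (plain nengo learning-rule objects, no builder involved):

import nengo

rule_single = nengo.PES()                               # built directly
rule_list = [nengo.PES(), nengo.BCM()]                  # iterated as-is
rule_dict = {'pes': nengo.PES(), 'bcm': nengo.BCM()}    # itervalues(...) path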
Example #8
File: params.py, Project: jmdelahanty/nengo
def _params(self):
    return list(itervalues(self._paramdict))
Example #9
File: connection.py, Project: CamZHU/nengo
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero."
                             % (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    weights = None
    eval_points = None
    solver_info = None
    signal_size = conn.size_out
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node) or
            (isinstance(conn.pre_obj, Ensemble) and
             isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)

        if conn.function is not None:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(SimPyFunc(
                output=in_signal, fn=conn.function,
                t_in=False, x=sliced_in))
        else:
            in_signal = sliced_in

    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = build_decoders(model, conn, rng)

        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = conn.post_obj.neurons.size_in
            post_slice = Ellipsis  # don't apply slice later
            weights = decoders.T
        else:
            weights = multiply(conn.transform, decoders.T)
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Add operator for applying weights
    if weights is None:
        weights = np.array(conn.transform)

    if isinstance(conn.post_obj, Neurons):
        gain = model.params[conn.post_obj.ensemble].gain[post_slice]
        weights = multiply(gain, weights)

    model.sig[conn]['weights'] = Signal(weights, name="%s.weights" % conn)
    signal = Signal(np.zeros(signal_size), name="%s.weighted" % conn)
    model.add_op(Reset(signal))
    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(op(model.sig[conn]['weights'],
                    in_signal,
                    signal,
                    tag="%s.weights_elementwiseinc" % conn))

    # Add operator for filtering
    if conn.synapse is not None:
        signal = model.build(conn.synapse, signal)

    # Copy to the proper slice
    model.add_op(SlicedCopy(
        signal, model.sig[conn]['out'], b_slice=post_slice,
        inc=True, tag="%s.gain" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in itervalues(rule) if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if 'encoders' in targets:
            model.add_op(PreserveValue(model.sig[conn.post_obj]['encoders']))
        if 'decoders' in targets or 'weights' in targets:
            if weights.ndim < 2:
                raise ValueError(
                    "'transform' must be a 2-dimensional array for learning")
            model.add_op(PreserveValue(model.sig[conn]['weights']))

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         weights=weights)
Example #10
File: params.py, Project: shaunren/nengo
def _params(self):
    return itervalues(self._paramdict)
Example #11
File: params.py, Project: thingimon/nengo
def _params(self):
    return itervalues(self._paramdict)
Example #12
def optimize(model, dg, max_passes=None):
    """Optimizes the operator graph by merging operators.

    This reduces the number of iterators to iterate over in slow Python code
    (as opposed to fast C code). The resulting merged operators will also
    operate on larger chunks of sequential memory, making better use of CPU
    caching and prefetching.

    The optimization algorithm has worst case complexity :math:`O(n^2 + e)`,
    where :math:`n` is the number of operators and :math:`e` is the number
    of edges in the dependency graph. In practice the run time will be much
    better because not all :math:`n^2` pairwise combinations of operators
    will be evaluated. A grouping depending on the operator type and view
    bases is done with dictionaries. This grouping can be done in amortized
    linear time and reduces the actual worst-case runtime of the optimization
    algorithm to :math:`O(gm^2 + e)`, where :math:`g` is the number of groups
    and :math:`m` is the number of elements in a group. Moreover, information
    about memory alignment will be used to cut the inner loop short in
    many cases and gives a runtime much closer to linear in most cases.

    Note that this function modifies both ``model`` and ``dg``.

    Parameters
    ----------
    model : `nengo.builder.Model`
        Builder output to optimize.
    dg : dict
        Dict of the form ``{a: {b, c}}`` where ``b`` and ``c`` depend on ``a``,
        specifying the operator dependency graph of the model.
    """

    logger.info("Optimizing model...")

    # We try first to merge operators with views only as these have a fixed
    # order for the memory alignment whereas operators without views could
    # be merged in a random order. Merging the views of operators will
    # propagate requirements in the memory ordering via the other
    # associated signals of the operator to other operators.

    # Once no more operators with views can be merged, we try to merge
    # operators without views and then try again merging views (because
    # each operator merge might generate new views).

    single_pass = OpMergePass(dg)

    n_initial_ops = len(dg)
    cum_duration = 0.
    before, after = None, None
    i = 0
    only_merge_ops_with_view = True
    while only_merge_ops_with_view or after < before:
        if max_passes is not None and i >= max_passes:
            break

        i += 1

        if logger.isEnabledFor(logging.DEBUG):
            for tp, ops in iteritems(groupby(dg, type)):
                logger.debug("%s: %d", tp, len(ops))

        only_merge_ops_with_view = before is None or before != after
        before = len(single_pass.dg.forward)

        with Timer() as t:
            single_pass(only_merge_ops_with_view)

        after = len(single_pass.dg.forward)
        logger.info(
            "Pass %i [%s]: Reduced %i to %i operators in %fs.",
            i, "views" if only_merge_ops_with_view else "non-views",
            before, after, t.duration)

        # Prevent the optimizer from running too long once we hit diminishing
        # returns.
        # Note that we don't break if there was no reduction at all because
        # in that case we want to toggle only_merge_ops_with_view which might
        # still yield some significant reduction.
        cum_duration += t.duration
        mean_reduction_rate = float(n_initial_ops - after) / cum_duration
        last_reduction_rate = float(before - after) / t.duration
        threshold = 0.01
        if 0. < last_reduction_rate < threshold * mean_reduction_rate:
            logger.info(
                "Operator reduction rate fell below {} mean reduction rate. "
                "Stopping optimizer.".format(threshold))
            break

    # Update model signals
    for sigdict in itervalues(model.sig):
        for name in sigdict:
            while sigdict[name] in single_pass.sig_replacements:
                sigdict[name] = single_pass.sig_replacements[sigdict[name]]

    # Reinitialize the model's operator list
    del model.operators[:]
    for op in dg:
        model.add_op(op)
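A hedged sketch of how this optimizer is normally driven: the simulator builds the model, derives the operator dependency graph, and hands both to `optimize`, which mutates them in place. The `operator_dependency_graph` helper name is an assumption based on nengo's simulator utilities.

import nengo
from nengo.builder import Model
from nengo.utils.simulator import operator_dependency_graph  # assumed helper

with nengo.Network() as net:
    a = nengo.Ensemble(20, 1)
    b = nengo.Ensemble(20, 1)
    nengo.Connection(a, b)

model = Model()
model.build(net)

dg = operator_dependency_graph(model.operators)
optimize(model, dg)  # merges operators; model.operators is rebuilt from dg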
Example #13
File: base.py, Project: jresch/nengo_mpi
def propogate_assignments(network, assignments, can_cross_boundary):
    """ Assign every object in ``network`` to a component.

    Propagates the component assignments stored in the dict ``assignments``
    (which only needs to contain assignments for top-level Networks, Nodes and
    Ensembles) down to objects that are contained in those top-level objects.
    If assignments is empty, then all objects will be assigned to the 1st
    component, which has index 0. The intent is to have some partitioning
    algorithm determine some of the assignments before this function is called,
    and then have this function propagate those assignments.

    Also does validation, making sure that connections that cross component
    boundaries have certain properties (see ``can_cross_boundary``) and making
    sure that certain types of objects are assigned to component 0.

    Objects that must be simulated on component 0 are:
        1. Nodes with callable outputs.
        2. Ensembles of Direct neurons.
        3. Any Node that is the source for a Connection that has a function.

    Parameters
    ----------
    network: nengo.Network
        The network we are partitioning.

    assignments: dict
        A dictionary mapping from nengo objects to component indices.
        This dictionary will be altered to contain assignments for all objects
        in the network. If a network appears in assignments, then all objects
        in that network which do not also appear in assignments will be given
        the same assignment as the network.

    can_cross_boundary: function
        A function which accepts a Connection, and returns a boolean specifying
        whether the Connection is allowed to cross component boundaries.

    Returns
    -------
    Nothing, but ``assignments`` is modified.

    """

    def helper(network, assignments, outputs):
        for node in network.nodes:
            if callable(node.output):
                if node in assignments and assignments[node] != 0:
                    warnings.warn(
                        "Found Node with callable output that was assigned to "
                        "a component other than component 0. Overriding "
                        "previous assignment."
                    )

                assignments[node] = 0

            else:
                if any([conn.function is not None for conn in outputs[node]]):
                    if node in assignments and assignments[node] != 0:
                        warnings.warn(
                            "Found Node with an output connection whose "
                            "function is not None, which is assigned to a "
                            "component other than component 0. Overriding "
                            "previous assignment."
                        )

                    assignments[node] = 0

                elif node not in assignments:
                    assignments[node] = assignments[network]

        for ensemble in network.ensembles:
            if isinstance(ensemble.neuron_type, Direct):
                if ensemble in assignments and assignments[ensemble] != 0:
                    warnings.warn(
                        "Found Direct-mode ensemble that was assigned to a "
                        "component other than component 0. Overriding "
                        "previous assignment."
                    )

                assignments[ensemble] = 0

            elif ensemble not in assignments:
                assignments[ensemble] = assignments[network]

            assignments[ensemble.neurons] = assignments[ensemble]

        for n in network.networks:
            if n not in assignments:
                assignments[n] = assignments[network]

            helper(n, assignments, outputs)

    assignments[network] = 0

    _, outputs = find_all_io(network.all_connections)

    try:
        helper(network, assignments, outputs)

        # Assign learning rules
        for conn in network.all_connections:
            if conn.learning_rule is not None:
                rule = conn.learning_rule
                if is_iterable(rule):
                    rule = itervalues(rule) if isinstance(rule, dict) else rule
                    for r in rule:
                        assignments[r] = assignments[conn.pre_obj]
                elif rule is not None:
                    assignments[rule] = assignments[conn.pre_obj]

        # Check for connections erroneously crossing component boundaries
        non_crossing = [conn for conn in network.all_connections if not can_cross_boundary(conn)]

        for conn in non_crossing:
            pre_component = assignments[conn.pre_obj]
            post_component = assignments[conn.post_obj]

            if pre_component != post_component:
                raise PartitionError(
                    "Connection %s crosses a component "
                    "boundary, but it is not permitted to. "
                    "Pre-object assigned to %d, post-object "
                    "assigned to %d." % (conn, pre_component, post_component)
                )

        # Assign probes
        for probe in network.all_probes:
            target = probe.target.obj if isinstance(probe.target, ObjView) else probe.target

            if isinstance(target, Connection):
                target = target.pre_obj

            assignments[probe] = assignments[target]

    except KeyError as e:
        # Nengo tests require a value error to be raised in these cases.
        msg = ("Invalid Partition. KeyError: %s" % e.message,)
        raise ValueError(msg)

    nodes = network.all_nodes
    nodes_in = all([node in assignments for node in nodes])
    assert nodes_in, "Assignments incomplete, missing some nodes."

    ensembles = network.all_ensembles
    ensembles_in = all([ensemble in assignments for ensemble in ensembles])
    assert ensembles_in, "Assignments incomplete, missing some ensembles."
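A minimal usage sketch for this partitioning step (nengo_mpi internals such as `find_all_io` and `PartitionError` are assumed to come with it): only top-level choices are supplied, and the helper fills in assignments for everything else.

import nengo

with nengo.Network() as net:
    stim = nengo.Node(output=0.5)
    ens = nengo.Ensemble(40, dimensions=1)
    nengo.Connection(stim, ens)

assignments = {ens: 1}  # pre-assign one top-level object to component 1
propogate_assignments(net, assignments,
                      can_cross_boundary=lambda conn: True)

# Everything else inherits component 0 from the top-level network.
assert all(obj in assignments for obj in net.all_nodes + net.all_ensembles)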
Example #14
File: config.py, Project: JolyZhang/nengo
def __str__(self):
    return "\n".join(str(v) for v in itervalues(self.params))
Example #15
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    decoders = None
    eval_points = None
    solver_info = None
    transform = full_transform(conn, slice_pre=False)

    # Figure out the signal going across this connection
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        if (conn.function is None and isinstance(conn.pre_slice, slice)
                and (conn.pre_slice.step is None or conn.pre_slice.step == 1)):
            signal = model.sig[conn]['in'][conn.pre_slice]
        else:
            signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            fn = ((lambda x: x[conn.pre_slice]) if conn.function is None else
                  (lambda x: conn.function(x[conn.pre_slice])))
            model.add_op(
                SimPyFunc(output=signal,
                          fn=fn,
                          t_in=False,
                          x=model.sig[conn]['in']))
    elif isinstance(conn.pre_obj, Ensemble):
        # Normal decoded connection
        eval_points, activities, targets = build_linear_system(
            model, conn, rng)

        # Use cached solver, if configured
        solver = model.decoder_cache.wrap_solver(conn.solver)

        if conn.solver.weights:
            # include transform in solved weights
            targets = np.dot(targets, transform.T)
            transform = np.array(1., dtype=np.float64)

            decoders, solver_info = solver(
                activities,
                targets,
                rng=rng,
                E=model.params[conn.post_obj].scaled_encoders.T)
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = model.sig[conn]['out'].size
        else:
            decoders, solver_info = solver(activities, targets, rng=rng)
            signal_size = conn.size_mid

        # Add operator for decoders
        decoders = decoders.T

        model.sig[conn]['decoders'] = Signal(decoders,
                                             name="%s.decoders" % conn)
        signal = Signal(np.zeros(signal_size), name=str(conn))
        model.add_op(Reset(signal))
        model.add_op(
            DotInc(model.sig[conn]['decoders'],
                   model.sig[conn]['in'],
                   signal,
                   tag="%s decoding" % conn))
    else:
        # Direct connection
        signal = model.sig[conn]['in']

    # Add operator for filtering
    if conn.synapse is not None:
        signal = filtered_signal(model, conn, signal, conn.synapse)

    # Add operator for transform
    if isinstance(conn.post_obj, Neurons):
        if not model.has_built(conn.post_obj.ensemble):
            # Since it hasn't been built, it wasn't added to the Network,
            # which is most likely because the Neurons weren't associated
            # with an Ensemble.
            raise RuntimeError("Connection '%s' refers to Neurons '%s' "
                               "that are not a part of any Ensemble." %
                               (conn, conn.post_obj))

        if conn.post_slice != slice(None):
            raise NotImplementedError(
                "Post-slices on connections to neurons are not implemented")

        gain = model.params[conn.post_obj.ensemble].gain[conn.post_slice]
        if transform.ndim < 2:
            transform = transform * gain
        else:
            transform *= gain[:, np.newaxis]

    model.sig[conn]['transform'] = Signal(transform,
                                          name="%s.transform" % conn)
    if transform.ndim < 2:
        model.add_op(
            ElementwiseInc(model.sig[conn]['transform'],
                           signal,
                           model.sig[conn]['out'],
                           tag=str(conn)))
    else:
        model.add_op(
            DotInc(model.sig[conn]['transform'],
                   signal,
                   model.sig[conn]['out'],
                   tag=str(conn)))

    # Build learning rules
    if conn.learning_rule:
        if isinstance(conn.pre_obj, Ensemble):
            model.add_op(PreserveValue(model.sig[conn]['decoders']))
        else:
            model.add_op(PreserveValue(model.sig[conn]['transform']))

        if isinstance(conn.pre_obj, Ensemble) and conn.solver.weights:
            # TODO: make less hacky.
            # Have to do this because when a weight_solver
            # is provided, then learning rules should operate on
            # "decoders" which is really the weight matrix.
            model.sig[conn]['transform'] = model.sig[conn]['decoders']

        rule = conn.learning_rule
        if is_iterable(rule):
            for r in itervalues(rule) if isinstance(rule, dict) else rule:
                model.build(r)
        elif rule is not None:
            model.build(rule)

    model.params[conn] = BuiltConnection(decoders=decoders,
                                         eval_points=eval_points,
                                         transform=transform,
                                         solver_info=solver_info)
Example #16
File: network.py, Project: LittileBee/nengo
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Example #17
def build_connection(model, conn):
    """Builds a `.Connection` object into a model.

    A brief summary of what happens in the connection build process,
    in order:

    1. Solve for decoders.
    2. Combine transform matrix with decoders to get weights.
    3. Add operators for computing the function
       or multiplying neural activity by weights.
    4. Call build function for the synapse.
    5. Call build function for the learning rule.
    6. Add operator for applying learning rule delta to weights.

    Some of these steps may be altered or omitted depending on the parameters
    of the connection, in particular the pre and post types.

    Parameters
    ----------
    model : Model
        The model to build into.
    conn : Connection
        The connection to build.

    Notes
    -----
    Sets ``model.params[conn]`` to a `.BuiltConnection` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero."
                             % (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero."
                % (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    weights = None
    eval_points = None
    solver_info = None
    signal_size = conn.size_out
    post_slice = conn.post_slice

    # Sample transform if given a distribution
    transform = (conn.transform.sample(conn.size_out, conn.size_mid, rng=rng)
                 if isinstance(conn.transform, Distribution) else
                 np.array(conn.transform))

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node) or
            (isinstance(conn.pre_obj, Ensemble) and
             isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        weights = transform
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)
        if conn.function is not None:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in))
        else:
            in_signal = sliced_in
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, weights, solver_info = build_decoders(
            model, conn, rng, transform)
        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = conn.post_obj.neurons.size_in
            post_slice = Ellipsis  # don't apply slice later
    else:
        weights = transform
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    if isinstance(conn.post_obj, Neurons):
        weights = multiply(
            model.params[conn.post_obj.ensemble].gain[post_slice], weights)

    # Add operator for applying weights
    model.sig[conn]['weights'] = Signal(
        weights, name="%s.weights" % conn, readonly=True)
    signal = Signal(np.zeros(signal_size), name="%s.weighted" % conn)
    model.add_op(Reset(signal))
    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(op(model.sig[conn]['weights'],
                    in_signal,
                    signal,
                    tag="%s.weights_elementwiseinc" % conn))

    # Add operator for filtering
    if conn.synapse is not None:
        signal = model.build(conn.synapse, signal)

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]['weighted'] = signal

    # Copy to the proper slice
    model.add_op(SlicedCopy(
        signal, model.sig[conn]['out'], dst_slice=post_slice,
        inc=True, tag="%s.gain" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in itervalues(rule) if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if 'encoders' in targets:
            encoder_sig = model.sig[conn.post_obj]['encoders']
            if not any(isinstance(op, PreserveValue) and op.dst is encoder_sig
                       for op in model.operators):
                encoder_sig.readonly = False
                model.add_op(PreserveValue(encoder_sig))
        if 'decoders' in targets or 'weights' in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]['weights'].readonly = False
            model.add_op(PreserveValue(model.sig[conn]['weights']))

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         transform=transform,
                                         weights=weights)
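For reference, a hedged front-end example that would exercise steps 5 and 6 of the docstring above (building the learning rule and making the weights signal writable) when run through this builder:

import nengo

with nengo.Network() as net:
    pre = nengo.Ensemble(30, dimensions=1)
    post = nengo.Ensemble(30, dimensions=1)
    error = nengo.Node(size_in=1)

    # Decoded connection with a PES rule; its error signal comes from `error`.
    conn = nengo.Connection(pre, post,
                            learning_rule_type=nengo.PES(learning_rate=1e-4))
    nengo.Connection(error, conn.learning_rule)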
Example #18
def build_connection(model, conn):
    """Builds a `.Connection` object into a model.

    A brief summary of what happens in the connection build process,
    in order:

    1. Solve for decoders.
    2. Combine transform matrix with decoders to get weights.
    3. Add operators for computing the function
       or multiplying neural activity by weights.
    4. Call build function for the synapse.
    5. Call build function for the learning rule.
    6. Add operator for applying learning rule delta to weights.

    Some of these steps may be altered or omitted depending on the parameters
    of the connection, in particular the pre and post types.

    Parameters
    ----------
    model : Model
        The model to build into.
    conn : Connection
        The connection to build.

    Notes
    -----
    Sets ``model.params[conn]`` to a `.BuiltConnection` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero." %
                             (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero." %
                (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    weights = None
    eval_points = None
    solver_info = None
    signal_size = conn.size_out
    post_slice = conn.post_slice

    # Sample transform if given a distribution
    transform = get_samples(conn.transform,
                            conn.size_out,
                            d=conn.size_mid,
                            rng=rng)

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        weights = transform
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)
        if conn.function is None:
            in_signal = sliced_in
        elif isinstance(conn.function, np.ndarray):
            raise BuildError("Cannot use function points in direct connection")
        else:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in))
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, weights, solver_info = model.build(
            conn.solver, conn, rng, transform)
        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = conn.post_obj.neurons.size_in
            post_slice = None  # don't apply slice later
    else:
        weights = transform
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Add operator for applying weights
    model.sig[conn]['weights'] = Signal(weights,
                                        name="%s.weights" % conn,
                                        readonly=True)
    signal = Signal(np.zeros(signal_size), name="%s.weighted" % conn)
    model.add_op(Reset(signal))
    op = ElementwiseInc if weights.ndim < 2 else DotInc
    model.add_op(
        op(model.sig[conn]['weights'],
           in_signal,
           signal,
           tag="%s.weights_elementwiseinc" % conn))

    # Add operator for filtering
    if conn.synapse is not None:
        signal = model.build(conn.synapse, signal)

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]['weighted'] = signal

    if isinstance(conn.post_obj, Neurons):
        # Apply neuron gains (we don't need to do this if we're connecting to
        # an Ensemble, because the gains are rolled into the encoders)
        gains = Signal(model.params[conn.post_obj.ensemble].gain[post_slice],
                       name="%s.gains" % conn)
        model.add_op(
            ElementwiseInc(gains,
                           signal,
                           model.sig[conn]['out'][post_slice],
                           tag="%s.gains_elementwiseinc" % conn))
    else:
        # Copy to the proper slice
        model.add_op(
            Copy(signal,
                 model.sig[conn]['out'],
                 dst_slice=post_slice,
                 inc=True,
                 tag="%s" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in itervalues(rule) if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if 'encoders' in targets:
            encoder_sig = model.sig[conn.post_obj]['encoders']
            encoder_sig.readonly = False
        if 'decoders' in targets or 'weights' in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]['weights'].readonly = False

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         transform=transform,
                                         weights=weights)
Example #19
File: sim_npy.py, Project: jresch/nengo_ocl
def greedy_planner(operators):
    """
    I feel like there might be a dynamic programming solution here, but I
    can't work it out, and I'm not sure. Even if a DP solution existed, we
    would need a function to estimate the goodness (e.g. neg wall time) of
    kernel calls, and that function would need to be pretty fast.
    """
    dg = exact_dependency_graph(operators)

    # map unscheduled ops to their direct predecessors and successors
    predecessors_of = {}
    successors_of = {}
    for op in operators:
        predecessors_of[op] = set(filter(is_op, dg.predecessors(op)))
        successors_of[op] = set(filter(is_op, dg.successors(op)))

    base_sets = {}
    for op in operators:
        base_sets[op] = list(set(
            sig.base for sig in (op.incs + op.sets + op.updates)))

    # available ops are ready to be scheduled (all predecessors scheduled)
    available = defaultdict(set)
    for op in (op for op, dep in iteritems(predecessors_of) if not dep):
        available[type(op)].add(op)

    rval = []
    while len(predecessors_of) > 0:
        if len(available) == 0:
            raise ValueError("Cycles in the op graph")

        chosen_type = sorted(available.items(), key=lambda x: len(x[1]))[-1][0]
        candidates = available[chosen_type]

        # --- pick one op writing to each base
        # TODO: is this dangerous if ops write to multiple bases? For example
        # A and B might write to both X and Y, and we might choose A for
        # writing to X and B for writing to Y at the same time.
        by_base = defaultdict(list)
        no_sets = []
        for op in candidates:
            bases = base_sets[op]
            if len(bases) == 0:
                no_sets.append(op)
            else:
                for base in bases:
                    by_base[base].append(op)

        chosen = []
        for ops_writing_to_base in itervalues(by_base):
            chosen.append(ops_writing_to_base[0])

        # remaining candidates can be done in any order (no outputs)
        chosen.extend(no_sets)

        # ops that produced multiple outputs show up multiple times
        chosen = stable_unique(chosen)
        assert chosen

        # --- schedule ops
        rval.append((chosen_type, chosen))

        # --- update predecessors and successors of unscheduled ops
        available[chosen_type].difference_update(chosen)
        if not available[chosen_type]:
            del available[chosen_type]

        for op in chosen:
            for op2 in successors_of[op]:
                preds = predecessors_of[op2]
                preds.remove(op)
                if len(preds) == 0:
                    available[type(op2)].add(op2)
            del predecessors_of[op]
            del successors_of[op]

    assert len(operators) == sum(len(p[1]) for p in rval)
    # print('greedy_planner: Program len:', len(rval))
    return rval
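A hedged usage sketch: the planner takes a built model's operator list and returns stages of same-typed operators that can be dispatched together (the nengo_ocl helpers it calls, such as `exact_dependency_graph`, are assumed to be importable alongside it).

import nengo
from nengo.builder import Model

with nengo.Network() as net:
    a = nengo.Ensemble(20, 1)
    b = nengo.Ensemble(20, 1)
    nengo.Connection(a, b)

model = Model()
model.build(net)

plan = greedy_planner(model.operators)
for op_type, ops in plan:
    # Each stage pairs an operator class with the operators scheduled for it.
    print(op_type.__name__, len(ops))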
Example #20
def build_connection(model, conn):
    """Builds a `.Connection` object into a model.

    A brief summary of what happens in the connection build process,
    in order:

    1. Solve for decoders.
    2. Combine transform matrix with decoders to get weights.
    3. Add operators for computing the function
       or multiplying neural activity by weights.
    4. Call build function for the synapse.
    5. Call build function for the learning rule.
    6. Add operator for applying learning rule delta to weights.

    Some of these steps may be altered or omitted depending on the parameters
    of the connection, in particular the pre and post types.

    Parameters
    ----------
    model : Model
        The model to build into.
    conn : Connection
        The connection to build.

    Notes
    -----
    Sets ``model.params[conn]`` to a `.BuiltConnection` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise BuildError("Building %s: the %r object %s is not in the "
                             "model, or has a size of zero."
                             % (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise BuildError(
                "Building %s: the %r object %s has a %r size of zero."
                % (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    decoders = None
    encoders = None
    eval_points = None
    solver_info = None
    post_slice = conn.post_slice

    # Figure out the signal going across this connection
    in_signal = model.sig[conn]['in']
    if (isinstance(conn.pre_obj, Node)
            or (isinstance(conn.pre_obj, Ensemble)
                and isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        sliced_in = slice_signal(model, in_signal, conn.pre_slice)
        if conn.function is None:
            in_signal = sliced_in
        elif isinstance(conn.function, np.ndarray):
            raise BuildError("Cannot use function points in direct connection")
        else:
            in_signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in))
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng)
        if conn.solver.weights:
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']

            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            encoders = model.params[conn.post_obj].scaled_encoders.T
            encoders = encoders[conn.post_slice]

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
    else:
        in_signal = slice_signal(model, in_signal, conn.pre_slice)

    # Build transform
    if conn.solver.weights and not conn.solver.compositional:
        # special case for non-compositional weight solvers, where
        # the solver is solving for the full weight matrix. so we don't
        # need to combine decoders/transform/encoders.
        weighted, weights = model.build(Dense(decoders.shape, init=decoders),
                                        in_signal,
                                        rng=rng)
    else:
        weighted, weights = model.build(conn.transform,
                                        in_signal,
                                        decoders=decoders,
                                        encoders=encoders,
                                        rng=rng)

    model.sig[conn]["weights"] = weights

    # Build synapse
    if conn.synapse is not None:
        weighted = model.build(conn.synapse, weighted)

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]['weighted'] = weighted

    if isinstance(conn.post_obj, Neurons):
        # Apply neuron gains (we don't need to do this if we're connecting to
        # an Ensemble, because the gains are rolled into the encoders)
        gains = Signal(model.params[conn.post_obj.ensemble].gain[post_slice],
                       name="%s.gains" % conn)
        model.add_op(ElementwiseInc(
            gains, weighted, model.sig[conn]['out'][post_slice],
            tag="%s.gains_elementwiseinc" % conn))
    else:
        # Copy to the proper slice
        model.add_op(Copy(
            weighted, model.sig[conn]['out'], dst_slice=post_slice,
            inc=True, tag="%s" % conn))

    # Build learning rules
    if conn.learning_rule is not None:
        # TODO: provide a general way for transforms to expose learnable params
        if isinstance(conn.transform, Convolution):
            raise NotImplementedError(
                "Learning on convolutional connections is not supported")

        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in itervalues(rule) if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if 'encoders' in targets:
            encoder_sig = model.sig[conn.post_obj]['encoders']
            encoder_sig.readonly = False
        if 'decoders' in targets or 'weights' in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]['weights'].readonly = False

    model.params[conn] = BuiltConnection(eval_points=eval_points,
                                         solver_info=solver_info,
                                         transform=conn.transform,
                                         weights=weights.initial_value)
Example #21
def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = model.Signal(0.0, name='Common: Zero')
        model.sig['common'][1] = model.Signal(1.0, name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Example #22
File: config.py, Project: tisana69/nengo
def __str__(self):
    return "\n".join(str(v) for v in itervalues(self.params))
Example #23
def build_connection(model, conn):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    # Get input and output connections from pre and post
    def get_prepost_signal(is_pre):
        target = conn.pre_obj if is_pre else conn.post_obj
        key = 'out' if is_pre else 'in'

        if target not in model.sig:
            raise ValueError("Building %s: the '%s' object %s "
                             "is not in the model, or has a size of zero."
                             % (conn, 'pre' if is_pre else 'post', target))
        if key not in model.sig[target]:
            raise ValueError("Error building %s: the '%s' object %s "
                             "has a '%s' size of zero." %
                             (conn, 'pre' if is_pre else 'post', target, key))

        return model.sig[target][key]

    model.sig[conn]['in'] = get_prepost_signal(is_pre=True)
    model.sig[conn]['out'] = get_prepost_signal(is_pre=False)

    decoders = None
    eval_points = None
    solver_info = None
    transform = full_transform(conn, slice_pre=False)

    # Figure out the signal going across this connection
    if (isinstance(conn.pre_obj, Node) or
            (isinstance(conn.pre_obj, Ensemble) and
             isinstance(conn.pre_obj.neuron_type, Direct))):
        # Node or Decoded connection in directmode
        if (conn.function is None and isinstance(conn.pre_slice, slice) and
                (conn.pre_slice.step is None or conn.pre_slice.step == 1)):
            signal = model.sig[conn]['in'][conn.pre_slice]
        else:
            signal = Signal(np.zeros(conn.size_mid), name='%s.func' % conn)
            fn = ((lambda x: x[conn.pre_slice]) if conn.function is None else
                  (lambda x: conn.function(x[conn.pre_slice])))
            model.add_op(SimPyFunc(
                output=signal, fn=fn, t_in=False, x=model.sig[conn]['in']))
    elif isinstance(conn.pre_obj, Ensemble):
        # Normal decoded connection
        eval_points, activities, targets = build_linear_system(
            model, conn, rng)

        # Use cached solver, if configured
        solver = model.decoder_cache.wrap_solver(conn.solver)

        if conn.solver.weights:
            # include transform in solved weights
            targets = np.dot(targets, transform.T)
            transform = np.array(1., dtype=np.float64)

            decoders, solver_info = solver(
                activities, targets, rng=rng,
                E=model.params[conn.post_obj].scaled_encoders.T)
            model.sig[conn]['out'] = model.sig[conn.post_obj.neurons]['in']
            signal_size = model.sig[conn]['out'].size
        else:
            decoders, solver_info = solver(activities, targets, rng=rng)
            signal_size = conn.size_mid

        # Add operator for decoders
        decoders = decoders.T

        model.sig[conn]['decoders'] = Signal(
            decoders, name="%s.decoders" % conn)
        signal = Signal(np.zeros(signal_size), name=str(conn))
        model.add_op(Reset(signal))
        model.add_op(DotInc(model.sig[conn]['decoders'],
                            model.sig[conn]['in'],
                            signal,
                            tag="%s decoding" % conn))
    else:
        # Direct connection
        signal = model.sig[conn]['in']

    # Add operator for filtering
    if conn.synapse is not None:
        signal = filtered_signal(model, conn, signal, conn.synapse)

    # Add operator for transform
    if isinstance(conn.post_obj, Neurons):
        if not model.has_built(conn.post_obj.ensemble):
            # Since it hasn't been built, it wasn't added to the Network,
            # which is most likely because the Neurons weren't associated
            # with an Ensemble.
            raise RuntimeError("Connection '%s' refers to Neurons '%s' "
                               "that are not a part of any Ensemble." % (
                                   conn, conn.post_obj))

        if conn.post_slice != slice(None):
            raise NotImplementedError(
                "Post-slices on connections to neurons are not implemented")

        gain = model.params[conn.post_obj.ensemble].gain[conn.post_slice]
        if transform.ndim < 2:
            transform = transform * gain
        else:
            transform *= gain[:, np.newaxis]

    model.sig[conn]['transform'] = Signal(transform,
                                          name="%s.transform" % conn)
    if transform.ndim < 2:
        model.add_op(ElementwiseInc(model.sig[conn]['transform'],
                                    signal,
                                    model.sig[conn]['out'],
                                    tag=str(conn)))
    else:
        model.add_op(DotInc(model.sig[conn]['transform'],
                            signal,
                            model.sig[conn]['out'],
                            tag=str(conn)))

    # Build learning rules
    if conn.learning_rule:
        if isinstance(conn.pre_obj, Ensemble):
            model.add_op(PreserveValue(model.sig[conn]['decoders']))
        else:
            model.add_op(PreserveValue(model.sig[conn]['transform']))

        if isinstance(conn.pre_obj, Ensemble) and conn.solver.weights:
            # TODO: make less hacky.
            # Have to do this because when a weight_solver
            # is provided, then learning rules should operate on
            # "decoders" which is really the weight matrix.
            model.sig[conn]['transform'] = model.sig[conn]['decoders']

        rule = conn.learning_rule
        if is_iterable(rule):
            for r in itervalues(rule) if isinstance(rule, dict) else rule:
                model.build(r)
        elif rule is not None:
            model.build(rule)

    model.params[conn] = BuiltConnection(decoders=decoders,
                                         eval_points=eval_points,
                                         transform=transform,
                                         solver_info=solver_info)