Code example #1
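# From nengo_dl's TensorGraph tests: checks which built signals
# mark_signals() flags as trainable by default.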
import nengo

from nengo_dl import tensor_graph, utils


def test_mark_signals():
    with nengo.Network() as net:
        ens0 = nengo.Ensemble(10, 1, neuron_type=nengo.LIF())
        ens1 = nengo.Ensemble(20, 1, neuron_type=nengo.Direct())
        ens2 = nengo.Ensemble(30, 1)
        conn0 = nengo.Connection(ens0, ens1)
        conn1 = nengo.Connection(ens0, ens1, learning_rule_type=nengo.PES())
        conn2 = nengo.Connection(ens0, ens2, learning_rule_type=nengo.Voja())
        nengo.Probe(ens2)

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None,
                                  utils.NullProgressBar(), None)
    tg.mark_signals()

    assert model.sig[ens0]["encoders"].trainable
    assert model.sig[ens1]["encoders"].trainable
    assert not model.sig[ens2]["encoders"].trainable
    assert model.sig[ens0.neurons]["bias"].trainable
    assert model.sig[ens2.neurons]["bias"].trainable
    assert model.sig[conn0]["weights"].trainable
    assert not model.sig[conn1]["weights"].trainable
    assert model.sig[conn2]["weights"].trainable

    trainables = (
        model.sig[ens0]["encoders"],
        model.sig[ens1]["encoders"],
        model.sig[ens0.neurons]["bias"],
        model.sig[ens2.neurons]["bias"],
        model.sig[conn0]["weights"],
        model.sig[conn2]["weights"],
    )

    for op in model.operators:
        for sig in op.all_signals:
            if sig in trainables:
                assert sig.trainable
            else:
                assert not sig.trainable
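As the assertions show, parameters modified by an online learning rule default to non-trainable (the PES-learned weights on conn1 and, through Voja, the encoders of ens2), while the remaining encoders, biases, and connection weights default to trainable.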
Code example #2
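# From nengo_dl's TensorGraph tests: checks that a planner configured on the
# network (here graph_optimizer.noop_planner) is picked up when TensorGraph
# builds its operator plan.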
import nengo
import pytest
import tensorflow as tf

from nengo_dl import graph_optimizer, tensor_graph, utils


# NOTE: parametrization assumed from the three-way handling of config_planner below
@pytest.mark.parametrize("config_planner", (True, False, None))
def test_planner_config(config_planner):
    with nengo.Network() as net:
        if config_planner is not None:
            net.config.configures(nengo.Network)
            if config_planner:
                net.config[nengo.Network].set_param(
                    "planner", nengo.params.Parameter(
                        "planner", graph_optimizer.noop_planner))

    model = nengo.builder.Model()
    model.build(net)
    sig = nengo.builder.signal.Signal([1])
    sig2 = nengo.builder.signal.Signal([1])
    sig3 = nengo.builder.signal.Signal([1])
    model.add_op(nengo.builder.operator.DotInc(sig, sig2, sig3))
    model.add_op(nengo.builder.operator.DotInc(sig, sig2, sig3))

    tg = tensor_graph.TensorGraph(model, None, None, tf.float32, 1, None,
                                  utils.NullProgressBar())

    assert len(tg.plan) == (2 if config_planner else 1)
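For reference, the no-op planner configured above can be sketched as follows (an illustrative sketch, not necessarily nengo_dl's exact implementation): it puts every operator in its own group, so nothing is merged and the plan has one entry per operator.

def noop_planner_sketch(operators):
    # one execution group per operator, preserving the original order
    return [(op,) for op in operators]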
Code example #3
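    # Excerpt of TensorGraph.call from nengo_dl/tensor_graph.py (TensorGraph is
    # implemented as a Keras layer); names such as tf, np, builder, config,
    # utils, trackable, logger, and BuildError are module-level imports/globals
    # in that file.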
    def call(self, inputs, training=None, progress=None, stateful=False):
        """
        Constructs the graph elements to simulate the model.

        Parameters
        ----------
        inputs : list of ``tf.Tensor``
            Input layers/tensors for the network (must match the structure defined in
            `.build_inputs`).
        training : bool
            Whether the network is being run in training or inference mode.  If None,
            uses the symbolic Keras learning phase variable.
        progress : `.utils.ProgressBar`
            Progress bar for construction stage.
        stateful : bool
            Whether or not to build the model to support preserving the internal state
            between executions.

        Returns
        -------
        probe_arrays : list of ``tf.Tensor``
            Tensors representing the output of all the Probes in the network (order
            corresponding to ``self.model.probes``, which is the order the Probes were
            instantiated).
        """

        super().call(inputs, training=training)

        if training == 1 and self.inference_only:
            raise BuildError(
                "TensorGraph was created with inference_only=True; cannot be built "
                "with training=%s" % training)

        tf.random.set_seed(self.seed)

        if progress is None:
            progress = utils.NullProgressBar()

        # reset signaldict
        self.signals.reset()

        # create these constants once here for reuse in different operators
        self.signals.dt = tf.constant(self.dt, self.dtype)
        self.signals.dt_val = self.dt  # store the actual value as well
        self.signals.zero = tf.constant(0, self.dtype)
        self.signals.one = tf.constant(1, self.dtype)

        # set up invariant inputs
        with trackable.no_automatic_dependency_tracking_scope(self):
            self.node_inputs = {}
        for n, inp in zip(self.invariant_inputs, inputs):
            # specify shape of inputs (keras sometimes loses this shape information)
            inp.set_shape([self.minibatch_size, inp.shape[1], n.size_out])

            self.node_inputs[n] = inp

        self.steps_to_run = inputs[-1][0, 0]

        # initialize op builder
        build_config = builder.BuildConfig(
            inference_only=self.inference_only,
            lif_smoothing=config.get_setting(self.model, "lif_smoothing"),
            cpu_only=self.device == "/cpu:0" or not utils.tf_gpu_installed,
            rng=np.random.RandomState(self.seed),
            training=(tf.keras.backend.learning_phase()
                      if training is None else training),
        )
        self.op_builder = builder.Builder(self.plan, self.signals,
                                          build_config)

        # pre-build stage
        with progress.sub("pre-build stage", max_value=len(self.plan)) as sub:
            self.op_builder.build_pre(sub)

        # build stage
        with progress.sub("build stage",
                          max_value=len(self.plan) * self.unroll) as sub:
            steps_run, probe_arrays, final_internal_state, final_base_params = (
                self._build_loop(sub)
                if self.use_loop else self._build_no_loop(sub))

        # store these so that they can be accessed after the initial build
        with trackable.no_automatic_dependency_tracking_scope(self):
            self.steps_run = steps_run
            self.probe_arrays = probe_arrays
            self.final_internal_state = final_internal_state
            self.final_base_params = final_base_params

        # logging
        logger.info("Number of reads: %d",
                    sum(x for x in self.signals.read_types.values()))
        for x in self.signals.read_types.items():
            logger.info("    %s: %d", *x)
        logger.info("Number of writes: %d",
                    sum(x for x in self.signals.write_types.values()))
        for x in self.signals.write_types.items():
            logger.info("    %s: %d", *x)

        # note: always return steps_run so that the simulation will run for the given
        # number of steps, even if there are no output probes
        outputs = list(probe_arrays.values()) + [steps_run]

        updates = []
        if stateful:
            # update saved state
            updates.extend(
                var.assign(val) for var, val in zip(self.saved_state.values(),
                                                    final_internal_state))

        # if any of the base params have changed (due to online learning rules) then we
        # also need to assign those back to the original variable (so that their
        # values will persist). any parameters targeted by online learning rules
        # will be minibatched, so we only need to update the minibatched params.
        for (key, var), val in zip(self.base_params.items(),
                                   final_base_params):
            try:
                minibatched = self.base_arrays_init["non_trainable"][key][-1]
            except KeyError:
                minibatched = self.base_arrays_init["trainable"][key][-1]

            if minibatched:
                updates.append(var.assign(val))

        logger.info("Number of variable updates: %d", len(updates))

        if len(updates) > 0:
            with tf.control_dependencies(updates):
                outputs = [tf.identity(x) for x in outputs]

        return outputs
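The final block above relies on a standard TensorFlow idiom to guarantee that the state and parameter updates run even though callers only fetch the outputs. A minimal standalone sketch of that idiom (independent of nengo_dl):

import tensorflow as tf

var = tf.Variable(0.0)
new_val = tf.constant(1.0)
output = tf.constant(2.0)

updates = [var.assign(new_val)]
with tf.control_dependencies(updates):
    # wrapping the outputs in tf.identity ties them to the update ops, so
    # fetching the outputs in graph mode also triggers the assignments
    output = tf.identity(output)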
Code example #4
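# From nengo_dl's TensorGraph tests: checks that the trainable setting can be
# overridden through the config system at the network, class, and instance
# level, and how overrides interact with learning rules.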
import nengo
import pytest

from nengo_dl import config, tensor_graph, utils


def test_mark_signals_config():
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        net.config[nengo.Ensemble].trainable = False

        with nengo.Network():
            # check that object in subnetwork inherits config from parent
            ens0 = nengo.Ensemble(10, 1, label="ens0")

            # check that ens.neurons can be set independent of ens
            net.config[ens0.neurons].trainable = True

            with nengo.Network():
                with nengo.Network():
                    # check that subnetworks can override parent configs
                    config.configure_settings(trainable=True)
                    ens1 = nengo.Ensemble(10, 1, label="ens1")

                    with nengo.Network():
                        # check that subnetworks inherit the trainable settings
                        # from parent networks
                        ens3 = nengo.Ensemble(10, 1, label="ens3")

            # check that instances can be set independent of class
            ens2 = nengo.Ensemble(10, 1, label="ens2")
            net.config[ens2].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    progress = utils.NullProgressBar()

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    tg.mark_signals()

    assert not model.sig[ens0]["encoders"].trainable
    assert model.sig[ens0.neurons]["bias"].trainable

    assert model.sig[ens1]["encoders"].trainable

    assert model.sig[ens2]["encoders"].trainable

    assert model.sig[ens3]["encoders"].trainable

    # check that learning rule connections can be manually set to True
    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        conn0 = nengo.Connection(a, b, learning_rule_type=nengo.PES())
        net.config[conn0].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[conn0]["weights"].trainable

    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Node([0])
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(a, ens, learning_rule_type=nengo.Voja())
        net.config[nengo.Ensemble].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[ens]["encoders"].trainable

    # check that models with no toplevel work
    sig = nengo.builder.signal.Signal([0])
    op = nengo.builder.operator.Reset(sig, 1)
    model = nengo.builder.Model()
    model.add_op(op)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert not sig.trainable
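The same trainability configuration is available to user models through nengo_dl's public configure_settings API. A minimal sketch of the pattern exercised above, assuming a standard nengo_dl installation:

import nengo
import nengo_dl

with nengo.Network() as net:
    # enable per-object trainable settings, with no global override
    nengo_dl.configure_settings(trainable=None)

    # freeze all ensembles by default, then re-enable training for one instance
    net.config[nengo.Ensemble].trainable = False
    ens = nengo.Ensemble(10, 1)
    net.config[ens].trainable = True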