def test_tensor_layer(Simulator):
    with nengo.Network() as net:
        inp = nengo.Node(np.arange(12))

        # check that connection arguments work
        layer0 = tensor_layer(inp, tf.identity, transform=2)

        assert isinstance(layer0, TensorNode)
        p0 = nengo.Probe(layer0)

        # check that arguments are passed to layer function
        layer1 = tensor_layer(layer0,
                              lambda x, axis: tf.reduce_sum(x, axis=axis),
                              axis=1,
                              shape_in=(2, 6))
        assert layer1.size_out == 6
        p1 = nengo.Probe(layer1)

        # check that ensemble layers work
        layer2 = tensor_layer(layer1,
                              nengo.RectifiedLinear(),
                              gain=[1] * 6,
                              bias=[-20] * 6)
        assert isinstance(layer2, nengo.ensemble.Neurons)
        assert np.allclose(layer2.ensemble.gain, 1)
        assert np.allclose(layer2.ensemble.bias, -20)
        p2 = nengo.Probe(layer2)

        # check that size_in can be inferred from transform
        layer3 = tensor_layer(layer2, lambda x: x, transform=np.ones((1, 6)))
        assert layer3.size_in == 1

        # check that size_in can be inferred from shape_in
        layer4 = tensor_layer(layer3,
                              lambda x: x,
                              transform=nengo.dists.Uniform(-1, 1),
                              shape_in=(2, ))
        assert layer4.size_in == 2

    with Simulator(net, minibatch_size=2) as sim:
        sim.step()

    x = np.arange(12) * 2
    assert np.allclose(sim.data[p0], x)

    x = np.sum(np.reshape(x, (2, 6)), axis=0)
    assert np.allclose(sim.data[p1], x)

    x = np.maximum(x - 20, 0)
    assert np.allclose(sim.data[p2], x)
Example #2
    def convert_dense(self, model, pre_layer, input_shape, index,
                      onnx_model_graph):
        onnx_model_graph_node = onnx_model_graph.node
        node_info = onnx_model_graph_node[index]
        dense_num = self.get_dense_num(node_info, onnx_model_graph)
        neuron_type = self.get_neuronType(index, onnx_model_graph_node)
        with model:
            x = nengo_dl.tensor_layer(pre_layer,
                                      tf.layers.dense,
                                      units=dense_num)
            if neuron_type != "softmax":
                if neuron_type == "lif":
                    x = nengo_dl.tensor_layer(
                        x, nengo.LIF(amplitude=self.amplitude))
                elif neuron_type == "lifrate":
                    x = nengo_dl.tensor_layer(
                        x, nengo.LIFRate(amplitude=self.amplitude))
                elif neuron_type == "adaptivelif":
                    x = nengo_dl.tensor_layer(
                        x, nengo.AdaptiveLIF(amplitude=self.amplitude))
                elif neuron_type == "adaptivelifrate":
                    x = nengo_dl.tensor_layer(
                        x, nengo.AdaptiveLIFRate(amplitude=self.amplitude))
                elif neuron_type == "izhikevich":
                    x = nengo_dl.tensor_layer(
                        x, nengo.Izhikevich(amplitude=self.amplitude))
                elif neuron_type == "softlifrate":
                    x = nengo_dl.tensor_layer(
                        x,
                        nengo_dl.neurons.SoftLIFRate(amplitude=self.amplitude))
                elif neuron_type is None:  # default neuron type is LIF
                    x = nengo_dl.tensor_layer(
                        x, nengo.LIF(amplitude=self.amplitude))
        output_shape = [dense_num, 1]
        return model, output_shape, x
Example #4
    def convert_flatten(self, model, pre_layer, input_shape):
        with model:
            x = nengo_dl.tensor_layer(pre_layer, tf.layers.flatten)
        output_shape = 1
        for index in range(len(input_shape)):
            output_shape *= input_shape[index]
        output_shape = [output_shape, 1]
        return model, output_shape, x
Example #5
    def convert_conv2d(self, model, pre_layer, input_shape, index,
                       onnx_model_graph):
        onnx_model_graph_node = onnx_model_graph.node
        node_info = onnx_model_graph_node[index]
        neuron_type = self.get_neuronType(index, onnx_model_graph_node)
        filters = self.get_filterNum(node_info, onnx_model_graph)
        padding = "valid"  # default when no auto_pad attribute is present
        for attr_index in range(len(node_info.attribute)):
            if node_info.attribute[attr_index].name == "kernel_shape":
                kernel_size = node_info.attribute[attr_index].ints[0]
            elif node_info.attribute[attr_index].name == "strides":
                strides = node_info.attribute[attr_index].ints[0]
            elif node_info.attribute[attr_index].name == "auto_pad":
                padding = node_info.attribute[attr_index].s.decode(
                    'ascii').lower()
                if padding != "valid":
                    padding = "same"
        if padding == "same":
            output_shape = [input_shape[0], input_shape[1], filters]
        else:
            output_shape = [
                int((input_shape[0] - kernel_size) / strides + 1),
                int((input_shape[1] - kernel_size) / strides + 1), filters
            ]
        with model:
            x = nengo_dl.tensor_layer(pre_layer,
                                      tf.layers.conv2d,
                                      shape_in=(input_shape[0],
                                                input_shape[1],
                                                input_shape[2]),
                                      filters=filters,
                                      kernel_size=kernel_size,
                                      padding=padding)
            if neuron_type == "lif":
                x = nengo_dl.tensor_layer(
                    x, nengo.LIF(amplitude=self.amplitude))
            elif neuron_type == "lifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo.LIFRate(amplitude=self.amplitude))
            elif neuron_type == "adaptivelif":
                x = nengo_dl.tensor_layer(
                    x, nengo.AdaptiveLIF(amplitude=self.amplitude))
            elif neuron_type == "adaptivelifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo.AdaptiveLIFRate(amplitude=self.amplitude))
            elif neuron_type == "izhikevich":
                x = nengo_dl.tensor_layer(
                    x, nengo.Izhikevich(amplitude=self.amplitude))
            elif neuron_type == "softlifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo_dl.neurons.SoftLIFRate(amplitude=self.amplitude))
            elif neuron_type is None:  # default neuron type is LIF
                x = nengo_dl.tensor_layer(
                    x, nengo.LIF(amplitude=self.amplitude))
        return model, output_shape, x
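
These convert_* methods appear to be excerpts from an ONNX-to-NengoDL converter class. As a rough sketch of how one of them might be driven (the `converter` object and the running `model`/`layer`/`shape` state are assumptions standing in for the parameters the source passes around):

import onnx

# hypothetical driver loop for the convert_* methods above
onnx_model = onnx.load("model.onnx")
graph = onnx_model.graph
for i, node in enumerate(graph.node):
    if node.op_type == "Conv":
        model, shape, layer = converter.convert_conv2d(
            model, layer, shape, i, graph)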
Example #6
def test_tensor_layer_deprecation(Simulator):
    with nengo.Network() as net:
        inp = nengo.Node([0])
        with pytest.warns(DeprecationWarning, match="nengo_dl.Layer instead"):
            out = tensor_layer(inp, lambda x: x + 1)
        p = nengo.Probe(out)

    with Simulator(net) as sim:
        sim.run_steps(5)

    assert np.allclose(sim.data[p], 1)
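
The deprecation warning above points to the newer nengo_dl.Layer interface. A minimal sketch of the equivalent network, assuming nengo-dl 3.x (where Layer wraps a callable and is applied to a Nengo object):

with nengo.Network() as net:
    inp = nengo.Node([0])
    # Layer(func)(inp) builds a TensorNode wrapping func and connects
    # inp to it, mirroring the deprecated tensor_layer(inp, func)
    out = nengo_dl.Layer(lambda x: x + 1)(inp)
    p = nengo.Probe(out)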
Example #7
    def _build(self, num_classes, num_layers, num_filters, kernel_sizes):
        channel_each_layer = [1] + num_filters
        with nengo.Network() as net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            neuron_type = nengo.LIF(amplitude=0.01)

            # make the network parameters trainable by default
            nengo_dl.configure_settings(trainable=True)

            inp = nengo.Node([0] * self.input_size[0] * self.input_size[1])

            x = nengo_dl.tensor_layer(
                inp, tf.layers.conv2d,
                shape_in=(self.input_size[0], self.input_size[1],
                          channel_each_layer[0]),
                filters=num_filters[0], kernel_size=kernel_sizes[0],
                padding='same')
            x = nengo_dl.tensor_layer(x, neuron_type)

            for i in range(1, num_layers):
                x = nengo_dl.tensor_layer(
                    x, tf.layers.conv2d,
                    shape_in=(self.input_size[0], self.input_size[1],
                              channel_each_layer[i]),
                    filters=num_filters[i], kernel_size=kernel_sizes[i],
                    padding='same')
                x = nengo_dl.tensor_layer(x, neuron_type)

            x = nengo_dl.tensor_layer(
                x, tf.layers.average_pooling2d,
                shape_in=(self.input_size[0], self.input_size[1],
                          channel_each_layer[-1]),
                pool_size=(self.input_size[0], self.input_size[1]),
                strides=(self.input_size[0], self.input_size[1]))

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=num_classes)
            out_p = nengo.Probe(x)
            out_p_filt = nengo.Probe(x, synapse=self.synapse)
            return inp, out_p, out_p_filt, net
    def build_network(neuron_type, ens_params):
        with nengo.Network() as net:
            nengo_dl.configure_settings(trainable=False)

            inp = nengo.Node([0] * 28 * 28)

            x = nengo_dl.tensor_layer(inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 64),
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(12, 12, 64),
                                      filters=128,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(10, 10, 128),
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

        return net, inp, x
Example #10
    def convert_avgpool2d(self, model, pre_layer, input_shape, node_info):
        for index in range(len(node_info.attribute)):
            if node_info.attribute[index].name == "kernel_shape":
                pool_size = node_info.attribute[index].ints[0]
            elif node_info.attribute[index].name == "strides":
                strides = node_info.attribute[index].ints[0]
        output_shape = [
            int(input_shape[0] / strides),
            int(input_shape[1] / strides), input_shape[2]
        ]
        with model:
            x = nengo_dl.tensor_layer(pre_layer,
                                      tf.layers.average_pooling2d,
                                      shape_in=(input_shape[0],
                                                input_shape[1],
                                                input_shape[2]),
                                      pool_size=pool_size,
                                      strides=strides)
        return model, output_shape, x
def test_check_gradients_error(Simulator):
    # check_gradients detects nans in gradient
    with nengo.Network() as net:
        x = nengo.Node([0])
        y = tensor_layer(x, lambda x: 1 / x)
        nengo.Probe(y)

    with Simulator(net) as sim:
        with pytest.raises(SimulationError):
            sim.check_gradients()

    # check_gradients detects errors in gradient (in this case caused by the
    # fact that nengo.Alpha doesn't have a TensorFlow implementation)
    with nengo.Network() as net:
        x = nengo.Node([0])
        nengo.Probe(x, synapse=nengo.Alpha(0.1))

    with Simulator(net) as sim:
        with pytest.raises(SimulationError):
            sim.check_gradients()
Example #12
    def convert_batchnormalization2d(self, model, pre_layer, input_shape,
                                     node_info):
        # defaults, used when the attributes are absent or stored as zero
        momentum = 0.99
        epsilon = 0.001
        for index in range(len(node_info.attribute)):
            if node_info.attribute[index].name == "momentum":
                momentum = round(node_info.attribute[index].f, 4)
                if momentum == 0:
                    momentum = 0.99
            elif node_info.attribute[index].name == "epsilon":
                epsilon = round(node_info.attribute[index].f, 4)
                if epsilon == 0:
                    epsilon = 0.001
        with model:
            x = nengo_dl.tensor_layer(pre_layer,
                                      tf.layers.batch_normalization,
                                      shape_in=(input_shape[0],
                                                input_shape[1],
                                                input_shape[2]),
                                      momentum=momentum,
                                      epsilon=epsilon)
        output_shape = input_shape
        return model, output_shape, x
Example #13
    # we'll make all the nengo objects in the network
    # non-trainable. we could train them if we wanted, but they don't
    # add any representational power. note that this doesn't affect
    # the internal components of tensornodes, which will always be
    # trainable or non-trainable depending on the code written in
    # the tensornode.
    nengo_dl.configure_settings(trainable=False)

    # the input node that will be used to feed in input images
    inp = nengo.Node([0] * 64 * 64)

    # add the first convolutional layer
    x = nengo_dl.tensor_layer(inp,
                              tf.layers.conv2d,
                              shape_in=(64, 64, 1),
                              filters=32,
                              kernel_size=3)

    # apply the neural nonlinearity
    x = nengo_dl.tensor_layer(x, neuron_type)

    # add another convolutional layer
    x = nengo_dl.tensor_layer(x,
                              tf.layers.conv2d,
                              shape_in=(62, 62, 32),
                              filters=64,
                              kernel_size=3)
    x = nengo_dl.tensor_layer(x, neuron_type)

    # add a pooling layer
Example #14
    def _build_net(self):
        with nengo.Network() as net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            neuron_type = nengo.LIF(amplitude=0.01)

            # we'll make all the nengo objects in the network
            # non-trainable. we could train them if we wanted, but they don't
            # add any representational power. note that this doesn't affect
            # the internal components of tensornodes, which will always be
            # trainable or non-trainable depending on the code written in
            # the tensornode.
            nengo_dl.configure_settings(trainable=False)

            # the input node that will be used to feed in input images
            self.inp = nengo.Node([0] * 28 * 28, label='input')

            # add the first convolutional layer
            x = nengo_dl.tensor_layer(self.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x.label = 'conv1_pre'
            x.conn_type = 'conv2d'
            x = nengo_dl.tensor_layer(x, tf.identity)
            x.label = 'conv1'
            x.conn_type = 'identity'

            # apply the neural nonlinearity
            x = nengo_dl.tensor_layer(x, neuron_type)

            # add another convolutional layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      filters=64,
                                      kernel_size=3)
            x.label = 'conv2_pre'
            x.conn_type = 'conv2d'
            x = nengo_dl.tensor_layer(x, tf.identity)
            x.label = 'conv2'
            x.conn_type = 'identity'
            x = nengo_dl.tensor_layer(x, neuron_type)

            # add a pooling layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 64),
                                      pool_size=2,
                                      strides=2)
            x.label = 'pool1_pre'
            x.conn_type = 'average_pooling2d'
            x = nengo_dl.tensor_layer(x, tf.identity)
            x.label = 'pool1'
            x.conn_type = 'identity'

            # another convolutional layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(12, 12, 64),
                                      filters=128,
                                      kernel_size=3)
            x.label = 'conv3_pre'
            x.conn_type = 'conv2d'
            x = nengo_dl.tensor_layer(x, tf.identity)
            x.label = 'conv3'
            x.conn_type = 'identity'
            x = nengo_dl.tensor_layer(x, neuron_type)
            x.label = 'conv3_act'

            # another pooling layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(10, 10, 128),
                                      pool_size=2,
                                      strides=2)
            x.label = 'pool2_pre'
            x.conn_type = 'average_pooling2d'
            x = nengo_dl.tensor_layer(x, tf.identity)
            x.label = 'pool2'
            x.conn_type = 'identity'

            # linear readout
            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
            x.label = 'output_pre'
            x.conn_type = 'dense'
            x = nengo_dl.tensor_layer(
                x,
                tf.identity)  # TODO maybe use nengo passthrough nodes instead?
            x.label = 'output'
            # x = nengo_dl.tensor_layer(x, tf.identity)
            # x.label = 'output_id'

            # x = x + stim
            self.stim = nengo.Node([0] * self.action_space_size, label='stim')
            # self.stim_conns = attach_stim(self.stim, x, (np.arange(10), np.arange(15)))

            # we'll create two different output probes, one with a filter
            # (for when we're simulating the network over time and
            # accumulating spikes), and one without (for when we're
            # training the network using a rate-based approximation)
            self.out_p = nengo.Probe(x)
            self.out_p_filt = nengo.Probe(x, synapse=0.1)

            # probe stimulated sites
            net_nodes = [node.label for node in net.nodes]
            for node in self.attach_at.keys():
                self.probes.append(
                    nengo.Probe(net.nodes[net_nodes.index(node)]))

        return net
Example #15
def mnist(use_tensor_layer=True):
    """
    A network designed to stress-test tensor layers (based on mnist net).

    Parameters
    ----------
    use_tensor_layer : bool
        If True, use individual tensor_layers to build the network, as opposed
        to a single TensorNode containing all layers.

    Returns
    -------
    net : `nengo.Network`
        benchmark network
    """

    with nengo.Network() as net:
        # create node to feed in images
        net.inp = nengo.Node(np.ones(28 * 28))

        if use_tensor_layer:
            nengo_nl = nengo.RectifiedLinear()

            ensemble_params = dict(max_rates=nengo.dists.Choice([100]),
                                   intercepts=nengo.dists.Choice([0]))
            amplitude = 1
            synapse = None

            x = nengo_dl.tensor_layer(net.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      transform=amplitude,
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 32),
                                      synapse=synapse,
                                      transform=amplitude,
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=128)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.dropout,
                                      rate=0.4,
                                      transform=amplitude)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
        else:
            nl = tf.nn.relu

            # def softlif_layer(x, sigma=1, tau_ref=0.002, tau_rc=0.02,
            #                   amplitude=1):
            #     # x -= 1
            #     z = tf.nn.softplus(x / sigma) * sigma
            #     z += 1e-10
            #     rates = amplitude / (tau_ref + tau_rc * tf.log1p(1 / z))
            #     return rates

            @nengo_dl.reshaped((28, 28, 1))
            def mnist_node(_, x):  # pragma: no cover
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
                x = tf.contrib.layers.flatten(x)
                x = tf.layers.dense(x, 128, activation=nl)
                x = tf.layers.dropout(x, rate=0.4)
                x = tf.layers.dense(x, 10)

                return x

            node = nengo_dl.TensorNode(mnist_node,
                                       size_in=28 * 28,
                                       size_out=10)
            x = node
            nengo.Connection(net.inp, node, synapse=None)

        net.p = nengo.Probe(x)

    return net
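
A minimal sketch of exercising this benchmark network (the minibatch size here is arbitrary):

net = mnist(use_tensor_layer=True)

# a single timestep is enough to confirm the network builds and simulates
with nengo_dl.Simulator(net, minibatch_size=2) as sim:
    sim.step()
    print(sim.data[net.p].shape)  # (minibatch, steps, 10)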
Example #16
def build_network(neuron_type):
    with nengo.Network() as net:
        # we'll make all the nengo objects in the network
        # non-trainable. we could train them if we wanted, but they don't
        # add any representational power so we can save some computation
        # by ignoring them. note that this doesn't affect the internal
        # components of tensornodes, which will always be trainable or
        # non-trainable depending on the code written in the tensornode.
        nengo_dl.configure_settings(trainable=False)

        # the input node that will be used to feed in input images
        inp = nengo.Node(nengo.processes.PresentInput(mnist.test.images, 0.2))

        # add the first convolutional layer
        x = nengo_dl.tensor_layer(inp,
                                  tf.layers.conv2d,
                                  shape_in=(28, 28, 1),
                                  filters=32,
                                  kernel_size=3)

        # apply the neural nonlinearity
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        # add another convolutional layer
        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(26, 26, 32),
                                  filters=16,
                                  kernel_size=3)
        # apply the neural nonlinearity
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        # add another convolutional layer
        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(24, 24, 16),
                                  filters=8,
                                  kernel_size=3)
        # add a pooling layer
        x = nengo_dl.tensor_layer(x,
                                  tf.layers.max_pooling2d,
                                  shape_in=(22, 22, 8),
                                  pool_size=2,
                                  strides=2)
        # add another pooling layer
        x = nengo_dl.tensor_layer(x,
                                  tf.layers.max_pooling2d,
                                  shape_in=(11, 11, 8),
                                  pool_size=2,
                                  strides=2)
        # add a dense layer, with neural nonlinearity.
        # note that for all-to-all connections like this we can use the
        # normal nengo connection transform to implement the weights
        # (instead of using a separate tensor_layer). we'll use a
        # Glorot uniform distribution to initialize the weights.
        x, conn = nengo_dl.tensor_layer(x,
                                        neuron_type,
                                        **ens_params,
                                        transform=nengo_dl.dists.Glorot(),
                                        shape_in=(128, ),
                                        return_conn=True)
        # we need to set the weights and biases to be trainable
        # (since we set the default to be trainable=False)
        # note: we used return_conn=True above so that we could access
        # the connection object for this reason.
        net.config[x].trainable = True
        net.config[conn].trainable = True

        # add a dropout layer
        x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=0.3)

        # the final 10 dimensional class output
        x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

    return net, inp, x
def spiking_mnist(ctx, n_epochs):
    load = ctx.obj["load"]
    reps = ctx.obj["reps"]

    neuron_type = nengo.LIF(amplitude=0.01)
    ens_params = dict(max_rates=nengo.dists.Choice([100]),
                      intercepts=nengo.dists.Choice([0]))
    minibatch_size = 200
    n_steps = 50

    with nengo.Network() as net:
        nengo_dl.configure_settings(trainable=False)

        inp = nengo.Node([0] * 28 * 28)

        x = nengo_dl.tensor_layer(inp,
                                  tf.layers.conv2d,
                                  shape_in=(28, 28, 1),
                                  filters=32,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(26, 26, 32),
                                  filters=64,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.average_pooling2d,
                                  shape_in=(24, 24, 64),
                                  pool_size=2,
                                  strides=2)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(12, 12, 64),
                                  filters=128,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.average_pooling2d,
                                  shape_in=(10, 10, 128),
                                  pool_size=2,
                                  strides=2)

        out = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

        out_p = nengo.Probe(out)
        spk_out_p = nengo.Probe(out, synapse=0.1)

    if load:
        with open("spiking_mnist_data_saved.pkl", "rb") as f:
            results = pickle.load(f)
    else:
        results = {"pre": [], "post": [], "spiking": []}

    urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz",
                "mnist.pkl.gz")
    with gzip.open("mnist.pkl.gz") as f:
        train_data, _, test_data = pickle.load(f, encoding="latin1")
    train_data = list(train_data)
    test_data = list(test_data)
    for data in (train_data, test_data):
        one_hot = np.zeros((data[0].shape[0], 10))
        one_hot[np.arange(data[0].shape[0]), data[1]] = 1
        data[1] = one_hot

    train_data = {
        inp: train_data[0][:, None, :],
        out_p: train_data[1][:, None, :]
    }
    test_data = {
        inp: test_data[0][:, None, :],
        out_p: test_data[1][:, None, :]
    }
    test_data_time = {
        inp: np.tile(test_data[inp], (1, n_steps, 1)),
        spk_out_p: np.tile(test_data[out_p], (1, n_steps, 1))
    }

    for _ in range(reps):
        # construct the simulator
        with nengo_dl.Simulator(net, minibatch_size=minibatch_size) as sim:

            def objective(x, y):
                return tf.nn.softmax_cross_entropy_with_logits_v2(logits=x,
                                                                  labels=y)

            opt = tf.train.RMSPropOptimizer(learning_rate=0.001)

            def classification_error(outputs, targets):
                return 100 * tf.reduce_mean(
                    tf.cast(
                        tf.not_equal(tf.argmax(outputs[:, -1], axis=-1),
                                     tf.argmax(targets[:, -1], axis=-1)),
                        tf.float32))

            # collect error before training
            results["pre"].append(
                sim.loss(test_data, {out_p: classification_error},
                         training=True))
            print("error before training: %.2f%%" % results["pre"][-1])

            # run training
            sim.train(train_data,
                      opt,
                      objective={out_p: objective},
                      n_epochs=n_epochs)

            # collect error after training
            results["post"].append(
                sim.loss(test_data, {out_p: classification_error},
                         training=True))
            print("error after training: %.2f%%" % results["post"][-1])

            # collect spiking error
            results["spiking"].append(
                sim.loss(test_data_time, {spk_out_p: classification_error},
                         training=False))
            print("spiking neuron error: %.2f%%" % results["spiking"][-1])

        with open("spiking_mnist_data.pkl", "wb") as f:
            pickle.dump(results, f)

    print("pre", bootstrap_ci(results["pre"]))
    print("post", bootstrap_ci(results["post"]))
    print("spiking", bootstrap_ci(results["spiking"]))
    sim.run(1.0, data={a: np.random.randn(10, 1000, 1)})
    print(sim.data[p])

# snippet 5 (section 3.2)
import tensorflow as tf

inputs = np.random.randn(50, 1000, 1)
targets = inputs**2

with nengo_dl.Simulator(net, minibatch_size=10) as sim:
    sim.train(
        data={a: inputs,
              p: targets},
        optimizer=tf.train.AdamOptimizer(),
        n_epochs=2,
        objective={p: nengo_dl.objectives.mse})

# snippet 6 (section 3.3)
with net:
    def tensor_func(time, x):
        return tf.layers.dense(x, 100, activation=tf.nn.relu)
    t = nengo_dl.TensorNode(tensor_func, size_in=1)
    nengo.Connection(a, t)
    nengo.Connection(t, b.neurons)

# snippet 7 (section 3.3)
with net:
    t = nengo_dl.tensor_layer(a, tf.layers.dense, units=100,
                              activation=tf.nn.relu)
    nengo.Connection(t, b.neurons)
# num_test, _, _, _ =  test_features.shape
# num_classes = len(np.unique(train_labels))

# #pre-processing 
# train_features = train_features.astype('float32')/255
# test_features = test_features.astype('float32')/255
# # convert class labels to binary class labels
# train_labels = np_utils.to_categorical(train_labels, num_classes)
# test_labels = np_utils.to_categorical(test_labels, num_classes)

with nengo.Network() as net:
    neuron_type = nengo.LIF(amplitude=0.001)
    inp = nengo.Node([0] * 3 * 32 * 32)
    nengo_dl.configure_settings(trainable=False)

    x = nengo_dl.tensor_layer(inp, tf.layers.conv2d, shape_in=(32, 32, 3),
                              filters=32, kernel_size=3, use_bias=True)
    x = nengo_dl.tensor_layer(x, neuron_type)

    x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=0.1)

    x = nengo_dl.tensor_layer(x, tf.layers.conv2d, shape_in=(30, 30, 32),
                              filters=64, kernel_size=5, strides=2,
                              use_bias=False)
    x = nengo_dl.tensor_layer(x, neuron_type)

    x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=0.2)

    x = nengo_dl.tensor_layer(x, tf.layers.flatten)

    x = nengo_dl.tensor_layer(x, tf.layers.dense, units=128)
    x = nengo_dl.tensor_layer(x, neuron_type)

    x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=0.3)
Example #20
# snippet 5 (section 3.2)
import tensorflow as tf

inputs = np.random.randn(50, 1000, 1)
targets = inputs**2

with nengo_dl.Simulator(net, minibatch_size=10) as sim:
    sim.train(inputs={a: inputs},
              targets={p: targets},
              optimizer=tf.train.AdamOptimizer(),
              n_epochs=2,
              objective="mse")

# snippet 6 (section 3.3)
with net:

    def tensor_func(t, x):
        return tf.layers.dense(x, 100, activation=tf.nn.relu)

    t = nengo_dl.TensorNode(tensor_func, size_in=1)
    nengo.Connection(a, t)
    nengo.Connection(t, b.neurons)

# snippet 7 (section 3.3)
with net:
    t = nengo_dl.tensor_layer(a,
                              tf.layers.dense,
                              units=100,
                              activation=tf.nn.relu)
    nengo.Connection(t, b.neurons)
Example #22
    def build_network(neuron_type,
                      drop_p,
                      l2_weight,
                      n_units=1024,
                      num_layers=4,
                      output_size=1):
        with nengo.Network() as net:

            use_dropout = False
            if drop_p:
                use_dropout = True

            #net.config[nengo.Connection].synapse = None
            #nengo_dl.configure_settings(trainable=False)

            # input node
            inp = nengo.Node([0])

            shape_in = 1
            x = inp

            # the regularizer is a function, so why not reuse it
            reg = tf.contrib.layers.l2_regularizer(l2_weight)

            class DenseLayer(object):
                i = 0

                def pre_build(self, shape_in, shape_out):
                    self.W = tf.get_variable("weights" + str(DenseLayer.i),
                                             shape=(shape_in[1], shape_out[1]),
                                             regularizer=reg)
                    self.B = tf.get_variable("biases" + str(DenseLayer.i),
                                             shape=(1, shape_out[1]),
                                             regularizer=reg)
                    DenseLayer.i += 1

                def __call__(self, t, x):
                    return x @ self.W + self.B

            for n in range(num_layers):
                # add a fully connected layer

                a = nengo_dl.TensorNode(DenseLayer(),
                                        size_in=shape_in,
                                        size_out=n_units,
                                        label='dense{}'.format(n))
                nengo.Connection(x, a, synapse=None)

                shape_in = n_units
                x = a

                # apply an activation function
                x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

                # add a dropout layer
                x = nengo_dl.tensor_layer(x,
                                          tf.layers.dropout,
                                          rate=drop_p,
                                          training=use_dropout)

            # add an output layer
            a = nengo_dl.TensorNode(DenseLayer(),
                                    size_in=shape_in,
                                    size_out=output_size)
            nengo.Connection(x, a, synapse=None)

        return net, inp, a
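
A hypothetical call, just to show the expected argument shapes; build_network is a nested function in the source and closes over an ens_params dict, so this sketch assumes it is lifted to module scope with ens_params defined alongside it (all values here are assumptions):

ens_params = dict(max_rates=nengo.dists.Choice([100]),
                  intercepts=nengo.dists.Choice([0]))

net, inp, out = build_network(nengo.RectifiedLinear(), drop_p=0.5,
                              l2_weight=1e-4)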
Example #23
def classification_error(outputs, targets):
    return 100 * tf.reduce_mean(
        tf.cast(
            tf.not_equal(tf.argmax(outputs[:, -1], axis=-1),
                         tf.argmax(targets[:, -1], axis=-1)), tf.float32))


with nengo.Network(seed=1000) as net:
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    default_neuron_type = nengo.LIF(amplitude=0.01)
    nengo_dl.configure_settings(trainable=False)

    inp = nengo.Node([0] * 28 * 28 * 1)
    x = inp

    x = nengo_dl.tensor_layer(x,
                              tf.layers.conv2d,
                              shape_in=(28, 28, 1),
                              filters=6,
                              kernel_size=5,
                              padding="same")
    x = nengo_dl.tensor_layer(x, nengo.LIF(amplitude=0.01))

    x = nengo_dl.tensor_layer(x,
                              tf.layers.average_pooling2d,
                              shape_in=(28, 28, 6),
                              pool_size=2,
                              strides=2)

    x = nengo_dl.tensor_layer(x,
                              tf.layers.conv2d,
                              shape_in=(14, 14, 6),
                              filters=16,
                              kernel_size=5,
Example #24
    def _build_net(self):
        with nengo.Network() as net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            neuron_type = nengo.LIF(amplitude=0.01)

            # we'll make all the nengo objects in the network
            # non-trainable. we could train them if we wanted, but they don't
            # add any representational power. note that this doesn't affect
            # the internal components of tensornodes, which will always be
            # trainable or non-trainable depending on the code written in
            # the tensornode.
            nengo_dl.configure_settings(trainable=False)

            # the input node that will be used to feed in input images
            self.inp = nengo.Node([0] * 28 * 28)

            # ENCODER
            # add the first convolutional layer
            x = nengo_dl.tensor_layer(self.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)

            # apply the neural nonlinearity
            x = nengo_dl.tensor_layer(x, neuron_type)

            # add another convolutional layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)

            # downsample with a strided convolution (in place of a pooling layer)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(24, 24, 64),
                                      filters=64,
                                      kernel_size=2,
                                      strides=2)
            # x = nengo_dl.tensor_layer(x, tf.layers.average_pooling2d, shape_in=(24, 24, 64), pool_size=2, strides=2)

            # another convolutional layer
            # (W - Fw + 2P) / Sw + 1
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(12, 12, 64),
                                      filters=128,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)

            # another strided downsampling layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(10, 10, 128),
                                      filters=128,
                                      kernel_size=2,
                                      strides=2)
            # x = nengo_dl.tensor_layer(x, tf.layers.average_pooling2d, shape_in=(10, 10, 128), pool_size=2, strides=2)

            # latent
            self.latent = x

            # DECODER
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(5, 5, 128),
                                      filters=64,
                                      kernel_size=2,
                                      strides=2)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(12, 12, 64),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(24, 24, 64),
                                      filters=32,
                                      kernel_size=2,
                                      strides=2)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(26, 26, 32),
                                      filters=1,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=1,
                                      kernel_size=3)

            # linear readout
            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
            x = nengo_dl.tensor_layer(x, tf.identity)

            # x = x + out_stim
            self.out_stim = nengo.Node([0] * 10)
            self.stim_conns = attach_stim(self.out_stim, x,
                                          (np.arange(10), np.arange(15)))

            # we'll create two different output probes, one with a filter
            # (for when we're simulating the network over time and
            # accumulating spikes), and one without (for when we're
            # training the network using a rate-based approximation)
            self.out_p = nengo.Probe(x)
            self.out_p_filt = nengo.Probe(x, synapse=0.1)

        return net
Example #25
        neuron_type = nengo.LIF(amplitude=0.01)

        # we'll make all the nengo objects in the network
        # non-trainable. we could train them if we wanted, but they don't
        # add any representational power. note that this doesn't affect
        # the internal components of tensornodes, which will always be
        # trainable or non-trainable depending on the code written in
        # the tensornode.
        nengo_dl.configure_settings(trainable=False)

        # the input node that will be used to feed in input images
        inp = nengo.Node([0] * 28 * 28)

        # add the first convolutional layer
        x = nengo_dl.tensor_layer(
            inp, tf.layers.conv2d, shape_in=(28, 28, 1), filters=32,
            kernel_size=3)

        # apply the neural nonlinearity
        x = nengo_dl.tensor_layer(x, neuron_type)

        # add another convolutional layer
        x = nengo_dl.tensor_layer(
            x, tf.layers.conv2d, shape_in=(26, 26, 32),
            filters=64, kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type)

        # add a pooling layer
        x = nengo_dl.tensor_layer(
            x, tf.layers.average_pooling2d, shape_in=(24, 24, 64),
            pool_size=2, strides=2)
Example #26
    neuron_type = nengo.LIF(amplitude=0.01)

    # we'll make all the nengo objects in the network
    # non-trainable. we could train them if we wanted, but they don't
    # add any representational power. note that this doesn't affect
    # the internal components of tensornodes, which will always be
    # trainable or non-trainable depending on the code written in
    # the tensornode.
    nengo_dl.configure_settings(trainable=False)

    # the input node that will be used to feed in input images
    inp = nengo.Node([0] * 28 * 28)

    # fully connected encoder
    x = nengo_dl.tensor_layer(inp,
                              tf.layers.dense,
                              shape_in=(28 * 28, ),
                              units=32)  # TODO try fewer parameters
    x = nengo_dl.tensor_layer(x, neuron_type)

    x = nengo_dl.tensor_layer(x, tf.layers.dense, shape_in=(32, ), units=16)
    x = nengo_dl.tensor_layer(x, neuron_type)

    x = nengo_dl.tensor_layer(x, tf.layers.dense, shape_in=(16, ), units=8)
    latent = nengo_dl.tensor_layer(x, neuron_type)

    # fully connected decoder
    x = nengo_dl.tensor_layer(latent,
                              tf.layers.dense,
                              shape_in=(8, ),
                              units=16)
    x = nengo_dl.tensor_layer(x, neuron_type)