    def convert_flatten(self, model, pre_layer, input_shape):
        with model:
            x = nengo_dl.Layer(tf.keras.layers.Flatten)(pre_layer)
        output_shape = 1
        for index in range(len(input_shape)):
            output_shape *= input_shape[index]
        output_shape = [output_shape, 1]
        print('Flatten finish')
        return model, output_shape, x
    def convert_conv2d(self, model, pre_layer, input_shape, index,
                       onnx_model_graph):
        onnx_model_graph_node = onnx_model_graph.node
        node_info = onnx_model_graph_node[index]
        neuron_type = self.get_neuronType(index, onnx_model_graph_node)
        filters = self.get_filterNum(node_info, onnx_model_graph)
        # defaults if the ONNX attributes are absent: stride 1, no implicit padding
        strides = 1
        padding = "valid"
        for index in range(len(node_info.attribute)):
            if node_info.attribute[index].name == "kernel_shape":
                kernel_size = node_info.attribute[index].ints[0]
            elif node_info.attribute[index].name == "strides":
                strides = node_info.attribute[index].ints[0]
            elif node_info.attribute[index].name == "auto_pad":
                padding = node_info.attribute[index].s.decode('ascii').lower()
                if padding != "valid":
                    padding = "same"
        if padding == "same":
            output_shape = [input_shape[0], input_shape[1], filters]
        else:
            output_shape = [
                int((input_shape[0] - kernel_size) / strides + 1),
                int((input_shape[1] - kernel_size) / strides + 1), filters
            ]
        with model:
            x = nengo_dl.Layer(tf.keras.layers.Convolution2D(
                filters=filters, kernel_size=kernel_size, strides=strides,
                padding=padding))(pre_layer,
                                  shape_in=(input_shape[0], input_shape[1],
                                            input_shape[2]))

            # activation
            if neuron_type == "lif":
                x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
                print('activation lif added')
            elif neuron_type == "lifrate":
                x = nengo_dl.Layer(nengo.LIFRate(amplitude=self.amplitude))(x)
            elif neuron_type == "adaptivelif":
                x = nengo_dl.Layer(
                    nengo.AdaptiveLIF(amplitude=self.amplitude))(x)
            elif neuron_type == "adaptivelifrate":
                x = nengo_dl.Layer(
                    nengo.AdaptiveLIFRate(amplitude=self.amplitude))(x)
            elif neuron_type == "izhikevich":
                x = nengo_dl.Layer(
                    nengo.Izhikevich(amplitude=self.amplitude))(x)
            elif neuron_type == "softlifrate":
                x = nengo_dl.Layer(
                    nengo_dl.neurons.SoftLIFRate(amplitude=self.amplitude))(x)
            elif neuron_type is None:  # default neuron_type = LIF
                x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
            print('convert_conv2d finish')
        return model, output_shape, x
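For reference, the "valid" branch above is just the standard convolution arithmetic: output = floor((input - kernel) / stride) + 1 per spatial axis. A minimal stand-alone check (the numbers are illustrative and independent of the converter class):

# hypothetical shapes: 28x28 input, 3x3 kernel, stride 2, "valid" padding
input_shape, kernel_size, strides, filters = [28, 28, 1], 3, 2, 32
output_shape = [(input_shape[0] - kernel_size) // strides + 1,
                (input_shape[1] - kernel_size) // strides + 1, filters]
print(output_shape)  # [13, 13, 32]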
Example #3
    def __init__(self,
                 param_file,
                 dim=256,
                 maze_id_dim=256,
                 n_sensors=36,
                 hidden_size=1024,
                 net_seed=13,
                 n_steps=30):
        self.net = nengo.Network(seed=net_seed)
        with self.net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            self.net.config[nengo.Connection].synapse = None
            neuron_type = nengo.LIF(amplitude=0.01)

            # this is an optimization to improve the training speed,
            # since we won't require stateful behaviour in this example
            nengo_dl.configure_settings(stateful=False)

            # the input node that will be used to feed in (context, location, goal)
            inp = nengo.Node(np.zeros((n_sensors * 4 + maze_id_dim, )))

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=hidden_size))(inp)
            x = nengo_dl.Layer(neuron_type)(x)

            out = nengo_dl.Layer(tf.keras.layers.Dense(units=dim))(x)

            self.out_p = nengo.Probe(out, label="out_p")
            self.out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")

        self.sim = nengo_dl.Simulator(self.net, minibatch_size=1)
        self.sim.load_params(param_file)
        self.sim.compile(loss={self.out_p_filt: mse_loss})
        self.n_steps = n_steps
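The constructor above only builds the network and compiles the loaded simulator. A minimal usage sketch follows; the class name "GoalPolicy", the parameter file name, and the concrete sizes are assumptions for illustration, mirroring the spiking-evaluation pattern used in the other examples here (tile the input over n_steps, then read the filtered probe at the last timestep):

# hypothetical usage; names and sizes are assumptions, not from the source
policy = GoalPolicy("policy_params", n_sensors=36, maze_id_dim=256)
single_input = np.zeros((1, 1, 36 * 4 + 256))
tiled_input = np.tile(single_input, (1, policy.n_steps, 1))  # repeat over timesteps
prediction = policy.sim.predict(tiled_input)[policy.out_p_filt][:, -1, :]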
    def convert_avgpool2d(self, model, pre_layer, input_shape, node_info):
        for index in range(len(node_info.attribute)):
            if node_info.attribute[index].name == "kernel_shape":
                pool_size = node_info.attribute[index].ints[0]
            elif node_info.attribute[index].name == "strides":
                strides = node_info.attribute[index].ints[0]
        output_shape = [
            int(input_shape[0] / strides),
            int(input_shape[1] / strides), input_shape[2]
        ]
        with model:
            x = nengo_dl.Layer(
                tf.keras.layers.AveragePooling2D(
                    pool_size=pool_size,
                    strides=strides))(pre_layer,
                                      shape_in=(input_shape[0], input_shape[1],
                                                input_shape[2]))
        print('convert_avgpool2d finish')
        return model, output_shape, x
    def convert_batchnormalization2d(self, model, pre_layer, input_shape,
                                     node_info):
        # fall back to the Keras defaults when the ONNX attribute is absent or zero
        momentum = 0.99
        epsilon = 0.001
        for index in range(len(node_info.attribute)):
            if node_info.attribute[index].name == "momentum":
                if round(node_info.attribute[index].f, 4) != 0:
                    momentum = round(node_info.attribute[index].f, 4)
            elif node_info.attribute[index].name == "epsilon":
                if round(node_info.attribute[index].f, 4) != 0:
                    epsilon = round(node_info.attribute[index].f, 4)
        with model:
            x = nengo_dl.Layer(
                tf.keras.layers.BatchNormalization(momentum=momentum,
                                                   epsilon=epsilon))(
                    pre_layer,
                    shape_in=(input_shape[0], input_shape[1], input_shape[2]))
        output_shape = input_shape
        print('convert_batchnormalization2d finish')
        return model, output_shape, x  # return x so the model keeps stacking layers
    def convert_dense(self, model, pre_layer, input_shape, index,
                      onnx_model_graph):
        onnx_model_graph_node = onnx_model_graph.node
        node_info = onnx_model_graph_node[index]
        dense_num = self.get_dense_num(node_info, onnx_model_graph)
        neuron_type = self.get_neuronType(
            index,
            onnx_model_graph_node)  # walk the graph nodes to find the neuron's op_type

        with model:
            x = nengo_dl.Layer(
                tf.keras.layers.Dense(units=dense_num))(pre_layer)
            if neuron_type != "softmax":
                if neuron_type == "lif":
                    x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
                elif neuron_type == "lifrate":
                    x = nengo_dl.Layer(
                        nengo.LIFRate(amplitude=self.amplitude))(x)
                elif neuron_type == "adaptivelif":
                    x = nengo_dl.Layer(
                        nengo.AdaptiveLIF(amplitude=self.amplitude))(x)
                elif neuron_type == "adaptivelifrate":
                    x = nengo_dl.Layer(
                        nengo.AdaptiveLIFRate(amplitude=self.amplitude))(x)
                elif neuron_type == "izhikevich":
                    x = nengo_dl.Layer(
                        nengo.Izhikevich(amplitude=self.amplitude))(x)
                elif neuron_type == "softlifrate":
                    x = nengo_dl.Layer(
                        nengo_dl.neurons.SoftLIFRate(
                            amplitude=self.amplitude))(x)
                elif neuron_type is None:  # default neuron_type = LIF
                    x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
        output_shape = [dense_num, 1]
        print('convert Dense finish')
        return model, output_shape, x  # return x so the model keeps stacking layers
Example #7
    # set some default parameters for the neurons that will make
    # the training progress more smoothly
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None
    neuron_type = nengo.LIF(amplitude=0.01)

    # this is an optimization to improve the training speed,
    # since we won't require stateful behaviour in this example
    nengo_dl.configure_settings(stateful=False)

    # the input node that will be used to feed in input images
    inp = nengo.Node(np.zeros(28 * 28))

    # add the first convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(filters=32, kernel_size=3))(
        inp, shape_in=(28, 28, 1)
    )
    x = nengo_dl.Layer(neuron_type)(x)

    # add the second convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(filters=64, strides=2, kernel_size=3))(
        x, shape_in=(26, 26, 32)
    )
    x = nengo_dl.Layer(neuron_type)(x)

    # add the third convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(filters=128, strides=2, kernel_size=3))(
        x, shape_in=(12, 12, 64)
    )
    x = nengo_dl.Layer(neuron_type)(x)
Example #8
with nengo.Network(seed=args.net_seed) as net:
    # set some default parameters for the neurons that will make
    # the training progress more smoothly
    # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None
    neuron_type = nengo.LIF(amplitude=0.01)

    # this is an optimization to improve the training speed,
    # since we won't require stateful behaviour in this example
    nengo_dl.configure_settings(stateful=False)

    # the input node that will be used to feed in (context, location, goal)
    inp = nengo.Node(np.zeros((args.dim * 2 + args.maze_id_dim, )))

    x = nengo_dl.Layer(tf.keras.layers.Dense(units=args.hidden_size))(inp)
    x = nengo_dl.Layer(neuron_type)(x)

    out = nengo_dl.Layer(tf.keras.layers.Dense(units=2))(x)

    out_p = nengo.Probe(out, label="out_p")
    out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")

# minibatch_size = 200
minibatch_size = 256
sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)

print("\nSimulator Built\n")
print(test_output.shape)
# add single timestep to training data
train_input = train_input[:, None, :]
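Training here uses a single timestep per example, while spiking evaluation runs the network over time. A sketch of the corresponding test-data preparation, assuming an n_steps value and variable names that follow the convention of the other examples in this listing:

# assumed names and step count, for illustration only
n_steps = 30
test_input = np.tile(test_input[:, None, :], (1, n_steps, 1))
test_output = np.tile(test_output[:, None, :], (1, n_steps, 1))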
Example #9
def lmu(theta, input_d, native_nengo=False, dtype="float32"):
    """
    A network containing a single Legendre Memory Unit cell and dense readout.

    See [1]_ for more details.

    Parameters
    ----------
    theta : int
        Time window parameter for LMU.
    input_d : int
        Dimensionality of input signal.
    native_nengo : bool
        If True, build the LMU out of Nengo objects. Otherwise, build the LMU
        directly in TensorFlow, and use a `.TensorNode` to wrap the whole cell.
    dtype : str
        Float dtype to use for internal parameters of LMU when ``native_nengo=False``
        (``native_nengo=True`` will use the dtype of the Simulator).

    Returns
    -------
    net : `nengo.Network`
        Benchmark network

    References
    ----------
    .. [1] Aaron R. Voelker, Ivana Kajić, and Chris Eliasmith. Legendre memory units:
       continuous-time representation in recurrent neural networks.
       In Advances in Neural Information Processing Systems. 2019.
       https://papers.nips.cc/paper/9689-legendre-memory-units-continuous-time-representation-in-recurrent-neural-networks.
    """
    if native_nengo:
        # building LMU cell directly out of Nengo objects

        class LMUCell(nengo.Network):
            """Implements an LMU cell as a Nengo network."""

            def __init__(self, units, order, theta, input_d, **kwargs):
                super().__init__(**kwargs)

                # compute the A and B matrices according to the LMU's mathematical
                # derivation (see the paper for details)
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                A, B, _, _, _ = cont2discrete((A, B, C, D), dt=1.0, method="zoh")

                with self:
                    nengo_dl.configure_settings(trainable=None)

                    # create objects corresponding to the x/u/m/h variables in LMU
                    self.x = nengo.Node(size_in=input_d)
                    self.u = nengo.Node(size_in=1)
                    self.m = nengo.Node(size_in=order)
                    self.h = nengo_dl.TensorNode(
                        tf.nn.tanh, shape_in=(units,), pass_time=False
                    )

                    # compute u_t
                    # note that setting synapse=0 (versus synapse=None) adds a
                    # one-timestep delay, so we can think of any connections with
                    # synapse=0 as representing value_{t-1}
                    nengo.Connection(
                        self.x, self.u, transform=np.ones((1, input_d)), synapse=None
                    )
                    nengo.Connection(
                        self.h, self.u, transform=np.zeros((1, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m, self.u, transform=np.zeros((1, order)), synapse=0
                    )

                    # compute m_t
                    # in this implementation we'll make A and B non-trainable, but they
                    # could also be optimized in the same way as the other parameters
                    conn = nengo.Connection(self.m, self.m, transform=A, synapse=0)
                    self.config[conn].trainable = False
                    conn = nengo.Connection(self.u, self.m, transform=B, synapse=None)
                    self.config[conn].trainable = False

                    # compute h_t
                    nengo.Connection(
                        self.x,
                        self.h,
                        transform=np.zeros((units, input_d)),
                        synapse=None,
                    )
                    nengo.Connection(
                        self.h, self.h, transform=np.zeros((units, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m,
                        self.h,
                        transform=nengo_dl.dists.Glorot(distribution="normal"),
                        synapse=None,
                    )

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            nengo_dl.configure_settings(
                trainable=None, stateful=False, keep_history=False,
            )

            # input node
            net.inp = nengo.Node(np.zeros(input_d))

            # lmu cell
            lmu_cell = LMUCell(units=212, order=256, theta=theta, input_d=input_d)
            conn = nengo.Connection(net.inp, lmu_cell.x, synapse=None)
            net.config[conn].trainable = False

            # dense linear readout
            out = nengo.Node(size_in=10)
            nengo.Connection(
                lmu_cell.h, out, transform=nengo_dl.dists.Glorot(), synapse=None
            )

            # record output. note that we set keep_history=False above, so this will
            # only record the output on the last timestep (which is all we need
            # on this task)
            net.p = nengo.Probe(out)
    else:
        # putting everything in a tensornode

        # define LMUCell
        class LMUCell(tf.keras.layers.AbstractRNNCell):
            """Implement LMU as Keras RNN cell."""

            def __init__(self, units, order, theta, **kwargs):
                super().__init__(**kwargs)

                self.units = units
                self.order = order
                self.theta = theta

                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                self._A, self._B, _, _, _ = cont2discrete(
                    (A, B, C, D), dt=1.0, method="zoh"
                )

            @property
            def state_size(self):
                """Size of RNN state variables."""
                return self.units, self.order

            @property
            def output_size(self):
                """Size of cell output."""
                return self.units

            def build(self, input_shape):
                """Set up all the weight matrices used inside the cell."""

                super().build(input_shape)

                input_dim = input_shape[-1]
                self.input_encoders = self.add_weight(
                    shape=(input_dim, 1), initializer=tf.initializers.ones(),
                )
                self.hidden_encoders = self.add_weight(
                    shape=(self.units, 1), initializer=tf.initializers.zeros(),
                )
                self.memory_encoders = self.add_weight(
                    shape=(self.order, 1), initializer=tf.initializers.zeros(),
                )
                self.input_kernel = self.add_weight(
                    shape=(input_dim, self.units), initializer=tf.initializers.zeros(),
                )
                self.hidden_kernel = self.add_weight(
                    shape=(self.units, self.units), initializer=tf.initializers.zeros(),
                )
                self.memory_kernel = self.add_weight(
                    shape=(self.order, self.units),
                    initializer=tf.initializers.glorot_normal(),
                )
                self.AT = self.add_weight(
                    shape=(self.order, self.order),
                    initializer=tf.initializers.constant(self._A.T),
                    trainable=False,
                )
                self.BT = self.add_weight(
                    shape=(1, self.order),
                    initializer=tf.initializers.constant(self._B.T),
                    trainable=False,
                )

            def call(self, inputs, states):
                """Compute cell output and state updates."""

                h_prev, m_prev = states

                # compute u_t from the above diagram
                u = (
                    tf.matmul(inputs, self.input_encoders)
                    + tf.matmul(h_prev, self.hidden_encoders)
                    + tf.matmul(m_prev, self.memory_encoders)
                )

                # compute updated memory state vector (m_t in diagram)
                m = tf.matmul(m_prev, self.AT) + tf.matmul(u, self.BT)

                # compute updated hidden state vector (h_t in diagram)
                h = tf.nn.tanh(
                    tf.matmul(inputs, self.input_kernel)
                    + tf.matmul(h_prev, self.hidden_kernel)
                    + tf.matmul(m, self.memory_kernel)
                )

                return h, [h, m]

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            # we could set use_loop=False as well here, but leaving it for parity
            # with native_nengo
            nengo_dl.configure_settings(stateful=False)

            net.inp = nengo.Node(np.zeros(theta))

            rnn = nengo_dl.Layer(
                tf.keras.layers.RNN(
                    LMUCell(units=212, order=256, theta=theta, dtype=dtype),
                    return_sequences=False,
                )
            )(net.inp, shape_in=(theta, input_d))

            out = nengo.Node(size_in=10)
            nengo.Connection(rnn, out, transform=nengo_dl.dists.Glorot(), synapse=None)

            net.p = nengo.Probe(out)

    return net
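A minimal sketch of simulating the returned benchmark network with nengo_dl; the batch size, theta, and input dimensionality are illustrative assumptions (theta=784, input_d=1 roughly corresponds to a psMNIST-style setup):

net = lmu(theta=784, input_d=1)
with nengo_dl.Simulator(net, minibatch_size=4) as sim:
    # one forward pass on zero input; data shape is (batch, timesteps, node size)
    out = sim.predict(np.zeros((4, 1, 784)))
    print(out[net.p].shape)  # (4, 1, 10)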
Example #10
def mnist(use_tensor_layer=True):
    """
    A network designed to stress-test tensor layers (based on mnist net).

    Parameters
    ----------
    use_tensor_layer : bool
        If True, use individual tensor_layers to build the network, as opposed
        to a single TensorNode containing all layers.

    Returns
    -------
    net : `nengo.Network`
        benchmark network
    """

    with nengo.Network() as net:
        # create node to feed in images
        net.inp = nengo.Node(np.ones(28 * 28))

        if use_tensor_layer:
            nengo_nl = nengo.RectifiedLinear()

            ensemble_params = dict(
                max_rates=nengo.dists.Choice([100]), intercepts=nengo.dists.Choice([0])
            )
            amplitude = 1
            synapse = None

            x = nengo_dl.Layer(tf.keras.layers.Conv2D(filters=32, kernel_size=3))(
                net.inp, shape_in=(28, 28, 1)
            )
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(tf.keras.layers.Conv2D(filters=32, kernel_size=3))(
                x, shape_in=(26, 26, 32), transform=amplitude
            )
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(
                tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)
            )(x, shape_in=(24, 24, 32), synapse=synapse, transform=amplitude)

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=128))(x)
            x = nengo_dl.Layer(nengo_nl)(x, **ensemble_params)

            x = nengo_dl.Layer(tf.keras.layers.Dropout(rate=0.4))(
                x, transform=amplitude
            )

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=10))(x)
        else:
            nl = tf.nn.relu

            # def softlif_layer(x, sigma=1, tau_ref=0.002, tau_rc=0.02,
            #                   amplitude=1):
            #     # x -= 1
            #     z = tf.nn.softplus(x / sigma) * sigma
            #     z += 1e-10
            #     rates = amplitude / (tau_ref + tau_rc * tf.log1p(1 / z))
            #     return rates

            def mnist_node(x):  # pragma: no cover (runs in TF)
                x = tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation=nl)(x)
                x = tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation=nl)(x)
                x = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(x)
                x = tf.keras.layers.Flatten()(x)
                x = tf.keras.layers.Dense(128, activation=nl)(x)
                x = tf.keras.layers.Dropout(rate=0.4)(x)
                x = tf.keras.layers.Dense(10)(x)

                return x

            # mnist_node only takes the input tensor, so the time argument is disabled
            node = nengo_dl.TensorNode(
                mnist_node, shape_in=(28, 28, 1), shape_out=(10,), pass_time=False
            )
            x = node
            nengo.Connection(net.inp, node, synapse=None)

        net.p = nengo.Probe(x)

    return net
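As with the LMU benchmark, a brief usage sketch (the batch size is an illustrative assumption):

net = mnist(use_tensor_layer=True)
with nengo_dl.Simulator(net, minibatch_size=2) as sim:
    out = sim.predict(np.ones((2, 1, 28 * 28)))
    print(out[net.p].shape)  # (2, 1, 10)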
Example #11
def main_SNN(args):
    config = Config()
    config.training_volume = args.training_volume
    config.input_dim = args.input_dim
    config.encoding_dim = args.encoding_dim
    config.scale = args.scale
    if args.runtime_measurement:
        config.n_time_measures = 10
    else:
        config.n_time_measures = 1

    # nengo net params
    do_rate = 0.5
    num_epochs = 200
    enc_dim = 1000
    minibatch_size = 500
    seed = 0

    # load dataset
    data = load_dataset(args.dataset, config)
    X_train = data[0]
    X_test = data[1]
    y_train = data[2]
    y_test = data[3]
    config = data[4]

    # if the train/test data is not already a list, wrap it in one
    if isinstance(X_train, list):
        print("given data is already a list")
        X_train_list = X_train
        X_test_list = X_test
        y_train_list = y_train
        y_test_list = y_test
    else:
        X_train_list = [X_train]
        X_test_list = [X_test]
        y_train_list = [y_train]
        y_test_list = [y_test]

    #######################################################################################
    # statistical iteration
    #######################################################################################
    acc_mean = []
    f1_mean = []

    for stat_it in range(args.stat_iterations):
        logger.info('Statistical iteration: ' + str(stat_it))
        seed = stat_it

        # train for each element in list (that is why we need list form, even if it contains only one element)
        logger.info('Training data contains ' + str(len(X_train)) +
                    ' training instances...')
        scores = []
        accs = []
        for it in range(len(X_train_list)):
            logger.info(('.......'))
            logger.info('instance ' + str(it) + ':')

            X_train = X_train_list[it]
            X_test = X_test_list[it]
            y_train = y_train_list[it]
            y_test = y_test_list[it]

            # use only fraction of training samples (if given)
            X_train = X_train[1:int(X_train.shape[0] *
                                    config.training_volume), :, :]
            y_train = y_train[1:int(y_train.shape[0] * config.training_volume)]

            y_train_oh = one_hot(y_train)
            y_test_oh = one_hot(y_test)

            # config.input_dim = X_train.shape[1]
            logger.info('Training dataset shape: ' + str(X_train.shape) +
                        str(y_train.shape))
            logger.info('Test dataset shape: ' + str(X_test.shape) +
                        str(y_test.shape))

            config.n_classes = len(np.unique(y_train))
            config.n_inputs = X_train.shape[2]
            config.n_steps = X_train.shape[1]

            #######################################################################################
            # create HDC vectors (encoding)
            #######################################################################################
            # create HDC vectors
            t_train, X_train, traces_train, init_vecs = create_HDC_vectors(
                config, X_train)
            t_test, X_test, traces_test, init_vecs = create_HDC_vectors(
                config, X_test)
            preprocessing_time = t_train + t_test

            # normalize HDC encodings
            m = np.mean(X_train, axis=0)
            s = np.std(X_train, axis=0)
            config.m = m
            config.s = s
            X_train = np.divide(X_train - m, s)
            X_test = np.divide(X_test - m, s)

            #######################################################################################
            # nengo model training
            #######################################################################################

            net = nengo.Network(seed=seed + 1)

            with net:
                # set some default parameters for the neurons that will make
                # the training progress more smoothly
                net.config[nengo.Ensemble].max_rates = nengo.dists.Choice(
                    [100])
                net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
                net.config[nengo.Connection].synapse = None
                neuron_type = nengo.LIF(amplitude=0.01)

                # this is an optimization to improve the training speed,
                # since we won't require stateful behaviour in this example
                nengo_dl.configure_settings(stateful=False)

                # the input node that will be used to feed in input vectors
                inp = nengo.Node(np.zeros(config.input_dim))

                x = nengo_dl.Layer(tf.keras.layers.Dropout(rate=do_rate))(inp)
                x = nengo_dl.Layer(neuron_type)(x)

                x = nengo_dl.Layer(tf.keras.layers.Dense(units=enc_dim))(x)
                x = nengo_dl.Layer(neuron_type)(x)

                out = nengo_dl.Layer(
                    tf.keras.layers.Dense(units=len(y_train_oh[0])))(x)

                # we'll create two different output probes, one with a filter
                # (for when we're simulating the network over time and
                # accumulating spikes), and one without (for when we're
                # training the network using a rate-based approximation)
                out_p = nengo.Probe(out, label="out_p")
                out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")

            sim = nengo_dl.Simulator(net,
                                     minibatch_size=minibatch_size,
                                     device="/gpu:0")

            # run training
            sim.compile(
                optimizer=tf.optimizers.RMSprop(0.001),
                loss={
                    out_p: tf.losses.CategoricalCrossentropy(from_logits=True)
                },
            )

            # add single timestep to training data
            X_train = X_train[:, None, :]
            y_train_oh = y_train_oh[:, None]

            # when testing our network with spiking neurons we will need to run it
            # over time, so we repeat the input/target data for a number of
            # timesteps.
            n_steps = 30
            X_test = np.tile(X_test[:, None, :], (1, n_steps, 1))
            y_test_oh = np.tile(y_test_oh[:, None], (n_steps, 1))

            def classification_accuracy(y_true, y_pred):
                return tf.metrics.categorical_accuracy(y_true[:, -1],
                                                       y_pred[:, -1])

            accuracy = sim.evaluate(X_test, {out_p_filt: y_test_oh},
                                    verbose=0)["loss"]
            print("Accuracy before training:", accuracy)

            cb_time = TimingCallback()
            sim.fit(
                X_train,
                {out_p: y_train_oh},
                epochs=num_epochs,
                callbacks=[cb_time],
            )
            # log training time
            epoch_time = cb_time.logs
            mean_epoch_time = np.mean(epoch_time)
            training_time = np.sum(epoch_time)

            # save the parameters to file
            # sim.save_params("./nengo_dl_params")

            #############################################################################################
            # evaluation of results
            #############################################################################################

            sim.compile(loss={out_p_filt: classification_accuracy})

            # runtime measurement
            t = []
            for i in range(config.n_time_measures):
                t1 = time()
                accuracy = sim.evaluate(X_test, {out_p_filt: y_test_oh},
                                        verbose=0)["loss"]
                inference_time = time() - t1
                t.append(inference_time)
            inference_time = np.mean(t)
            print("Accuracy after training:", accuracy)
            accs.append(accuracy)

            sim2 = nengo_dl.Simulator(net, minibatch_size=1, device="/gpu:0")

            y_pred = sim2.predict(X_test)
            y_pred_am = np.argmax(y_pred[out_p_filt][:, -1, :], axis=1)
            y_pred_am.shape = (y_pred_am.shape[0], 1)
            f1_score = sklearn.metrics.f1_score(y_test,
                                                y_pred_am,
                                                average='micro')
            f1_score_weighted = sklearn.metrics.f1_score(y_test,
                                                         y_pred_am,
                                                         average='weighted')

            logger.info("Training time: " + str(training_time))
            logger.info("Mean epoch time: " + str(mean_epoch_time))
            logger.info("Inference time: " + str(inference_time))

            logger.info("Preprocessing time for training: " + str(t_train))
            logger.info("Preprocessing time for testing: " + str(t_test))
            logger.info("Inference time one sequence [ms]: " +
                        str((inference_time * 1000 + t_test * 1000) /
                            X_test.shape[0]))

            print('Classification report on test data:')
            report = classification_report(y_test, y_pred_am, output_dict=True)
            logger.info(classification_report(y_test, y_pred_am))

            print("Confusion matrix:")
            confusion_matrix = metrics.confusion_matrix(y_test, y_pred_am)
            print(confusion_matrix)

            # f1 score
            scores.append(f1_score_weighted)
            logger.info("F1 Score: " + str(f1_score_weighted))

            # close simulator
            sim.close()
            sim2.close()

        # add results to statistical result array
        acc_mean.append(np.mean(accs))
        f1_mean.append(np.mean(scores))

    # save as mat files
    save_dic = {
        "report": report,
        "confusion_matrix": confusion_matrix,
        "config": config,
        "pred": y_pred,
        "label": y_test,
        "f1_mean": np.mean(f1_mean)
    }
    savemat(
        "results/" + args.dataset + "/results_Nengo_net_" +
        str(config.input_dim) + "_" + str(config.scale) + "_" +
        str(config.encoding_dim) + '_' + str(config.training_volume) + ".mat",
        save_dic)

    logger.info('Accuracy results of statistical repetitions: ' +
                str(acc_mean))
    logger.info('F1 scores of statistical repetitions: ' + str(f1_mean))

    # write all scores to extra file
    logger.info('Mean Score: ' + str(np.mean(f1_mean)))
    logger.info('Mean Accuracy: ' + str(np.mean(acc_mean)))
    with open("results/results_" + args.dataset + "_SNN.txt", 'a') as file:
        file.write(
            str(config.input_dim) + '\t' + str(config.encoding_dim) + '\t' +
            str(config.scale) + '\t' + str(args.stat_iterations) + '\t' +
            str(round(np.mean(f1_mean), 3)) + '\t' +
            str(round(np.mean(acc_mean), 3)) + '\t' +
            str(round(np.std(f1_mean), 3)) + '\t' +
            str(round(np.std(acc_mean), 3)) + '\n')
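A sketch of how main_SNN might be invoked from the command line; the argument names mirror the attributes read from args above, while the defaults and the dataset name are illustrative assumptions:

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="example_dataset")        # assumed name
    parser.add_argument("--input_dim", type=int, default=64)           # assumed default
    parser.add_argument("--encoding_dim", type=int, default=512)       # assumed default
    parser.add_argument("--scale", type=float, default=1.0)            # assumed default
    parser.add_argument("--training_volume", type=float, default=1.0)  # assumed default
    parser.add_argument("--stat_iterations", type=int, default=1)      # assumed default
    parser.add_argument("--runtime_measurement", action="store_true")
    main_SNN(parser.parse_args())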
Example #12
    # set some default parameters for the neurons that will make
    # the training progress more smoothly
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None
    neuron_type = nengo.LIF(amplitude=0.01)

    # this is an optimization to improve the training speed,
    # since we won't require stateful behaviour in this example
    nengo_dl.configure_settings(stateful=False)

    # the input node that will be used to feed in input images
    inp = nengo.Node(np.zeros(28 * 28))

    # add the first convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(
        filters=32, kernel_size=3))(inp, shape_in=(28, 28, 1)) # (72, 128, 1)
    x = nengo_dl.Layer(neuron_type)(x)

    # add the second convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(
        filters=64, strides=2, kernel_size=3))(x, shape_in=(26, 26, 32)) # 5/8 - 282240 (70, 126, 32)
    x = nengo_dl.Layer(neuron_type)(x)

    # add the third convolutional layer
    x = nengo_dl.Layer(tf.keras.layers.Conv2D(
        filters=128, strides=2, kernel_size=3))(x, shape_in=(12, 12, 64)) # 3/8 134912 (34, 62, 64)
    x = nengo_dl.Layer(neuron_type)(x)

    # linear readout
    unitRange = max(train_labels) + 1
    out = nengo_dl.Layer(tf.keras.layers.Dense(units=unitRange))(x)