Example #1
    def save_connections(self, path=None):
        """Write parameters of a neural network to disk.

        The parameters between two layers are saved in a text file.
        They can then be used to connect pyNN populations, e.g. with
        ``sim.Projection(layer1, layer2, sim.FromFileConnector(filename))``,
        where ``sim`` is a simulator supported by pyNN, e.g. Brian, NEURON, or
        NEST.

        Parameters
        ----------

        path: string, optional
            Path to directory where connections are saved. Defaults to
            ``settings['path']``.

        Returns
        -------
            Text files containing the layer connections. Each file is named
            after the layer it connects to, e.g. ``layer2.txt`` if connecting
            layer1 to layer2.
        """

        if path is None:
            path = settings['path']

        echo("Saving connections to {}...\n".format(path))

        # Iterate over layers to save each projection in a separate txt file.
        for projection in self.connections:
            filepath = os.path.join(path, projection.label.partition('→')[-1])
            if confirm_overwrite(filepath):
                projection.save('connections', filepath)
        echo("Done.\n")
Example #2
    def add_layer(self, layer):
        """Add empty layer."""

        self.conns = []
        self.layers.append(
            self.sim.Population(int(np.prod(layer.output_shape[1:])),
                                self.sim.IF_cond_exp,
                                self.cellparams,
                                label=layer.name))
        if hasattr(layer, 'activation') and layer.activation == 'softmax':
            echo("WARNING: Activation 'softmax' not implemented. " +
                 "Using 'relu' activation instead.\n")
Example #3
    def build_convolution(self, layer):
        """Build convolution layer."""

        [weights, biases] = layer.get_weights()
        i_offset = np.empty(np.prod(layer.output_shape[1:]))
        n = int(len(i_offset) / len(biases))
        # Distribute the bias of each feature map over its n neurons.
        for i in range(len(biases)):
            i_offset[i * n:(i + 1) * n] = biases[i]
        self.layers[-1].set(i_offset=i_offset)

        nx = layer.input_shape[3]  # Width of feature map
        ny = layer.input_shape[2]  # Height of feature map
        kx = layer.nb_col  # Width of kernel
        ky = layer.nb_row  # Height of kernel
        px = int((kx - 1) / 2)  # Zero-padding columns
        py = int((ky - 1) / 2)  # Zero-padding rows
        if layer.border_mode == 'valid':
            # In border_mode 'valid', the original side length is
            # reduced by one less than the kernel size.
            mx = nx - kx + 1  # Number of columns in output filters
            my = ny - ky + 1  # Number of rows in output filters
            x0 = px
            y0 = py
        elif layer.border_mode == 'same':
            mx = nx
            my = ny
            x0 = 0
            y0 = 0
        else:
            raise Exception("Border_mode {} not supported".format(
                layer.border_mode))
        # Loop over output filters 'fout'
        for fout in range(weights.shape[0]):
            for y in range(y0, ny - y0):
                for x in range(x0, nx - x0):
                    target = x - x0 + (y - y0) * mx + fout * mx * my
                    # Loop over input filters 'fin'
                    for fin in range(weights.shape[1]):
                        for k in range(-py, py + 1):
                            if not 0 <= y + k < ny:
                                continue
                            source = x + (y + k) * nx + fin * nx * ny
                            for l in range(-px, px + 1):
                                if not 0 <= x + l < nx:
                                    continue
                                self.conns.append(
                                    (source + l, target,
                                     weights[fout, fin, py - k,
                                             px - l], settings['delay']))
                echo('.')
            echo(' {:.1%}\n'.format(((fout + 1) * weights.shape[1]) /
                                    (weights.shape[0] * weights.shape[1])))
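The flat ``source`` and ``target`` indices above assume row-major (C-order) flattening of ``(feature, row, col)`` coordinates. A small sanity check with hypothetical sizes:

import numpy as np

nx, ny, nf = 5, 4, 3  # hypothetical width, height, number of feature maps
for f in range(nf):
    for y in range(ny):
        for x in range(nx):
            flat = x + y * nx + f * nx * ny
            assert flat == np.ravel_multi_index((f, y, x), (nf, ny, nx))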
Example #4
    def build_convolution(self, layer):
        """Build convolution layer."""

        weights = layer.get_weights()[0]  # [W, b][0]
        nx = layer.input_shape[3]  # Width of feature map
        ny = layer.input_shape[2]  # Height of feature map
        kx = layer.nb_col  # Width of kernel
        ky = layer.nb_row  # Height of kernel
        px = int((kx - 1) / 2)  # Zero-padding columns
        py = int((ky - 1) / 2)  # Zero-padding rows
        if layer.border_mode == 'valid':
            # In border_mode 'valid', the original side length is
            # reduced by one less than the kernel size.
            mx = nx - kx + 1  # Number of columns in output filters
            my = ny - ky + 1  # Number of rows in output filters
            x0 = px
            y0 = py
        elif layer.border_mode == 'same':
            mx = nx
            my = ny
            x0 = 0
            y0 = 0
        else:
            raise Exception("Border_mode {} not supported".format(
                layer.border_mode))
        # Loop over output filters 'fout'
        for fout in range(weights.shape[0]):
            for y in range(y0, ny - y0):
                for x in range(x0, nx - x0):
                    target = x - x0 + (y - y0) * mx + fout * mx * my
                    # Loop over input filters 'fin'
                    for fin in range(weights.shape[1]):
                        for k in range(-py, py + 1):
                            if not 0 <= y + k < ny:
                                continue
                            source = x + (y + k) * nx + fin * nx * ny
                            for l in range(-px, px + 1):
                                if not 0 <= x + l < nx:
                                    continue
                                self.connections[-1].connect(i=source + l,
                                                             j=target)
                                self.connections[-1].w[source + l, target] = (
                                    weights[fout, fin, py - k, px - l] *
                                    self.sim.volt)
                echo('.')
            echo(' {:.1%}\n'.format(((fout + 1) * weights.shape[1]) /
                                    (weights.shape[0] * weights.shape[1])))
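This variant stores the weights directly on Brian2 ``Synapses`` objects instead of a connection list. A minimal standalone sketch of that pattern (toy sizes; one explicit connection stands in for the loop above):

from brian2 import NeuronGroup, Synapses, volt

pre = NeuronGroup(9, 'v : volt')
post = NeuronGroup(9, 'v : volt')
syn = Synapses(pre, post, model='w : volt', on_pre='v += w')
syn.connect(i=0, j=4)      # connect source 0 to target 4
syn.w[0, 4] = 0.5 * volt   # weight assigned after connecting, as above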
Example #5
    def build(self, parsed_model, **kwargs):
        """Compile a SNN to prepare for simulation with INI simulator.

        Convert an ANN to a spiking neural network, using layers derived from
        Keras base classes.

        The aim is to simulate the network on a self-implemented
        integrate-and-fire simulator, using a time-stepped approach.

        Sets the ``snn`` and ``get_output`` attributes of this class.

        Parameters
        ----------

        parsed_model: Keras model
            Parsed input model; result of applying
            ``model_lib.extract(input_model)`` to the ``input_model``.
        """

        self.parsed_model = parsed_model

        echo('\n' + "Compiling spiking network...\n")

        # Pass time variable to first layer
        input_time = theano.tensor.scalar('time')
        kwargs2 = {'time_var': input_time}

        # Iterate over layers to create spiking neurons and connections.
        for layer in parsed_model.layers:
            echo("Building layer: {}\n".format(layer.name))
            spike_layer = getattr(self.sim, 'Spike' + layer.__class__.__name__)
            self.snn.add(spike_layer(**layer.get_config()))
            self.snn.layers[-1].set_weights(layer.get_weights())
            self.sim.init_neurons(self.snn.layers[-1],
                                  v_thresh=settings['v_thresh'],
                                  tau_refrac=settings['tau_refrac'],
                                  **kwargs2)
            kwargs2 = {}

        # Compile
        self.compile_snn(input_time)
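The ``getattr`` call above resolves spiking layer classes by name, e.g. ``Dense`` to ``SpikeDense``. A standalone sketch of this lookup pattern, with a stub standing in for the real ``sim`` module:

class sim_stub:
    """Stands in for the simulator module that defines Spike* classes."""
    class SpikeDense:
        def __init__(self, **config):
            self.config = config

layer_class_name = 'Dense'
spike_cls = getattr(sim_stub, 'Spike' + layer_class_name)
spike_layer = spike_cls(units=10)  # hypothetical Keras layer config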
Example #6
    def build_pooling(self, layer):
        """Build pooling layer."""

        if layer.__class__.__name__ == 'MaxPooling2D':
            echo("WARNING: Layer type 'MaxPooling' not supported yet. " +
                 "Falling back on 'AveragePooling'.\n")
        nx = layer.input_shape[3]  # Width of feature map
        ny = layer.input_shape[2]  # Height of feature map
        dx = layer.pool_size[1]  # Width of pool
        dy = layer.pool_size[0]  # Height of pool
        sx = layer.strides[1]
        sy = layer.strides[0]
        for fout in range(layer.input_shape[1]):  # Feature maps
            for y in range(0, ny - dy + 1, sy):
                for x in range(0, nx - dx + 1, sx):
                    target = int(x / sx + y / sy * ((nx - dx) / sx + 1) +
                                 fout * nx * ny / (dx * dy))
                    for k in range(dy):
                        source = x + (y + k) * nx + fout * nx * ny
                        for l in range(dx):
                            self.conns.append(
                                (source + l, target, 1 / (dx * dy),
                                 settings['delay']))
                echo('.')
            echo(' {:.1%}\n'.format((1 + fout) / layer.input_shape[1]))
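Each pool window above is connected with uniform weight ``1 / (dx * dy)``, which reproduces average pooling. A quick numeric check on a hypothetical 4x4 feature map with 2x2 pools and stride 2:

import numpy as np

fmap = np.arange(16, dtype=float).reshape(4, 4)
dx = dy = sx = sy = 2
out = np.zeros((2, 2))
for y in range(0, 4 - dy + 1, sy):
    for x in range(0, 4 - dx + 1, sx):
        out[y // sy, x // sx] = fmap[y:y + dy, x:x + dx].sum() / (dx * dy)
assert np.allclose(out, [[2.5, 4.5], [10.5, 12.5]])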
Example #7
    def save(self, path=None, filename=None):
        """Write model architecture and parameters to disk.

        Parameters
        ----------

        path: string, optional
            Path to the directory where the model is saved. Defaults to
            ``settings['path']``.

        filename: string, optional
            Name of file to write model to. Defaults to
            ``settings['filename_snn']``.
        """

        if path is None:
            path = settings['path']
        if filename is None:
            filename = settings['filename_snn']
        filepath = os.path.join(path, filename + '.h5')

        echo("Saving model to {}...\n".format(filepath))
        self.snn.save(filepath, settings['overwrite'])
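Assuming ``self.snn`` is a Keras model saved in HDF5 format (as the ``.h5`` suffix suggests), the file can presumably be reloaded like this; the filename is hypothetical:

import keras

model = keras.models.load_model('snn.h5')
model.summary()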
Example #8
    def build(self, parsed_model, **kwargs):
        """
        Compile a spiking neural network to prepare for simulation.

        Written in pyNN (http://neuralensemble.org/docs/PyNN/).
        pyNN is a simulator-independent language for building neural network
        models. It allows running the converted net in a spiking simulator
        like Brian, NEURON, or NEST.

        During compilation, two lists are created and stored to disk:
        ``layers`` and ``connections``. Each entry in ``layers`` represents a
        population of neurons, given by a pyNN ``Population`` object. The
        neurons in these layers are connected by pyNN ``Projection`` objects,
        stored in the ``connections`` list.

        This compilation method connects the layers. This means that if the
        session was started with a call to ``sim.setup()``, the converted
        network can be tested right away, using the simulator ``sim``.

        However, when starting a new session (calling ``sim.setup()`` after
        conversion), the ``layers`` have to be reloaded from disk using
        ``load_assembly``, and the connections reestablished manually. This is
        implemented in the ``run`` method; see there for details.
        See ``snntoolbox.core.pipeline.test_full`` for how to simulate the
        network after conversion.

        Parameters
        ----------

        parsed_model: Keras model
            Parsed input model; result of applying
            ``model_lib.extract(input_model)`` to the ``input_model``.
        """

        self.parsed_model = parsed_model

        echo('\n' + "Compiling spiking network...\n")

        self.add_input_layer(parsed_model.layers[0].batch_input_shape)

        # Iterate over layers to create spiking neurons and connections.
        for layer in parsed_model.layers:
            layer_type = layer.__class__.__name__
            if 'Flatten' in layer_type:
                continue
            echo("Building layer: {}\n".format(layer.name))
            self.add_layer(layer)
            if layer_type == 'Dense':
                self.build_dense(layer)
            elif layer_type == 'Convolution2D':
                self.build_convolution(layer)
            elif layer_type in {'MaxPooling2D', 'AveragePooling2D'}:
                self.build_pooling(layer)
            self.connect_layer()

        echo("Compilation finished.\n\n")
Example #9
    def build(self, parsed_model, **kwargs):
        """Compile SNN to prepare for simulation with Brian2.

        Parameters
        ----------

        parsed_model: Keras model
            Parsed input model; result of applying
            ``model_lib.extract(input_model)`` to the ``input_model``.
        """

        self.parsed_model = parsed_model

        echo('\n' + "Compiling spiking network...\n")

        self.add_input_layer(parsed_model.layers[0].batch_input_shape)

        # Iterate over hidden layers to create spiking neurons and store
        # connections.
        for layer in parsed_model.layers:
            layer_type = layer.__class__.__name__
            if 'Flatten' in layer_type:
                continue
            echo("Building layer: {}\n".format(layer.name))
            self.add_layer(layer)
            if layer_type == 'Dense':
                self.build_dense(layer)
            elif layer_type == 'Convolution2D':
                self.build_convolution(layer)
            elif layer_type in {'MaxPooling2D', 'AveragePooling2D'}:
                self.build_pooling(layer)

        echo("Compilation finished.\n\n")

        # Track the output layer spikes. Add monitor here if it was not already
        # appended above (because settings['verbose'] < 1)
        if len(self.spikemonitors) < len(self.layers):
            self.spikemonitors.append(self.sim.SpikeMonitor(self.layers[-1]))

        # Create snapshot of network
        self.store()
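A minimal Brian2 ``SpikeMonitor`` example matching the output tracking above; the neuron model is a toy choice:

from brian2 import NeuronGroup, SpikeMonitor, ms, run

group = NeuronGroup(3, 'dv/dt = (1 - v) / (10*ms) : 1',
                    threshold='v > 0.8', reset='v = 0')
mon = SpikeMonitor(group)
run(50 * ms)
print(mon.count)  # spikes per neuron; argmax gives the class guess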
Example #10
    def run(self, x_test=None, y_test=None, dataflow=None, **kwargs):
        """Simulate a SNN with LIF and Poisson input.

        Simulate a spiking network with leaky integrate-and-fire units and
        Poisson input, using mean pooling and a timestepped approach.

        If ``settings['verbose'] > 1``, the toolbox plots the spiketrains
        and spikerates of each neuron in each layer, for the first sample of
        the first batch of ``x_test``.

        This is somewhat costly in terms of memory and time, but can be useful
        for debugging the network's general functioning.

        Parameters
        ----------

        x_test: float32 array
            The input samples to test.
            With data of the form (channels, num_rows, num_cols),
            x_test has dimension (num_samples, channels*num_rows*num_cols)
            for a multi-layer perceptron, and
            (num_samples, channels, num_rows, num_cols) for a convolutional
            net.
        y_test: float32 array
            Ground truth of test data. Has dimension (num_samples, num_classes)
        dataflow : keras.DataFlowGenerator

        kwargs: Optional[dict]
            - s: Optional[dict]
                Settings. If not given, the ``snntoolbox.config.settings``
                dictionary is used.
            - path: Optional[str]
                Where to store the output plots. If no path is given, this
                value is taken from the settings dictionary.

        Returns
        -------

        total_acc: float
            Number of correctly classified samples divided by total number of
            test samples.
        """

        import keras
        import numpy as np
        from ann_architectures.imagenet.utils import preprocess_input
        from snntoolbox.core.util import get_activations_batch
        from snntoolbox.io_utils.plotting import output_graphs
        from snntoolbox.io_utils.plotting import plot_confusion_matrix
        from snntoolbox.io_utils.plotting import plot_error_vs_time
        from snntoolbox.io_utils.plotting import plot_input_image
        from snntoolbox.io_utils.plotting import plot_spikecount_vs_time

        s = kwargs['settings'] if 'settings' in kwargs else settings
        log_dir = kwargs['path'] if 'path' in kwargs \
            else s['log_dir_of_current_run']

        # Load neuron layers and connections if conversion was done during a
        # previous session.
        if self.get_output is None:
            echo("Restoring layer connections...\n")
            self.load()
            self.parsed_model = keras.models.load_model(os.path.join(
                s['path_wd'], s['filename_parsed_model']+'.h5'))

        si = s.get('sample_indices_to_test', [])
        if si:
            assert len(si) == s['batch_size'], dedent("""
                You attempted to test the SNN on a total number of samples that
                is not compatible with the batch size with which the SNN was
                converted. Either change the number of samples to test to be
                equal to the batch size, or convert the ANN again using the
                corresponding batch size.""")
            if x_test is None:
                # Probably need to turn off shuffling in ImageDataGenerator
                # for this to produce the desired samples.
                x_test, y_test = dataflow.next()
            x_test = np.array([x_test[i] for i in si])
            y_test = np.array([y_test[i] for i in si])

        # Divide the test set into batches and run all samples in a batch in
        # parallel.
        num_batches = int(np.floor(s['num_to_test'] / s['batch_size']))

        if s['verbose'] > 1:
            print("Starting new simulation...\n")
        # Allocate a list 'spiketrains_batch' with the following specification:
        # Each entry in ``spiketrains_batch`` contains a tuple
        # ``(spiketimes, label)`` for each layer of the network (for the first
        # batch only, and excluding ``Flatten`` layers).
        # ``spiketimes`` is an array where the last index contains the spike
        # times of the specific neuron, and the first indices run over the
        # number of neurons in the layer:
        # (batch_size, n_chnls, n_rows, n_cols, duration)
        # ``label`` is a string specifying both the layer type and the index,
        # e.g. ``'03Dense'``.
        if s['verbose'] > 2:
            spiketrains_batch = []
            for layer in self.snn.layers:
                if 'Flatten' in layer.name:
                    continue
                shape = list(layer.output_shape) + [int(s['duration']/s['dt'])]
                spiketrains_batch.append((np.zeros(shape, 'float32'),
                                          layer.name))
            # Allocate list for plotting the error vs simulation time
            err = []

        record_error_vs_time = False
        if record_error_vs_time:
            err = []

        truth = []
        guesses = []
        rescale_fac = 1
        num_classes = 0
        for batch_idx in range(num_batches):
            # Get a batch of samples
            if x_test is None:
                x_batch, y_batch = dataflow.next()
                imagenet = False
                if imagenet:  # Only for imagenet!
                    x_batch = preprocess_input(x_batch)
            else:
                batch_idxs = range(s['batch_size'] * batch_idx,
                                   s['batch_size'] * (batch_idx + 1))
                x_batch = x_test[batch_idxs, :]
                y_batch = y_test[batch_idxs, :]
            num_classes = y_batch.shape[1]

            # Either use Poisson spiketrains as inputs to the SNN, or take the
            # original data.
            if s['poisson_input']:
                # This factor determines the probability threshold for cells in
                # the input layer to fire a spike. Increasing ``input_rate``
                # increases the firing rate of the input and subsequent layers.
                rescale_fac = np.max(x_batch) * 1000 / (s['input_rate']*s['dt'])
            else:
                # Simply use the analog values of the original data as input.
                inp = x_batch

            # Reset network variables.
            self.sim.reset(self.snn.layers[-1])

            # Loop through simulation time.
            output = np.zeros((s['batch_size'], y_batch.shape[1]),
                              dtype='int32')
            num_timesteps = int(s['duration'] / s['dt'])
            if s['verbose'] > 2:
                total_spike_count_over_time = np.zeros((num_timesteps,
                                                        s['batch_size']))

            t_idx = 0
            for t in np.arange(0, s['duration'], s['dt']):
                if s['poisson_input']:
                    # Create Poisson input.
                    spike_snapshot = \
                        np.random.random_sample(x_batch.shape) * rescale_fac
                    inp = (spike_snapshot <= np.abs(x_batch)).astype('float32')
                    # For BinaryNets, with input that is not normalized and not
                    # all positive, we stimulate with spikes of the same size
                    # as the maximum activation, and the same sign as the
                    # corresponding activation. Is there a better solution?
                    # inp *= np.max(x_batch) * np.sign(x_batch)
                # Main step: Propagate poisson input through network and record
                # output spikes.
                if s['online_normalization']:
                    out_spikes, ts, total_spike_count, thresh, max_spikerate, \
                        spiketrain = self.get_output(inp, float(t))
                    print('Time: {:.2f}, thresh: {:.2f},'
                          ' max_spikerate: {:.2f}'.format(
                            float(np.array(ts)),
                            float(np.array(thresh)),
                            float(np.array(max_spikerate))))
                else:
                    out_spikes, ts, total_spike_count = \
                        self.get_output(inp, float(t))
                # Count number of spikes in output layer during whole
                # simulation.
                output += out_spikes.astype('int32')
                # For the first batch only, record the spiketrains of each
                # neuron in each layer.
                verb = 1 if record_error_vs_time else 2
                if batch_idx == 0 and s['verbose'] > verb:
                    j = 0
                    for i, layer in enumerate(self.snn.layers):
                        if 'Flatten' in self.snn.layers[i].name:
                            continue
                        spiketrains_batch[j][0][Ellipsis, t_idx] = \
                            layer.spiketrain.get_value()
                        j += 1
                    total_spike_count_over_time[t_idx] = \
                        np.array(total_spike_count)
                    # Get result by comparing the guessed class (i.e. the index
                    # of the neuron in the last layer which spiked most) to the
                    # ground truth.
                    truth_tmp = np.argmax(y_batch, axis=1)
                    guesses_tmp = np.argmax(output, axis=1)
                    err.append(np.mean(truth_tmp != guesses_tmp))
                t_idx += 1
                if s['verbose'] > 1:
                    echo('.')

            if s['verbose'] > 0:
                echo('\n')
                echo("Batch {} of {} completed ({:.1%})\n".format(
                    batch_idx + 1, num_batches, (batch_idx + 1) / num_batches))
                truth += list(np.argmax(y_batch, axis=1))
                guesses += list(np.argmax(output, axis=1))
                avg = np.mean(np.array(truth) == np.array(guesses))
                echo("Moving average accuracy: {:.2%}.\n".format(avg))
                with open(os.path.join(log_dir, 'accuracy.txt'), 'w') as f:
                    f.write("Moving average accuracy after batch " +
                            "{} of {}: {:.2%}.\n".format(batch_idx + 1,
                                                         num_batches, avg))

                if record_error_vs_time:
                    ann_err = self.ANN_err if hasattr(self, 'ANN_err') else None
                    plot_error_vs_time(err, ann_err=ann_err, path=log_dir)
                    with open(os.path.join(log_dir, 'err_vs_time.txt'), 'w') \
                            as f:
                        f.write(str(err))

                if batch_idx == 0 and s['verbose'] > 2:
                    plot_input_image(x_batch[0], int(np.argmax(y_batch[0])),
                                     log_dir)
                    ann_err = self.ANN_err if hasattr(self, 'ANN_err') else None
                    plot_error_vs_time(err, ann_err=ann_err, path=log_dir)
                    with open(os.path.join(log_dir, 'err_vs_time.txt'), 'w') \
                            as f:
                        f.write(str(err))
                    plot_spikecount_vs_time(total_spike_count_over_time,
                                            log_dir)
                    plot_confusion_matrix(truth, guesses, log_dir)
                    activations_batch = get_activations_batch(
                        self.parsed_model, x_batch)
                    output_graphs(spiketrains_batch, activations_batch, log_dir)
                    # del spiketrains_batch

                save_activations = True
                if save_activations:
                    print("Saving activations")
                    activations_batch = get_activations_batch(
                        self.parsed_model, x_batch)
                    path = os.path.join(log_dir, 'activations')
                    if not os.path.isdir(path):
                        os.makedirs(path)
                    np.savez_compressed(os.path.join(path, str(batch_idx)),
                                        activations=activations_batch,
                                        spiketrains=spiketrains_batch)

        count = np.zeros(num_classes)
        match = np.zeros(num_classes)
        for gt, p in zip(truth, guesses):
            count[gt] += 1
            if gt == p:
                match[gt] += 1
        avg_acc = np.mean(match / count)
        total_acc = np.mean(np.array(truth) == np.array(guesses))
        if s['verbose'] > 2:
            plot_confusion_matrix(truth, guesses, log_dir)
        echo("Simulation finished.\n\n")
        echo("Total accuracy: {:.2%} on {} test samples.\n\n".format(total_acc,
             len(guesses)))
        echo("Accuracy averaged over classes: {}".format(avg_acc))

        return total_acc
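The core of the Poisson input used above can be reproduced standalone: a pixel with intensity ``p`` fires in a timestep with probability proportional to ``p``. The batch shape and rate settings are toy values:

import numpy as np

x_batch = np.random.random_sample((2, 1, 8, 8))  # toy input batch in [0, 1)
input_rate, dt = 1000., 1.                       # Hz and ms, assumed settings
rescale_fac = np.max(x_batch) * 1000 / (input_rate * dt)
spike_snapshot = np.random.random_sample(x_batch.shape) * rescale_fac
inp = (spike_snapshot <= np.abs(x_batch)).astype('float32')  # binary spikes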
Example #11
    def run(self, x_test, y_test, **kwargs):
        """Simulate a spiking network with IF units and Poisson input in pyNN.

        Simulate a spiking network with IF units and Poisson input in pyNN,
        using a simulator like Brian, NEST, NEURON, etc.

        This function will randomly select ``settings['num_to_test']`` test
        samples among ``x_test`` and simulate the network on those.

        Alternatively, a list of specific input samples can be given to the
        toolbox GUI, which will then be used for testing.

        If ``settings['verbose'] > 1``, the simulator records the
        spiketrains and membrane potential of each neuron in each layer, for
        the last sample.

        This is somewhat costly in terms of memory and time, but can be useful
        for debugging the network's general functioning.

        Parameters
        ----------

        x_test : float32 array
            The input samples to test. With data of the form
            (channels, num_rows, num_cols), x_test has dimension
            (num_samples, channels*num_rows*num_cols) for a multi-layer
            perceptron, and (num_samples, channels, num_rows, num_cols) for a
            convolutional net.
        y_test : float32 array
            Ground truth of test data. Has dimension (num_samples, num_classes)
        kwargs: Optional[dict]
            - s: Optional[dict]
                Settings. If not given, the ``snntoolbox.config.settings``
                dictionary is used.
            - path: Optional[str]
                Where to store the output plots. If no path is given, this
                value is taken from the settings dictionary.

        Returns
        -------

        total_acc : float
            Number of correctly classified samples divided by total number of
            test samples.
        """

        from snntoolbox.io_utils.plotting import plot_confusion_matrix

        s = kwargs['settings'] if 'settings' in kwargs else settings
        log_dir = kwargs['path'] if 'path' in kwargs \
            else s['log_dir_of_current_run']

        # Load input layer
        input_layer = None
        for obj in self.snn.objects:
            if 'poissongroup' in obj.name and 'thresholder' not in obj.name:
                input_layer = obj
        assert input_layer, "No input layer found."

        # Update parameters
        namespace = {
            'v_thresh': s['v_thresh'] * self.sim.volt,
            'v_reset': s['v_reset'] * self.sim.volt,
            'tau_m': s['tau_m'] * self.sim.ms
        }
        results = []
        guesses = []
        truth = []

        # Iterate over the number of samples to test
        for test_num in range(s['num_to_test']):
            # If a list of specific input samples is given, iterate over that,
            # and otherwise pick a random test sample from among all possible
            # input samples in x_test.
            si = s['sample_indices_to_test']
            ind = randint(0, len(x_test) - 1) if si == [] else si[test_num]

            # Add Poisson input.
            if s['verbose'] > 1:
                echo("Creating poisson input...\n")
            input_layer.rates = x_test[ind, :].flatten() * s['input_rate'] * \
                self.sim.Hz

            # Run simulation for 'duration'.
            if s['verbose'] > 1:
                echo("Starting new simulation...\n")
            self.snn.store()
            self.snn.run(s['duration'] * self.sim.ms, namespace=namespace)

            # Get result by comparing the guessed class (i.e. the index of the
            # neuron in the last layer which spiked most) to the ground truth.
            guesses.append(np.argmax(self.spikemonitors[-1].count))
            truth.append(np.argmax(y_test[ind, :]))
            results.append(guesses[-1] == truth[-1])

            if s['verbose'] > 0:
                echo("Sample {} of {} completed.\n".format(
                    test_num + 1, s['num_to_test']))
                echo("Moving average accuracy: {:.2%}.\n".format(
                    np.mean(results)))

            if s['verbose'] > 1 and test_num == s['num_to_test'] - 1:
                echo("Simulation finished. Collecting results...\n")
                self.collect_plot_results(x_test[ind:ind + s['batch_size']],
                                          test_num)

            # Reset simulation time and recorded network variables for next
            # run.
            if s['verbose'] > 1:
                echo("Resetting simulator...\n")
            # Skip during last run so the recorded variables are not discarded
            if test_num < s['num_to_test'] - 1:
                self.snn.restore()
            if s['verbose'] > 1:
                echo("Done.\n")

        if s['verbose'] > 1:
            plot_confusion_matrix(truth, guesses, log_dir)

        total_acc = np.mean(results)
        ss = '' if s['num_to_test'] == 1 else 's'
        echo("Total accuracy: {:.2%} on {} test sample{}.\n\n".format(
            total_acc, s['num_to_test'], ss))

        self.snn.restore()

        return total_acc
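The ``store``/``restore`` calls above use Brian2's snapshot mechanism to reset the network between samples without rebuilding it. A minimal sketch:

from brian2 import Network, NeuronGroup, ms

net = Network(NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1'))
net.store()        # snapshot of state and clock
net.run(100 * ms)
net.restore()      # rewind time and state for the next sample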
Example #12
    def run(self, x_test, y_test, **kwargs):
        """Simulate a spiking network with IF units and Poisson input in pyNN.

        Simulate a spiking network with IF units and Poisson input in pyNN,
        using a simulator like Brian, NEST, NEURON, etc.

        This function will randomly select ``settings['num_to_test']`` test
        samples among ``x_test`` and simulate the network on those.

        Alternatively, a list of specific input samples can be given to the
        toolbox GUI, which will then be used for testing.

        If ``settings['verbose'] > 1``, the simulator records the
        spiketrains and membrane potential of each neuron in each layer, for
        the last sample.

        This is somewhat costly in terms of memory and time, but can be useful
        for debugging the network's general functioning.

        Parameters
        ----------

        x_test: float32 array
            The input samples to test. With data of the form
            (channels, num_rows, num_cols), x_test has dimension
            (num_samples, channels*num_rows*num_cols) for a multi-layer
            perceptron, and (num_samples, channels, num_rows, num_cols) for a
            convolutional net.
        y_test: float32 array
            Ground truth of test data. Has dimension (num_samples, num_classes)
        kwargs: Optional[dict]
            - s: Optional[dict]
                Settings. If not given, the ``snntoolbox.config.settings``
                dictionary is used.
            - path: Optional[str]
                Where to store the output plots. If no path is given, this
                value is taken from the settings dictionary.

        Returns
        -------

        total_acc: float
            Number of correctly classified samples divided by total number of
            test samples.
        """

        import keras
        from snntoolbox.io_utils.plotting import plot_confusion_matrix

        s = kwargs['settings'] if 'settings' in kwargs else settings
        log_dir = kwargs['path'] if 'path' in kwargs \
            else s['log_dir_of_current_run']

        # Setup pyNN simulator if it was not passed on from a previous session.
        if len(self.layers) == 0:
            echo("Restoring layer connections...\n")
            self.load()
            self.parsed_model = keras.models.load_model(
                os.path.join(s['path'], s['filename_parsed_model'] + '.h5'))

        # Set cellparameters of neurons in each layer and initialize membrane
        # potential.
        for layer in self.layers[1:]:
            layer.set(**self.cellparams)
            layer.initialize(v=self.layers[1].get('v_rest'))
        # The spikes of the last layer are recorded by default because they
        # contain the networks output (classification guess).
        self.layers[-1].record(['spikes'])

        results = []
        guesses = []
        truth = []

        # Iterate over the number of samples to test
        for test_num in range(s['num_to_test']):
            # Specify variables to record. For performance reasons, record
            # spikes and potential only for the last test sample. Have to
            # reload network in order to tell the layers to record new
            # variables.
            if s['verbose'] > 1 and test_num == s['num_to_test'] - 1:
                if s['num_to_test'] > 1:
                    echo("For last run, record spike rates and membrane " +
                         "potential of all layers.\n")
                    self.load()
                self.layers[0].record(['spikes'])
                for layer in self.layers[1:]:
                    layer.set(**self.cellparams)
                    layer.initialize(v=self.layers[1].get('v_rest'))
                    if s['verbose'] == 3:
                        layer.record(['spikes', 'v'])
                    else:
                        layer.record(['spikes'])

            # If a list of specific input samples is given, iterate over that,
            # and otherwise pick a random test sample from among all possible
            # input samples in x_test.
            si = s['sample_indices_to_test']
            ind = randint(0, len(x_test) - 1) if si == [] else si[test_num]

            # Add Poisson input.
            if s['verbose'] > 1:
                echo("Creating poisson input...\n")
            rates = x_test[ind, :].flatten()
            for i, cell in enumerate(self.layers[0]):
                cell.rate = rates[i] * s['input_rate']

            # Run simulation for 'duration'.
            if s['verbose'] > 1:
                echo("Starting new simulation...\n")
            self.sim.run(s['duration'])

            # Get result by comparing the guessed class (i.e. the index of the
            # neuron in the last layer which spiked most) to the ground truth.
            output = [
                len(spiketrain) for spiketrain in
                self.layers[-1].get_data().segments[-1].spiketrains
            ]
            guesses.append(np.argmax(output))
            truth.append(np.argmax(y_test[ind, :]))
            results.append(guesses[-1] == truth[-1])

            if s['verbose'] > 0:
                echo("Sample {} of {} completed.\n".format(
                    test_num + 1, s['num_to_test']))
                echo("Moving average accuracy: {:.2%}.\n".format(
                    np.mean(results)))

            if s['verbose'] > 1 and test_num == s['num_to_test'] - 1:
                echo("Simulation finished. Collecting results...\n")
                self.collect_plot_results(x_test[ind:ind + s['batch_size']],
                                          test_num)

            # Reset simulation time and recorded network variables for next run
            if s['verbose'] > 1:
                echo("Resetting simulator...\n")
            self.sim.reset()
            if s['verbose'] > 1:
                echo("Done.\n")

        if s['verbose'] > 1:
            plot_confusion_matrix(truth, guesses, log_dir)

        total_acc = np.mean(results)
        ss = '' if s['num_to_test'] == 1 else 's'
        echo("Total accuracy: {:.2%} on {} test sample{}.\n\n".format(
            total_acc, s['num_to_test'], ss))

        return total_acc
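The recording pattern above, reduced to its essentials in pyNN (backend and population size are assumptions; without input the counts are simply zero):

import numpy as np
import pyNN.nest as sim

sim.setup(timestep=1.0)
out = sim.Population(10, sim.IF_cond_exp())
out.record(['spikes'])
sim.run(100.0)
spiketrains = out.get_data().segments[-1].spiketrains
guess = int(np.argmax([len(st) for st in spiketrains]))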