Example No. 1
    def _check_state_var_recordings():
        """Assert that the option state_var_recordings is properly defined.

        Args:

        Returns:
        """
        for i, tup in enumerate(config.state_var_recordings):
            if not isinstance(tup, tuple) or len(tup) != 6:
                err_msg = 'Option \'state_var_recordings\' should be ' \
                          + 'a list of tuples of size 6.'
                raise ConfigException(err_msg)

            t, l, var, inds, _, _ = tup
            types = ['ne', 'ni', 'ee', 'ei', 'ie']
            if not (t in types and isinstance(l, int) and l >= 0 \
                    and (isinstance(var, str) or isinstance(var, list)) \
                    and (isinstance(inds, (bool, int)) \
                         or isinstance(inds, list))):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'state_var_recordings\' is not properly ' \
                          + 'formatted.'
                raise ConfigException(err_msg)

            if isinstance(var, str):
                config.state_var_recordings[i] = \
                    utils.set_tuple_item(tup, 2, [var])
            if isinstance(inds, int) and not isinstance(inds, bool):
                config.state_var_recordings[i] = \
                    utils.set_tuple_item(tup, 3, [inds])
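As a hedged sketch (reusing the 'config' object from the snippet; the last two
tuple fields are unpacked but not validated here, so they are left as None
placeholders), a valid entry and its normalization could look like this:

    # Hypothetical entry: (population type, layer, variable(s), indices, _, _).
    config.state_var_recordings = [('ee', 1, 'w', True, None, None)]
    _check_state_var_recordings()
    # A scalar variable name is wrapped into a list by the check:
    # config.state_var_recordings[0] == ('ee', 1, ['w'], True, None, None)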
Example No. 2
    def _check_spike_event_recordings():
        """Assert that the option spike_event_recordings is properly defined.

        Args:

        Returns:
        """
        for i, tup in enumerate(config.spike_event_recordings):
            if not isinstance(tup, tuple) or len(tup) != 4:
                err_msg = 'Option \'spike_event_recordings\' should be ' \
                          + 'a list of tuples of size 4.'
                raise ConfigException(err_msg)

            t, l, var, _ = tup
            if not (t in ['ne', 'ni'] and isinstance(l, int) and l >= 0 \
                    and (var is None or isinstance(var, str)
                         or isinstance(var, list))):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'spike_event_recordings\' is not properly ' \
                          + 'formatted.'
                raise ConfigException(err_msg)

            if isinstance(var, str):
                config.spike_event_recordings[i] = \
                    utils.set_tuple_item(tup, 2, [var])
Example No. 3
 def vars_exists(tup, layer, source, var, inds=None):
     if var is not None and isinstance(var, list):
         for v in var:
             if not hasattr(source, v):
                 # Print available state variables to ease debugging.
                 print(source.get_states().keys())
                 raise ConfigException('Variable ' + v + ' does not ' \
                                       + 'exist for recording %s.' \
                                       % (str(tup)))
     if inds is not None and isinstance(inds, list):
         for i in inds:
             if not (i >= 0 and i < source.N):
                 raise ConfigException('Recording %s cannot have ' \
                               % (str(tup)) + 'index %d.' % (i))
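A hedged usage sketch with a minimal Brian2 group (the recording tuple is
illustrative; Brian2 exposes state variables as attributes, which is what the
hasattr() check relies on):

    import brian2 as b2

    group = b2.NeuronGroup(10, 'v : volt')  # has state variable 'v', N == 10
    rec = ('ne', 1, ['v'], [0, 9])          # illustrative recording tuple
    vars_exists(rec, 1, group, ['v'], inds=[0, 9])  # passes silently
    # vars_exists(rec, 1, group, ['u'])  would raise a ConfigException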
Example No. 4
    def __init__(self, network, patterns):
        """Induce pattern into network as specified in config.

        Args:
            network: An instance of the class NetworkModel.
            patterns: An instance of the class PatternGeneration.

        Returns:
        """
        super().__init__()

        self._network = network
        self._patterns = patterns
        self._eq_state = EqStateVars()

        self._active_output = None

        if config.pattern_induction_method == 'simple':
            self._simple_induction()
        elif config.pattern_induction_method == 'impact_modulation':
            self._impact_modulation_induction()
        else:
            raise ConfigException('The chosen induction_method \'%s\' is ' \
                                  % (config.pattern_induction_method) + \
                                  'unknown. Please reconsider the option ' \
                                  + '\'pattern_induction_method\'.')

        self._patterns.register(self)
        self._eq_state.register(self)
Example No. 5
    def _impact_modulation_induction(self):
        """In this induction method, the pattern scales the influence of
        synaptic modification on the postsynaptic potential according to the
        pattern strength. The exact way of modification is defined by the user
        due to the variable 'patmod'.

        This method simply ensures, that the variable 'patmod' is part of at
        least one synaptic state.

        Note, that the default value for patmod is set in the constructor of
        EqStateVars.

        Args:

        Returns:
        """
        patmod_considered = False
        for l in range(1, self._network.num_layers):
            _, _, ei, ie, ee = self._network.brian_objects(l)
            for synapses in [ei, ie, ee]:
                if 'patmod' in synapses.get_states().keys():
                    patmod_considered = True
                    break
            if patmod_considered:
                break
        if not patmod_considered:
            error_msg = 'Induction method %s requires consideration of ' \
                    % (config.pattern_induction_method) + 'synaptic variable'\
                    + ' \'patmod\'.'
            raise ConfigException(error_msg)
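The check passes as soon as any synapse population carries 'patmod' in its
state. A minimal sketch of user-supplied exc.-exc. equations that would
satisfy it (everything except the name 'patmod' is illustrative):

    # Declaring 'patmod' in the synapse model makes it a synaptic state
    # variable, so it appears in synapses.get_states().
    ee_model = '''w : volt
    patmod : 1'''
    ee_on_pre = 'v_post += patmod * w'  # the pattern scales the synaptic impact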
Example No. 6
    def apply_layer_wise_decay(self):
        """The influence of the pattern on lower layers should not be as strong
        as its influence on higher layers. Otherwise, the network is unlikely
        to learn the input distribution. Therefore, this method adopts the
        decay as specified by config.pattern_layer_influence.

        Args:

        Returns:
        """
        if self._layer_wise_decay_applied:
            logger.warning("Layer-wise decay on patterns has been applied ' \
                           + 'more than once.")

        self._layer_wise_decay_applied = True

        influence = config.pattern_layer_influence
        if isinstance(influence, str):
            if influence == 'linear':
                influence = lambda l, L: l / (L - 1)
            elif influence == 'quadratic':
                influence = lambda l, L: (l / (L - 1))**2
            else:
                raise ConfigException('Method \'' + influence + '\' unknown ' \
                                      + 'for config option ' \
                                      + '\'pattern_layer_influence\'.')

        for i, pattern in enumerate(self._patterns):
            scaler = influence(i + 1, self._network.num_layers)
            self._patterns[i] = pattern * scaler

        # Notify observers.
        self.update_observers(self.__class__.__name__)
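For illustration, the two built-in influence profiles for a network with
L = 4 layers; the scaler grows toward the output layer:

    linear = lambda l, L: l / (L - 1)
    quadratic = lambda l, L: (l / (L - 1))**2
    [linear(l, 4) for l in (1, 2, 3)]     # [0.333..., 0.666..., 1.0]
    [quadratic(l, 4) for l in (1, 2, 3)]  # [0.111..., 0.444..., 1.0]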
Example No. 7
    def __init__(self, network):
        """Initiate the pattern generation according to the user configs.

        Args:
            network: An instance of the class NetworkModel that fully defines
                the number of excitatory neurons per layer and their
                connectivity.

        Returns:
        """
        super().__init__()

        self._network = network

        self._num_patterns = network.data.output_size

        # Will contain a numpy matrix of size layer_size_i x num_patterns for
        # each layer i starting at i equals 1 (omit input layer).
        self._patterns = []

        # For how many layers do we need to generate a pattern? For all hidden
        # layers plus the output layer. But if we do not want to use a
        # classifier to read out the output, then we simply use an orthogonal
        # pattern for the output. Hence, in this case we only generate a
        # pattern for all hidden layers according to the config and use a
        # fully orthogonal pattern for the output.
        self._patterns_to_generate = network.num_layers - 2  # num hidden layers
        if config.output_size is not None:
            self._patterns_to_generate += 1

        if config.pattern_type == 'random':
            self._random_pattern()
        elif config.pattern_type == 'sparse_random':
            self._sparse_random_pattern()
        elif config.pattern_type == 'dense_orthogonal':
            self._dense_orthogonal_pattern()
        elif config.pattern_type == 'sparse_blurred':
            self._sparse_blurred_pattern()
        elif config.pattern_type == 'sparse_threshold':
            self._sparse_threshold_pattern()
        elif config.pattern_type == 'max_mutual_equidistant':
            self._max_mutual_equidistant_pattern()
        else:
            raise ConfigException('The chosen pattern type \'%s\' is ' \
                                  % (config.pattern_type) + 'unknown. Please' \
                                  + ' reconsider the option \'pattern_type\'.')

        # Append the output layer pattern (identity matrix), if no extra
        # readout classifier shall be used.
        if config.output_size is None:
            self._patterns.append(
                np.identity(self._num_patterns, dtype=np.float32))

        # This flag shall make sure that we do not unintentionally scale the
        # pattern (adopting its layer-wise influence) more than once.
        self._layer_wise_decay_applied = False
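When no readout classifier is used (config.output_size is None), the appended
output pattern is simply the identity matrix, i.e., one orthogonal column per
class. For three classes:

    import numpy as np

    np.identity(3, dtype=np.float32)
    # array([[1., 0., 0.],
    #        [0., 1., 0.],
    #        [0., 0., 1.]], dtype=float32)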
Example No. 8
    def _check_population_rate_recordings():
        """Assert that the option population_rate_recordings is properly
        defined.

        Args:

        Returns:
        """
        for tup in config.population_rate_recordings:
            if not isinstance(tup, tuple) or len(tup) != 5:
                err_msg = 'Option \'population_rate_recordings\' should be ' \
                          + 'a list of tuples of size 5.'
                raise ConfigException(err_msg)

            t, l, _, _, _ = tup
            if not (t in ['ne', 'ni'] and isinstance(l, int) and l >= 0):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'population_rate_recordings\' is not properly ' \
                          + 'formatted.'
                raise ConfigException(err_msg)
Example No. 9
def _transform_to_array(eq_module, variables, size):
    """Transform the variables, specified by a set of strings, into arrays if
    they are single value.

    Args:
        eq_module: A module, that contains global variables, that can be
            accessed.
        variables: A set of strings, specifying variables in the module
            eq_module.
        size: The size of the arrays to generate.

    Returns:
    """
    for var_name in variables:
        assert (hasattr(eq_module, var_name))
        var = getattr(eq_module, var_name)
        if isinstance(var, list):
            if len(var) != size:
                raise ConfigException('Wrongly configured equations. The ' \
                                      + 'attribute %s does not have the ' \
                                      % (var_name) + 'correct length.')
        else:
            setattr(eq_module, var_name, [var] * size)
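A hedged, self-contained sketch using types.SimpleNamespace as a stand-in for
the real equation module:

    import types

    eq = types.SimpleNamespace(ee_delay=2.0, ee_method=['euler'] * 3)
    _transform_to_array(eq, {'ee_delay', 'ee_method'}, size=3)
    eq.ee_delay   # [2.0, 2.0, 2.0] -- the scalar was broadcast
    eq.ee_method  # unchanged; it was already a list of the correct length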
Example No. 10
    def _max_mutual_equidistant_pattern(self):
        """Generate patterns, such that the mutual euclidean distance between
        patterns is maximized.

        The method utilizes the construction of a simplex with mutual
        equidistant points.

        Args:

        Returns:
        """
        for i in range(1, 1 + self._patterns_to_generate):
            n = self._network.layer_size(i)
            m = self._num_patterns

            if m > n + 1:
                raise ConfigException('The pattern type ' \
                                      + '\'max_mutual_equidistant\' requires ' \
                                      + 'each generated layer to contain at ' \
                                      + 'least (num_patterns - 1) neurons.')

            ap = simplex.get_unit_simplex(n, m)
            self._patterns.append(ap)
Example No. 11
    def __init__(self, data):
        """Builds the network.

        Args:
            data: An instance of the class Dataset. The constructor actually
                only needs to know the number of input and output neurons of
                the dataset. However, we directly pass the whole Dataset
                instance, such that we don't have to pass it again in future
                function calls.

        Returns:
        """
        self._data = data

        # Determine number of hidden layers.
        num_hlayers = config.num_hidden_layers

        if isinstance(config.hidden_layer_sizes, list):
            num_hlayers = len(config.hidden_layer_sizes)

        # Check if configurations are consistent.
        if isinstance(config.lateral_inhibition_window, list) \
           and len(config.lateral_inhibition_window) != num_hlayers + 1:
            raise ConfigException('The length of the option list ' \
                                  + '\'lateral_inhibition_window\' does not' \
                                  + ' match the number of layers specified.')

        # Determine size of each layer.
        self._layer_sizes = []

        self._layer_sizes.append(data.input_size)
        if isinstance(config.hidden_layer_sizes, list):
            self._layer_sizes.extend(config.hidden_layer_sizes)
        else:
            self._layer_sizes.extend([config.hidden_layer_sizes] * num_hlayers)

        # Output layer size. The output layer has either as size the number of
        # classes or the user-defined size (if specified).
        if config.output_size is None:
            self._layer_sizes.append(data.output_size)
        else:
            self._layer_sizes.append(config.output_size)

        # Check and prepare equations
        equation_preparation.prepare_equations(num_hlayers+1)

        # To compute firing rates of neurons, we have to store the spike counts
        # of neurons before the input rates have been changed (see above
        # comment). The input layer can be omitted.
        self._exc_prev_spike_count = []
        self._inh_prev_spike_count = [None]
        for i in range(len(self._layer_sizes)):
            self._exc_prev_spike_count.append(np.zeros(self._layer_sizes[i],
                                                       dtype=np.int64))
            if i > 0:
                self._inh_prev_spike_count.append( \
                    np.zeros(self._layer_sizes[i], dtype=np.int64))

        # In order to compute firing rates, we need to know the time difference
        # between the current time and a reference time.
        self._prev_simulation_time = np.float32(b2.defaultclock.t_)
        assert(self._prev_simulation_time == 0)

        # Layer-wise SpikeMonitors for exc. neurons.
        self._exc_spike_monitors = []
        # Layer-wise SpikeMonitors for inh. neurons (None for input layer).
        self._inh_spike_monitors = []
        # Excitatory NeuronGroup of each layer.
        self._exc_layer = []
        # Inhibitory NeuronGroup of each layer (will be None for input layer).
        self._inh_layer = []
        # Feed-forward connections from excitatory neurons of one layer to the
        # next one (fully-connected).
        self._ee_synapses = []
        # Excitatory to inhibitory connections within layer.
        self._ei_synapses = []
        # Inhibitory to excitatory connections within layer.
        self._ie_synapses = []

        ### Input Layer
        # The input of the network will be a Poisson Layer.
        self._input_group = b2.NeuronGroup(self._layer_sizes[0], 'rates : Hz',
                                           threshold='rand()<rates*dt',
                                           name='neurons_poisson_0')
        self._exc_layer.append(self._input_group)
        exc_sm_args, _ = Recordings.get_spike_monitor_args(0)
        self._exc_spike_monitors.append(b2.SpikeMonitor(self._input_group, \
            variables=exc_sm_args[0], record=exc_sm_args[1]))
        self._inh_layer.append(None)
        self._inh_spike_monitors.append(None)

        # There are no recurrent connections within the input layer.
        self._ei_synapses.append(None)
        self._ie_synapses.append(None)

        ### Hidden Layer + Output Layer
        # We can spawn a separate thread to set up each layer, as the setup
        # can be done independently.
        threads = []

        for i in range(num_hlayers + 1):
            if isinstance(config.lateral_inhibition_window, list):
                k = config.lateral_inhibition_window[i]
            else:
                k = config.lateral_inhibition_window

            threads.append(HiddenLayer(self._layer_sizes[i+1], i+1,  k,
                                       str(i+1)))

        if config.num_threads > 1:
            logger.warning('Multithreading during Network Initialization' + \
                           ' has been disabled due to known issues.')
        thread_chunks = utils.yield_chunks(threads, 1)
        #thread_chunks = utils.yield_chunks(threads, config.num_threads)

        for tc in thread_chunks:
            logger.debug('Starting threads to create %d layer/s in parallel.' \
                         % (len(tc)))
            for thread in tc:
                thread.start()

            for thread in tc:
                thread.join()

                exn = thread.exc_neurons
                inn = thread.inh_neurons
                eis = thread.ei_synapses
                ies = thread.ie_synapses

                l = len(self._exc_spike_monitors)
                exc_sm_args, inh_sm_args = Recordings.get_spike_monitor_args(l)

                self._exc_layer.append(exn)
                self._exc_spike_monitors.append(b2.SpikeMonitor(exn, \
                    variables=exc_sm_args[0], record=exc_sm_args[1]))
                self._inh_layer.append(inn)
                self._inh_spike_monitors.append(b2.SpikeMonitor(inn, \
                    variables=inh_sm_args[0], record=inh_sm_args[1]))

                self._ei_synapses.append(eis)
                self._ie_synapses.append(ies)

        ### Connect layers.
        for i in range(self.num_layers - 1):
            # Connect excitatory neurons of layer i with those of layer i+1.
            eq = config._equation_module
            ees = bw.synapses(self._exc_layer[i], self._exc_layer[i+1],
                              eq.ee_model[i], eq.ee_method[i], eq.ee_on_pre[i],
                              eq.ee_on_post[i], eq.ee_delay[i],
                              eq.ee_namespace[i],
                              eq.ee_initialization[i],
                              name='synapses_ee_'+str(i+1),
                              connections=None, # Fully-connected
                              layer=i+1)
            self._ee_synapses.append(ees)

        ### Create the Brian simulation control center (Network)
        self._network = b2.Network()
        # Add all components to the network.
        self._network.add(self._exc_layer)
        self._network.add(self._inh_layer[1:])
        self._network.add(self._exc_spike_monitors)
        self._network.add(self._inh_spike_monitors[1:])
        self._network.add(self._ee_synapses)
        self._network.add(self._ei_synapses[1:])
        self._network.add(self._ie_synapses[1:])
        # Double-check correctness if one changes the code!
        #print(self._network.objects)

        # FIXME delete assertions
        assert(len(self._exc_layer) == self.num_layers)
        assert(len(self._inh_layer) == self.num_layers)
        assert(len(self._exc_spike_monitors) == self.num_layers)
        assert(len(self._inh_spike_monitors) == self.num_layers)
        assert(len(self._ei_synapses) == self.num_layers)
        assert(len(self._ie_synapses) == self.num_layers)

        self._eq_state = EqStateVars()
        self._eq_state.register(self)
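A hedged sketch of the layer-size bookkeeping performed above (all values are
illustrative): with data.input_size = 784, config.hidden_layer_sizes =
[100, 100] and config.output_size = None on a 10-class dataset, the layout is

    input_size, hidden_sizes, num_classes = 784, [100, 100], 10
    layer_sizes = [input_size] + hidden_sizes + [num_classes]
    # layer_sizes == [784, 100, 100, 10]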
Example No. 12
 def layer_exists(tup, layer):
     if not (layer >= 0 and layer < network.num_layers):
         raise ConfigException('Recording %s has non-existing layer.' \
                               % (str(tup)))
Example No. 13
    random.seed(config.random_seed)
    bseed(config.random_seed)  # See the Brian documentation for an explanation.

    # Determine maximum number of threads to use.
    if config.num_threads is None:
        config.num_threads = multiprocessing.cpu_count()

    # Read the chosen dataset.
    logger.info('### Data preparation ...')
    if config.dataset == 'mnist':
        data = MNISTData()
    elif config.dataset == '7segment':
        data = SevenSegmentData()
    else:
        raise ConfigException('The chosen dataset \'%s\' is unknown. Please ' \
                              % config.dataset + 'reconsider the ' \
                              + '\'dataset\' option of the configuration file.')
    logger.info('### Data preparation ... Done')

    # Assemble the network.
    logger.info('### Building Network ...')
    network = NetworkModel(data)
    logger.info('### Building Network ... Done')

    # Visualize the just-assembled network.
    if config.plot_network or config.save_network_plot:
        logger.info('### Generating Network Plot ...')
        draw_network.draw_network(network)
        logger.info('### Generating Network Plot ... Done')

    if config.induce_patterns:
Example No. 14
def _check_state_vars(eq_module, variables):
    """Some state variables need extra treatment. This can be done in this
    method.

    - 'patmod': if 'impact_modulation' has been chosen as induction method,
      then this variable is required to effect synaptic dynamics in any way. If
      it is not included in any of the synaptic equations, a new equation is
      added to the presynaptic dynamics of exc.-exc. synapses.

    Args:
        eq_module: A module, that contains global variables, that can be
            accessed.
        variables: A set of strings, specifying variables in the module
            eq_module.

    Returns:
    """
    if config.induce_patterns and \
       config.pattern_induction_method == 'impact_modulation':
        # Check patmod.
        # No complex checking has been implemented. If the check is not
        # possible, we just abort it. It is later checked whether 'patmod' is
        # part of at least one synaptic state. If not, the program is
        # cancelled.
        abort_check = False

        patmod_considered = False
        for var_name in variables:
            var = getattr(eq_module, var_name)
            for var_elem in var:
                if isinstance(var_elem, str):
                    if 'patmod' in var_elem:
                        patmod_considered = True
                        break
                elif isinstance(var_elem, (b2.Equations, dict)):
                    abort_check = True
                    break
            if patmod_considered or abort_check:
                break

        # The user has not considered patmod; therefore, it is additively
        # applied to the postsynaptic membrane potential here.
        if not patmod_considered and not abort_check:
            # Force default to be zero.
            config.impact_modulation_default = 0

            ee_model = getattr(eq_module, 'ee_model')
            ee_on_pre = getattr(eq_module, 'ee_on_pre')

            error_msg = 'Induction method %s requires consideration of ' \
                    % (config.pattern_induction_method) + 'synaptic variable'\
                    + ' \'patmod\'.'

            for i in range(len(ee_model)):
                eq = 'patmod : 1'
                if ee_model[i] is None:
                    ee_model[i] = eq
                elif isinstance(ee_model[i], b2.Equations):
                    raise ConfigException(error_msg)
                else:
                    ee_model[i] += '\n' + eq

            for i in range(len(ee_on_pre)):
                eq = 'v_post += patmod * volt'
                if ee_on_pre[i] is None:
                    ee_on_pre[i] = eq
                elif isinstance(ee_on_pre[i], dict):
                    raise ConfigException(error_msg)
                else:
                    ee_on_pre[i] += '\n' + eq

            setattr(eq_module, 'ee_model', ee_model)
            setattr(eq_module, 'ee_on_pre', ee_on_pre)

            logger.warning('Induction method %s has been chosen, but synaptic'\
                           % (config.pattern_induction_method) + ' variable ' \
                           + '\'patmod\' has not been introduced. Therefore, '\
                           + 'patterns now increase the postsynaptic ' \
                           + 'potential on presynaptic spikes according to ' \
                           + 'their pattern weight.')
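A self-contained sketch of the string surgery performed by the fallback above
(two illustrative exc.-exc. populations, the second without a user model):

    ee_model = ['w : volt', None]
    ee_on_pre = ['v_post += w', None]
    for i, m in enumerate(ee_model):
        ee_model[i] = 'patmod : 1' if m is None else m + '\npatmod : 1'
    for i, p in enumerate(ee_on_pre):
        eq = 'v_post += patmod * volt'
        ee_on_pre[i] = eq if p is None else p + '\n' + eq
    # ee_model  == ['w : volt\npatmod : 1', 'patmod : 1']
    # ee_on_pre == ['v_post += w\nv_post += patmod * volt',
    #               'v_post += patmod * volt']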
Example No. 15
def synapses(source, target, model, method, on_pre, on_post, delay, namespace,
             initialization, name, connections=None, layer=None):
    """Create an instance of class Synapses.

    Note that all changes made later on (e.g., new connections established
    after this method was called) will not be affected by the initialization.

    Args:
        target: See definition of class Synapses.
        source: See definition of class Synapses.
        model: See definition of class Synapses.
        method: See definition of class Synapses.
        on_pre: See definition of class Synapses.
        on_post: See definition of class Synapses.
        delay: See definition of class Synapses.
        initialization: A dictionary that maps from parameter names in the
            Synapses instance to values that these parameters should be
            initialized with. (The values in the dict (if not single values)
            must match N_pre times N_post. Non-existing synapses will be
            ignored during parameter assignment.)
        namespace: See definition of class Synapses.
        name: See definition of class Synapses.
        connections: A tuple of two arrays corresponding to the parameters 'i'
            and 'j' in the method 'connect' of class Synapses. If None,
            full-connections are established.
        layer: The index of the current layer. If synapses are part of
            projections between consecutive layers, the value should be the
            index of the postsynaptic layer. If defined, a variable 'layer'
            with this index will be added to the namespace.

    Returns:
        An instance of class Synapses with initialized parameters as specified.
        If weights are not initialized due to the initialization dict, then a
        proper initialization method is chosen according to the config.
    """
    # We don't have to specify the layer for a Synapse, as it just uses the
    # variable from its postsynaptic NeuronGroup.
    #model, namespace = _add_layer(model, namespace, layer)

    synapses = b2.Synapses(source, target, model=model, method=method,
                           on_pre=on_pre, on_post=on_post, delay=delay,
                           namespace=namespace, name=name)

    # Connections must be established before weights can be initialized.
    # TODO instead of establishing connections here, give the user a wrapper
    # for the connect method that applies the initialization (i.e., a custom
    # Synapses class).
    if connections is None:
        synapses.connect()
    else:
        synapses.connect(i=connections[0], j=connections[1])

    init_weights = True

    if initialization is not None:
        for par, val in initialization.items():
            _initialize_parameter(synapses, par, val, (synapses.N_pre,
                                                       synapses.N_post))

        if 'w' in initialization.keys():
            init_weights = False

    if init_weights:
        if config.weight_initialization == 'uniform':
            val = lambda size: winit.uniform(size,
                                             wmin=config.winit_uniform_wmin,
                                             wmax=config.winit_uniform_wmax)
            _initialize_parameter(synapses, 'w', val, (synapses.N_pre,
                                                       synapses.N_post))
        else:
            raise ConfigException('The weight initialization method \'%s\' ' \
                                  % config.weight_initialization + 'is ' \
                                  + 'unknown.')

    return synapses
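A hedged usage sketch (the groups are illustrative, and the config referenced
inside the function must provide weight_initialization = 'uniform' together
with its winit_uniform_* bounds):

    import brian2 as b2

    pre = b2.NeuronGroup(10, 'v : volt')
    post = b2.NeuronGroup(5, 'v : volt')
    syn = synapses(pre, post, model='w : 1', method='euler',
                   on_pre='v_post += w * volt', on_post=None, delay=None,
                   namespace=None, initialization=None,
                   name='synapses_ee_demo', connections=None, layer=1)
    # connections=None yields full connectivity; since no 'w' was given in
    # 'initialization', the weights are drawn uniformly per the config.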
Example No. 16
def evaluate_output_rates(rates,
                          labels,
                          num_classes,
                          tag,
                          store=True,
                          epoch=None):
    """Evaluate the firing rates of the output layer.

    If the output layer size has not been specified by the user, each output
    neuron corresponds to a different class and the maximum firing rate
    determines the predicted label. If the user wishes to use another readout
    mechanism on the customized output layer, that classifier is applied
    instead. An overview of alternative readout methods is given in the config
    file.

    This method computes measures for multiclass classification problems, such
    as accuracy and f-scores. The scores are logged. If desired, the scores can
    also be stored in a file (with more details). The filename will be:
        'output_eval_<tag>.jsonl'

    Args:
        rates: A list of arrays. Each entry is an 1D array of spike counts
            (from the output layer).
        labels: A list of ground truth labels. Each entry corresponds to the
            rates given in the first parameter.
        num_classes: The number of output classes.
        tag: A name that is associated with the data split being evaluated.
            This tag will define the filename. If an alternative readout
            classifier is used, this tag decides whether the classifier has
            to be retrained.
        store: Whether to store the results or not. Results are appended as a
            single JSON object to a JSONL file.
        epoch: If specified, the epoch can later help to identify the results.

    Returns:
        The error (1-accuracy). (Or 1, if nothing can be evaluated)
    """

    y_true = []  # Ground truth
    y_pred = []  # Predicted sample

    rates_arr = np.asarray(rates)
    labels_arr = np.asarray(labels)

    # We want to ignore those samples that elicited no output spiking. (This
    # is decided by the user, who can tweak the simulation to continue as
    # long as no output spikes have appeared.)
    # I.e., if all output rates are zero, the sample is ignored.
    valid_rates = np.where(np.sum(rates_arr, axis=1) != 0)[0]
    ignored = labels_arr.size - valid_rates.size
    # Delete invalid samples.
    rates_arr = rates_arr[valid_rates]
    labels_arr = labels_arr[valid_rates]
    # Simple renaming.
    y_true = labels_arr

    # Helper functions to print results.
    _perc = lambda x: round(100 * x, 2)
    _sperc = lambda x: '%.2f%%' % (_perc(x))

    if ignored > 0:
        logger.warning('%d (%s) samples have been ignored during ' \
                       % (ignored, _sperc(ignored/len(rates))) \
                       + 'evaluation, since there has been no output ' \
                       + 'activity.')

    # Just a flag that is set to False for alternative classifiers, as they
    # provide less evaluation metrics. (I.e., some metrics cannot be calculated
    # and stored when using alternative classifiers).
    extra_scores = False

    # The case where we need no special readout, meaning each output neuron
    # simply represents a class and no further logic is needed to classify the
    # output. Note that this option provides several eval measures (such as
    # ambiguousness) that the alternative classifiers do not provide.
    if config.output_size is None:
        extra_scores = True

        # Predicted samples, where ambiguous outcomes are not necessarily
        # misclassifications. I.e., if the correct class has one of the highest
        # outputs, it is considered a correct classification.
        y_pred_with_ambig = []
        confidence = []  # Confidence of correct output
        unambiguousness = []  # Normalized distance to second best prediction
        # Ambiguous samples are samples that have multiple outputs with the
        # same maximum firing rate. They are considered misclassifications.
        ambiguous = 0

        for i in range(rates_arr.shape[0]):
            frates = rates_arr[i, :]
            label = labels_arr[i]

            pred = np.argmax(frates)
            pred_with_ambig = pred
            # Check whether the output is ambiguous.
            counts = Counter(frates)
            if counts[frates[pred]] > 1:
                ambiguous += 1
                # Choose the index that is not the correct one, but has maximum
                # confidence. I.e., enforce a misclassification.
                pred = np.argmax(
                    np.concatenate((frates[:label], [-1], frates[label + 1:])))
                assert (pred != label)

                if frates[pred] == frates[label]:
                    pred_with_ambig = label

            norm_frates = frates / np.linalg.norm(frates, 1)
            # Insert a placeholder at 'pred' (as done above for 'label') so the
            # indices of the remaining entries stay aligned with 'frates'.
            sec_best = np.argmax(
                np.concatenate((frates[:pred], [-1], frates[pred + 1:])))

            y_pred.append(pred)
            y_pred_with_ambig.append(pred_with_ambig)

            confidence.append(norm_frates[label])
            unambiguousness.append(norm_frates[pred] - norm_frates[sec_best])

        y_pred = np.asarray(y_pred)

    # Use an alternative readout classifier to evaluate the output rates.
    # Retrain the classifier if tag == 'training'.
    else:
        global CLASSIFIER

        # Retrain if necessary.
        if tag == 'training':
            logger.debug('Retraining readout classifier according to method:' \
                         + ' \'%s\'' % config.classification_method)
            if config.classification_method == 'highest_response':
                CLASSIFIER = HighestResponse()
                CLASSIFIER.fit(rates_arr, labels_arr, num_classes=num_classes)
            elif config.classification_method == 'svm':
                CLASSIFIER = SVM()
                CLASSIFIER.fit(rates_arr,
                               labels_arr,
                               C=config.svm_C,
                               kernel=config.svm_kernel)
            else:
                error_msg = 'Classification method \'%s\' is unknown. ' \
                        % config.classification_method
                raise ConfigException(error_msg)

        # Predict outcome for given rates.
        y_pred = CLASSIFIER.predict(rates_arr)

    if y_true.size == 0:
        return 1

    json_obj = dict()
    json_obj['timestamp'] = time.time()
    if epoch is not None:
        json_obj['epoch'] = epoch
    json_obj['num_samples'] = len(rates)
    json_obj['ignored'] = ignored
    json_obj['classification_method'] = None
    if config.output_size is not None:
        json_obj['classification_method'] = config.classification_method

    if extra_scores:
        json_obj['ambiguous'] = ambiguous

        if ambiguous > 0:
            logger.debug('%d (%s) samples had more than one output neuron with'\
                         % (ambiguous, _sperc(ambiguous/len(rates)))
                         + ' maximum confidence (ambiguous classification).')

    acc = accuracy_score(y_true, y_pred)
    json_obj['accuracy'] = acc
    logger.info('### %s accuracy: %s' % (tag, _sperc(acc)))

    if extra_scores and ambiguous > 0:
        acc_with_ambig = accuracy_score(y_true, y_pred_with_ambig)
        if acc_with_ambig != acc:
            json_obj['accuracy_with_ambiguous'] = acc_with_ambig
            logger.info('When ambiguous outcomes are allowed, the accuracy ' \
                        + 'would be: %s' % (_sperc(acc_with_ambig)))

    classes = list(range(num_classes))

    def _f_score(method):
        prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, \
            labels=classes, average=method)
        json_obj['prec_' + method] = prec
        json_obj['rec_' + method] = rec
        json_obj['f1_' + method] = f1
        return f1

    f1_micro = _f_score('micro')
    f1_macro = _f_score('macro')
    f1_weighted = _f_score('weighted')

    logger.info('Micro/Macro/Weighted - F-Scores: %.4f, %.4f, %.4f.' \
                % (round(f1_micro,4), round(f1_macro,4), round(f1_weighted,4)))

    # Label-wise f-scores.
    prec, rec, f1, supp = precision_recall_fscore_support(y_true, y_pred, \
        labels=classes, average=None)
    json_obj['labels'] = classes
    json_obj['prec'] = prec.tolist()
    json_obj['rec'] = rec.tolist()
    json_obj['f1'] = f1.tolist()
    json_obj['support'] = supp.tolist()

    # Prediction confidence and unambiguousness.
    if extra_scores:
        conf_mean = np.mean(confidence)
        conf_std = np.std(confidence)
        unambig_mean = np.mean(unambiguousness)
        unambig_std = np.std(unambiguousness)
        json_obj['confidence_mean'] = conf_mean
        json_obj['confidence_std'] = conf_std
        json_obj['unambiguousness_mean'] = unambig_mean
        json_obj['unambiguousness_std'] = unambig_std

        logger.info('Confidence for correct label [mean (std)]: %.4f (%.4f).' \
                    % (round(conf_mean, 4), round(conf_std, 4)))

        logger.info('Unambiguousness of the predictions (distance of best to ' \
                    + 'second-best prediction) [mean (std)]: %.4f (%.4f).' \
                    % (round(unambig_mean, 4), round(unambig_std, 4)))

    # Store results.
    if store:
        if not os.path.isdir(config.eval_dir):
            os.makedirs(config.eval_dir)
        filename = os.path.join(config.eval_dir,
                                'output_eval_' + tag + '.jsonl')

        with open(filename, 'a') as f:
            json_str = json.dumps(json_obj)
            f.write(json_str + '\n')
            f.flush()

        logger.debug('Appended output evaluations to %s.' % filename)

    return 1 - acc
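A hedged usage sketch with toy data (assumes config.output_size is None, so
the plain argmax readout is used):

    import numpy as np

    rates = [np.array([5, 1, 0]), np.array([0, 7, 2]), np.array([0, 0, 0])]
    labels = [0, 1, 2]
    err = evaluate_output_rates(rates, labels, num_classes=3,
                                tag='validation', store=False)
    # The all-zero third sample is ignored; the remaining two are classified
    # correctly, so err == 0.0 (i.e., 1 - accuracy).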