def characterize_features(domain, experiment, occlusion=None, bars_type=None):
    """ Produces a graph of features averages and standard deviations.
    """
    features_prefix = constants.features_name(experiment, occlusion, bars_type)
    tf_filename = features_prefix + constants.testing_suffix

    labels_prefix = constants.labels_name
    tl_filename = labels_prefix + constants.testing_suffix

    features = get_all_data(tf_filename, domain)
    labels = get_all_data(tl_filename, 1)

    d = {i: [] for i in constants.all_labels}

    for (i, feats) in zip(labels, features):
        # Separates features per label.
        d[i].append(feats)

    means = {}
    stdevs = {}
    for i in constants.all_labels:
        # The list of features becomes a matrix
        d[i] = np.array(d[i])
        means[i] = np.mean(d[i], axis=0)
        stdevs[i] = np.std(d[i], axis=0)

    plot_features_graph(domain, means, stdevs, experiment, occlusion,
                        bars_type)
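

# A minimal, self-contained sketch (hypothetical helper, not used by the
# pipeline) of the per-label statistics computed above: features are grouped
# by label, then the mean and standard deviation are taken per feature column.
def _sketch_per_label_stats():
    import numpy as np
    features = np.random.rand(100, 8)        # 100 samples, 8 features.
    labels = np.random.randint(0, 10, 100)   # 10 hypothetical classes.
    means = {}
    stdevs = {}
    for label in np.unique(labels):
        group = features[labels == label]    # Matrix of rows with this label.
        means[label] = group.mean(axis=0)
        stdevs[label] = group.std(axis=0)
    return means, stdevs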


def plot_features_graph(domain,
                        means,
                        stdevs,
                        experiment,
                        occlusion=None,
                        bars_type=None):
    """ Draws the characterist shape of features per label.

    The graph is a dots and lines graph with error bars denoting standard deviations.
    """
    ymin = np.inf
    ymax = -np.inf
    for i in constants.all_labels:
        yn = (means[i] - stdevs[i]).min()
        yx = (means[i] + stdevs[i]).max()
        ymin = min(ymin, yn)
        ymax = max(ymax, yx)

    main_step = 100.0 / domain
    xrange = np.arange(0, 100, main_step)
    fmts = get_formats(constants.n_labels)

    for i in constants.all_labels:
        # One figure per label, closed after saving to avoid accumulating
        # open figures.
        plt.figure(figsize=(12, 5))

        plt.errorbar(xrange,
                     means[i],
                     fmt=fmts[i],
                     yerr=stdevs[i],
                     label=str(i))
        plt.xlim(0, 100)
        plt.ylim(ymin, ymax)
        plt.xticks(xrange, labels=[''] * len(xrange))

        plt.xlabel(_('Features'))
        plt.ylabel(_('Values'))
        plt.legend(loc='right')
        plt.grid(True)

        filename = constants.features_name(
            experiment, occlusion, bars_type) + '-' + str(i) + _('-english')
        plt.savefig(constants.picture_filename(filename), dpi=500)
        plt.close()


def remember(experiment, occlusion=None, bars_type=None, tolerance=0):
    """ Creates images from features.
    
    Uses the decoder part of the neural networks to (re)create images from features.

    Parameters
    ----------
    experiment : TYPE
        DESCRIPTION.
    occlusion : TYPE, optional
        DESCRIPTION. The default is None.
    tolerance : TYPE, optional
        DESCRIPTION. The default is 0.

    Returns
    -------
    None.

    """

    for i in range(constants.training_stages):
        testing_data_filename = constants.data_name + constants.testing_suffix
        testing_data_filename = constants.data_filename(testing_data_filename, i)
        testing_features_filename = constants.features_name(
            experiment, occlusion, bars_type) + constants.testing_suffix
        testing_features_filename = constants.data_filename(
            testing_features_filename, i)
        testing_labels_filename = constants.labels_name + constants.testing_suffix
        testing_labels_filename = constants.data_filename(
            testing_labels_filename, i)
        memories_filename = constants.memories_name(
            experiment, occlusion, bars_type, tolerance)
        memories_filename = constants.data_filename(memories_filename, i)
        labels_filename = constants.labels_name + constants.memory_suffix
        labels_filename = constants.data_filename(labels_filename, i)
        model_filename = constants.model_filename(constants.model_name, i)

        testing_data = np.load(testing_data_filename)
        testing_features = np.load(testing_features_filename)
        testing_labels = np.load(testing_labels_filename)
        memories = np.load(memories_filename)
        labels = np.load(labels_filename)
        model = tf.keras.models.load_model(model_filename)

        # Drop the classifier.
        autoencoder = Model(model.input, model.output[1])
        autoencoder.summary()

        # Drop the encoder
        input_mem = Input(shape=(constants.domain, ))
        decoded = get_decoder(input_mem)
        decoder = Model(inputs=input_mem, outputs=decoded)
        decoder.summary()

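        # Note: layers[11:] assumes the decoder starts at layer index 11 of
        # the full autoencoder; this offset depends on the architecture built
        # by get_decoder and must be kept in sync with it.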
        for dlayer, alayer in zip(decoder.layers[1:], autoencoder.layers[11:]):
            dlayer.set_weights(alayer.get_weights())

        produced_images = decoder.predict(testing_features)
        n = len(testing_labels)

        Parallel(n_jobs=constants.n_jobs, verbose=5)(
            delayed(store_images)(
                original, produced,
                constants.testing_directory(experiment, occlusion, bars_type),
                i, j, label)
            for (j, original, produced, label) in zip(
                range(n), testing_data, produced_images, testing_labels))

        total = len(memories)
        steps = len(constants.memory_fills)
        step_size = int(total/steps)

        for j in range(steps):
            print(f'Decoding memory size {j} and stage {i}')
            start = j*step_size
            end = start + step_size
            mem_data = memories[start:end]
            mem_labels = labels[start:end]
            produced_images = decoder.predict(mem_data)

            Parallel(n_jobs=constants.n_jobs, verbose=5)(
                delayed(store_memories)(
                    label, produced, features,
                    constants.memories_directory(
                        experiment, occlusion, bars_type, tolerance),
                    i, j)
                for (produced, features, label) in zip(
                    produced_images, mem_data, mem_labels))
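

# A hedged sanity check (hypothetical helper, not part of the pipeline) for
# the weight transfer performed in remember(): every layer of the standalone
# decoder should hold exactly the weights copied from the autoencoder.
def _sketch_check_decoder_transfer(decoder, autoencoder, offset=11):
    import numpy as np
    for dlayer, alayer in zip(decoder.layers[1:], autoencoder.layers[offset:]):
        for dw, aw in zip(dlayer.get_weights(), alayer.get_weights()):
            assert np.array_equal(dw, aw), 'Decoder weights were not copied.'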


def test_recalling_fold(n_memories,
                        mem_size,
                        domain,
                        fold,
                        experiment,
                        occlusion=None,
                        bars_type=None,
                        tolerance=0):
    """ Fills the memories incrementally and measures recall at each step. """
    # Create the required associative memories.
    ams = dict.fromkeys(range(n_memories))
    for j in ams:
        ams[j] = AssociativeMemory(domain, mem_size, tolerance)

    suffix = constants.filling_suffix
    filling_features_filename = constants.features_name() + suffix
    filling_features_filename = constants.data_filename(
        filling_features_filename, fold)
    filling_labels_filename = constants.labels_name + suffix
    filling_labels_filename = constants.data_filename(filling_labels_filename,
                                                      fold)

    suffix = constants.testing_suffix
    testing_features_filename = constants.features_name(
        experiment, occlusion, bars_type) + suffix
    testing_features_filename = constants.data_filename(
        testing_features_filename, fold)
    testing_labels_filename = constants.labels_name + suffix
    testing_labels_filename = constants.data_filename(testing_labels_filename,
                                                      fold)

    filling_features = np.load(filling_features_filename)
    filling_labels = np.load(filling_labels_filename)
    testing_features = np.load(testing_features_filename)
    testing_labels = np.load(testing_labels_filename)

    filling_max = filling_features.max()
    testing_max = testing_features.max()
    filling_min = filling_features.min()
    testing_min = testing_features.min()

    maximum = max(filling_max, testing_max)
    minimum = min(filling_min, testing_min)

    total = len(filling_features)
    percents = np.array(constants.memory_fills)
    steps = np.round(total * percents / 100.0).astype(int)
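    # For example, if total == 1000 and constants.memory_fills is
    # [1, 2, 4, 8, 16, 32, 64, 100] (hypothetical values), then
    # steps == [10, 20, 40, 80, 160, 320, 640, 1000].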

    stage_recalls = []
    stage_entropies = {}
    stage_mprecision = {}
    stage_mrecall = {}
    total_precisions = []
    total_recalls = []
    mismatches = []

    i = 0
    for j, k in enumerate(steps):
        features = filling_features[i:k]
        labels = filling_labels[i:k]

        recalls, measures, entropies, total_precision, total_recall, mis_count = get_recalls(
            ams, mem_size, domain, minimum, maximum, features, labels,
            testing_features, testing_labels, fold)

        # A list of tuples (position, label, features)
        stage_recalls += recalls

        # An array with entropies per memory
        stage_entropies[j] = entropies

        # An array with precision per memory
        stage_mprecision[j] = measures[constants.precision_idx, :]

        # An array with recall per memory
        stage_mrecall[j] = measures[constants.recall_idx, :]

        # Overall recalls and precisions per step.
        total_recalls.append(total_recall)
        total_precisions.append(total_precision)

        i = k

        mismatches.append(mis_count)

    return (fold, stage_recalls, stage_entropies, stage_mprecision,
            stage_mrecall, np.array(total_precisions),
            np.array(total_recalls), np.array(mismatches))
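

# A minimal sketch (an assumption about the caller, not taken from this file)
# of how test_recalling could dispatch test_recalling_fold over all folds,
# mirroring the joblib pattern used elsewhere in this module.
def _sketch_dispatch_folds(n_memories, mem_size, domain, experiment):
    from joblib import Parallel, delayed
    return Parallel(n_jobs=constants.n_jobs, verbose=50)(
        delayed(test_recalling_fold)(
            n_memories, mem_size, domain, fold, experiment)
        for fold in range(constants.training_stages))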


def test_memories(domain, experiment):
    """ Evaluates memories of all configured sizes, saving and plotting results. """

    average_entropy = []
    stdev_entropy = []

    average_precision = []
    stdev_precision = []
    average_recall = []
    stdev_recall = []

    all_precision = []
    all_recall = []

    no_response = []
    no_correct_response = []
    no_correct_chosen = []
    correct_chosen = []
    total_responses = []

    labels_x_memory = constants.labels_per_memory[experiment]
    n_memories = int(constants.n_labels / labels_x_memory)

    for i in range(constants.training_stages):
        gc.collect()

        suffix = constants.filling_suffix
        training_features_filename = constants.features_name(
            experiment) + suffix
        training_features_filename = constants.data_filename(
            training_features_filename, i)
        training_labels_filename = constants.labels_name + suffix
        training_labels_filename = constants.data_filename(
            training_labels_filename, i)

        suffix = constants.testing_suffix
        testing_features_filename = constants.features_name(
            experiment) + suffix
        testing_features_filename = constants.data_filename(
            testing_features_filename, i)
        testing_labels_filename = constants.labels_name + suffix
        testing_labels_filename = constants.data_filename(
            testing_labels_filename, i)

        training_features = np.load(training_features_filename)
        training_labels = np.load(training_labels_filename)
        testing_features = np.load(testing_features_filename)
        testing_labels = np.load(testing_labels_filename)

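        # Measures (precision, recall, ...) per memory size and per memory.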
        measures_per_size = np.zeros(
            (len(constants.memory_sizes), n_memories, constants.n_measures),
            dtype=np.float64)

        # An entropy value per memory size and memory.
        entropies = np.zeros((len(constants.memory_sizes), n_memories),
                             dtype=np.float64)
        behaviours = np.zeros(
            (len(constants.memory_sizes), constants.n_behaviours))

        print('Training the different co-domain memories -- NxM: '
              f'{experiment}, run: {i}')
        # Processes running in parallel.
        list_measures_entropies = Parallel(
            n_jobs=constants.n_jobs,
            verbose=50)(delayed(get_ams_results)(
                midx, msize, domain, labels_x_memory, training_features,
                testing_features, training_labels, testing_labels)
                        for midx, msize in enumerate(constants.memory_sizes))

        for j, measures, entropy, behaviour in list_measures_entropies:
            measures_per_size[j, :, :] = measures.T
            entropies[j, :] = entropy
            behaviours[j, :] = behaviour

        ######################################################################
        # Calculate precision and recall.

        precision = np.zeros((len(constants.memory_sizes), n_memories + 2),
                             dtype=np.float64)
        recall = np.zeros((len(constants.memory_sizes), n_memories + 2),
                          dtype=np.float64)
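        # The last two columns hold the mean and the standard deviation over
        # the per-memory values (via constants.mean_idx and constants.std_idx).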

        for j, s in enumerate(constants.memory_sizes):
            precision[j, 0:n_memories] = measures_per_size[
                j, :, constants.precision_idx]
            precision[j, constants.mean_idx(n_memories)] = measures_per_size[
                j, :, constants.precision_idx].mean()
            precision[j, constants.std_idx(n_memories)] = measures_per_size[
                j, :, constants.precision_idx].std()
            recall[j, 0:n_memories] = measures_per_size[j, :,
                                                        constants.recall_idx]
            recall[j, constants.mean_idx(n_memories)] = measures_per_size[
                j, :, constants.recall_idx].mean()
            recall[j, constants.std_idx(n_memories)] = measures_per_size[
                j, :, constants.recall_idx].std()

        ######################################################################
        # Measures by memory size.

        # Average entropy among all digits.
        average_entropy.append(entropies.mean(axis=1))
        stdev_entropy.append(entropies.std(axis=1))

        # Average precision as percentage
        average_precision.append(
            precision[:, constants.mean_idx(n_memories)] * 100)
        stdev_precision.append(precision[:, constants.std_idx(n_memories)] *
                               100)

        # Average recall as percentage
        average_recall.append(recall[:, constants.mean_idx(n_memories)] * 100)
        stdev_recall.append(recall[:, constants.std_idx(n_memories)] * 100)

        all_precision.append(behaviours[:, constants.precision_idx] * 100)
        all_recall.append(behaviours[:, constants.recall_idx] * 100)

        no_response.append(behaviours[:, constants.no_response_idx])
        no_correct_response.append(
            behaviours[:, constants.no_correct_response_idx])
        no_correct_chosen.append(behaviours[:,
                                            constants.no_correct_chosen_idx])
        correct_chosen.append(behaviours[:, constants.correct_response_idx])
        total_responses.append(behaviours[:, constants.mean_responses_idx])

    average_precision = np.array(average_precision)
    stdev_precision = np.array(stdev_precision)
    main_average_precision = []
    main_stdev_precision = []

    average_recall = np.array(average_recall)
    stdev_recall = np.array(stdev_recall)
    main_average_recall = []
    main_stdev_recall = []

    all_precision = np.array(all_precision)
    main_all_average_precision = []
    main_all_stdev_precision = []

    all_recall = np.array(all_recall)
    main_all_average_recall = []
    main_all_stdev_recall = []

    average_entropy = np.array(average_entropy)
    stdev_entropy = np.array(stdev_entropy)
    main_average_entropy = []
    main_stdev_entropy = []

    no_response = np.array(no_response)
    no_correct_response = np.array(no_correct_response)
    no_correct_chosen = np.array(no_correct_chosen)
    correct_chosen = np.array(correct_chosen)
    total_responses = np.array(total_responses)

    main_no_response = []
    main_no_correct_response = []
    main_no_correct_chosen = []
    main_correct_chosen = []
    main_total_responses = []
    main_total_responses_stdev = []

    for i in range(len(constants.memory_sizes)):
        main_average_precision.append(average_precision[:, i].mean())
        main_average_recall.append(average_recall[:, i].mean())
        main_average_entropy.append(average_entropy[:, i].mean())

        main_stdev_precision.append(stdev_precision[:, i].mean())
        main_stdev_recall.append(stdev_recall[:, i].mean())
        main_stdev_entropy.append(stdev_entropy[:, i].mean())

        main_all_average_precision.append(all_precision[:, i].mean())
        main_all_stdev_precision.append(all_precision[:, i].std())
        main_all_average_recall.append(all_recall[:, i].mean())
        main_all_stdev_recall.append(all_recall[:, i].std())

        main_no_response.append(no_response[:, i].mean())
        main_no_correct_response.append(no_correct_response[:, i].mean())
        main_no_correct_chosen.append(no_correct_chosen[:, i].mean())
        main_correct_chosen.append(correct_chosen[:, i].mean())
        main_total_responses.append(total_responses[:, i].mean())
        main_total_responses_stdev.append(total_responses[:, i].std())

    main_behaviours = [
        main_no_response, main_no_correct_response, main_no_correct_chosen,
        main_correct_chosen, main_total_responses
    ]

    np.savetxt(constants.csv_filename(
        'main_average_precision--{0}'.format(experiment)),
               main_average_precision,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_all_average_precision--{0}'.format(experiment)),
               main_all_average_precision,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_average_recall--{0}'.format(experiment)),
               main_average_recall,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_all_average_recall--{0}'.format(experiment)),
               main_all_average_recall,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_average_entropy--{0}'.format(experiment)),
               main_average_entropy,
               delimiter=',')

    np.savetxt(constants.csv_filename(
        'main_stdev_precision--{0}'.format(experiment)),
               main_stdev_precision,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_all_stdev_precision--{0}'.format(experiment)),
               main_all_stdev_precision,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_stdev_recall--{0}'.format(experiment)),
               main_stdev_recall,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_all_stdev_recall--{0}'.format(experiment)),
               main_all_stdev_recall,
               delimiter=',')
    np.savetxt(constants.csv_filename(
        'main_stdev_entropy--{0}'.format(experiment)),
               main_stdev_entropy,
               delimiter=',')

    np.savetxt(constants.csv_filename(
        'main_behaviours--{0}'.format(experiment)),
               main_behaviours,
               delimiter=',')

    plot_pre_graph(main_average_precision,
                   main_average_recall,
                   main_average_entropy,
                   main_stdev_precision,
                   main_stdev_recall,
                   main_stdev_entropy,
                   action=experiment)

    plot_pre_graph(main_all_average_precision,
                   main_all_average_recall,
                   main_average_entropy,
                   main_all_stdev_precision,
                   main_all_stdev_recall,
                   main_stdev_entropy,
                   'overall',
                   action=experiment)

    plot_size_graph(main_total_responses,
                    main_total_responses_stdev,
                    action=experiment)

    plot_behs_graph(main_no_response,
                    main_no_correct_response,
                    main_no_correct_chosen,
                    main_correct_chosen,
                    action=experiment)

    print('Test complete')


def main(action, occlusion=None, bar_type=None, tolerance=0):
    """ Distributes work.

    The main function distributes work according to the options chosen in the
    command line.
    """

    if action == constants.TRAIN_NN:
        # Trains the neural networks.
        training_percentage = constants.nn_training_percent
        model_prefix = constants.model_name
        stats_prefix = constants.stats_model_name

        history = convnet.train_networks(training_percentage, model_prefix,
                                         action)
        save_history(history, stats_prefix)
    elif action == constants.GET_FEATURES:
        # Generates features for the memories using the previously generated
        # neural networks.
        training_percentage = constants.nn_training_percent
        am_filling_percentage = constants.am_filling_percent
        model_prefix = constants.model_name
        features_prefix = constants.features_name(action)
        labels_prefix = constants.labels_name
        data_prefix = constants.data_name

        history = convnet.obtain_features(model_prefix, features_prefix,
                                          labels_prefix, data_prefix,
                                          training_percentage,
                                          am_filling_percentage, action)
        save_history(history, features_prefix)
    elif action == constants.CHARACTERIZE:
        # Generates graphs of means and standard deviations of feature
        # values, per digit class.
        characterize_features(constants.domain, action)
    elif action in (constants.EXP_1, constants.EXP_2):
        # The domain size, equal to the size of the output layer of the network.
        test_memories(constants.domain, action)
    elif action == constants.EXP_3:
        test_recalling(constants.domain, constants.partial_ideal_memory_size,
                       action)
    elif action == constants.EXP_4:
        convnet.remember(action)
    elif constants.EXP_5 <= action <= constants.EXP_10:
        # Generates features for the data sections using the previously
        # generated neural network, introducing (background color) occlusion.
        training_percentage = constants.nn_training_percent
        am_filling_percentage = constants.am_filling_percent
        model_prefix = constants.model_name
        features_prefix = constants.features_name(action, occlusion, bar_type)
        labels_prefix = constants.labels_name
        data_prefix = constants.data_name

        history = convnet.obtain_features(model_prefix, features_prefix,
                                          labels_prefix, data_prefix,
                                          training_percentage,
                                          am_filling_percentage, action,
                                          occlusion, bar_type)
        save_history(history, features_prefix)
        characterize_features(constants.domain, action, occlusion, bar_type)
        test_recalling(constants.domain, constants.partial_ideal_memory_size,
                       action, occlusion, bar_type, tolerance)
        convnet.remember(action, occlusion, bar_type, tolerance)
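

# A hypothetical invocation sketch; the module's real entry point and
# argument parsing live outside this section, so the flag names and types
# below are assumptions for illustration only.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Run a pipeline action.')
    parser.add_argument('action', type=int, help='Action/experiment id.')
    parser.add_argument('--occlusion', type=float, default=None)
    parser.add_argument('--bar_type', type=int, default=None)
    parser.add_argument('--tolerance', type=int, default=0)
    args = parser.parse_args()
    main(args.action, args.occlusion, args.bar_type, args.tolerance)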