# Example 1
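# Shared imports assumed by the snippets below. Project-internal helpers such as
# load_model, get_connection, load_scores, load_error_bared, plot_data_base,
# scatter_plot, connections, benchmarks, convergence_epoch and the color palettes
# are defined elsewhere in the repository and are not reproduced here.
import numpy as np
import torch.nn as nn
from scipy.stats import norm
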
def mean():
    kernel_weights, layer, sizes, weights = get_layer_weigh_list()
    mean_std = {'mean': [], 'std': []}
    labels = []
    for i in range(1, 17):
        previous = weights[i - 1]
        previous_kernel = []
        for n in range(previous.shape[0]):
            previous_kernel.append(np.mean(np.abs(previous[n])))
        impact = []
        for j in range(sizes[i]):
            to_analyze = np.abs(kernel_weights[f'{layer[i]}_kernel{j}'])
            kernel_impact = 0.0
            for k in range(to_analyze.shape[0]):
                previous_mean = previous_kernel[k]
                value = np.mean(to_analyze[k])
                kernel_impact = kernel_impact + (previous_mean * value)
            impact.append(kernel_impact)
        mu, std = norm.fit(impact)
        mean_std['mean'].append(mu)
        mean_std['std'].append(std)
        labels.append(f'L{i-1}({previous.shape[0]})_to_L{i}({sizes[i]})')
    plot_data_base(mean_std,
                   name='Mean+Std_with_prev_layer',
                   x_labels=labels,
                   x_name='Kernel impact mean + std',
                   y_name='',
                   rotate=True)
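
# `get_layer_weigh_list` is project-internal. Judging from how its return values
# are indexed above, a minimal sketch could look like the following (an assumption
# about its shape, not the repository's actual implementation):
def get_layer_weigh_list_sketch(random=True):
    model = load_model('CORnet-S', random)  # hypothetical reuse of the project's loader
    kernel_weights = {}
    layer, sizes, weights = [], [], []
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            w = m.weight.data.cpu().numpy()
            layer.append(name)
            sizes.append(w.shape[0])  # number of kernels in this layer
            weights.append(w)
            for j in range(w.shape[0]):
                kernel_weights[f'{name}_kernel{j}'] = w[j]
    return kernel_weights, layer, sizes, weights
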
def mean_var_overview(model_name, random):
    model = load_model(model_name, random)
    means = []
    stds = []
    layers = []
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            layers.append(name)
            weights = m.weight.data.cpu().numpy()
            kernel_means = []
            kernel_stds = []
            for kernel_no in range(weights.shape[0]):
                kernel = weights[kernel_no]
                kernel_weights = kernel.flatten()
                kernel_means.append(np.mean(kernel_weights))
                kernel_stds.append(np.std(kernel_weights))
            means.append(np.mean(kernel_means))
            stds.append(np.mean(kernel_stds))
    plot_data_base({
        'means': means,
        'stds': stds
    },
                   'Mean and Std of kernels ' +
                   ('Trained' if not random else 'Untrained'),
                   layers,
                   x_name='Layer number',
                   y_name='value',
                   scale_fix=[-0.05, 0.2])
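
# `plot_data_base` is the project's shared plotting helper. A matplotlib-based
# stand-in consistent with the call sites in this file might look roughly like
# this (signature and behavior are assumptions):
import matplotlib.pyplot as plt

def plot_data_base_sketch(data, name, x_labels=None, x_name='', y_name='',
                          scale_fix=None, rotate=False, **kwargs):
    fig, ax = plt.subplots()
    for label, values in data.items():
        ax.plot(range(len(values)), values, label=label)
    if x_labels is not None:
        ax.set_xticks(range(len(x_labels)))
        ax.set_xticklabels(x_labels, rotation=90 if rotate else 0)
    if scale_fix is not None:
        ax.set_ylim(scale_fix)
    ax.set_xlabel(x_name)
    ax.set_ylabel(y_name)
    ax.set_title(name)
    ax.legend()
    fig.tight_layout()
    plt.show()
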
# Example 3
def conv2_conv3_2():
    kernel_weights, layer, sizes, weights = get_layer_weigh_list(False)
    values = {'sum_prev': [], 'mean_prev': [], 'std_prev': []}
    for kernel in weights[5]:  # the 512 kernels of layer 6
        values['mean_prev'].append(np.mean(kernel))
        values['std_prev'].append(np.std(kernel))
        values['sum_prev'].append(np.sum(kernel))
    layer7 = weights[6].squeeze()
    std_corr = []
    sum_corr = []
    for j, kernel in enumerate(layer7):
        # scatter_plot(kernel, values['mean_prev'], x_label=f'Kernel {j} layer 7', y_label="Mean layer 6")
        std_corr.append(
            scatter_plot(kernel,
                         values['std_prev'],
                         x_label=f'Kernel {j} layer 7',
                         y_label="Std layer 6"))
        sum_corr.append(
            scatter_plot(kernel,
                         values['sum_prev'],
                         x_label=f'Kernel {j} layer 7',
                         y_label="Sum layer 6"))
    plot_data_base({'Sum/Avg': sum_corr, 'Std': std_corr}, 'Correlations')
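
# The loops above collect the return value of `scatter_plot` as a correlation
# measure. A hypothetical stand-in that draws the scatter and returns Pearson's r:
import matplotlib.pyplot as plt
from scipy.stats import pearsonr

def scatter_plot_stub(x, y, x_label='', y_label=''):
    plt.scatter(x, y)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()
    r, _ = pearsonr(x, y)  # correlation coefficient, ignoring the p-value
    return r
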
# Example 4
def conv2():
    kernel_weights, layer, sizes, weights = get_layer_weigh_list(False)
    values = {}
    values['sum_prev'] = []
    # values['mean_prev'] = []
    # values['std_prev'] = []
    # range(1) in the original only ever inspected the first kernel of layer 6
    first_kernel = weights[5][0]
    for ch in first_kernel:
        # values['mean_prev'].append(np.mean(ch))
        # values['std_prev'].append(np.std(ch))
        values['sum_prev'].append(np.sum(ch))
    # values['sum'] = []
    # values['mean'] = []
    # values['std'] = []
    # layer = weights[6].squeeze().T
    # for j in range(512):
    #     i = layer[j]
    #     values['mean'].append(np.mean(i))
    #     values['std'].append(np.std(i))
    #     values['sum'].append(np.sum(i))
    # scatter_plot(values['mean'],values['mean_prev'], x_label='Mean layer 7', y_label="Mean layer 6")
    # scatter_plot(values['std'],values['std_prev'], x_label='Std layer 7', y_label="Std layer 6")
    # scatter_plot(values['sum'],values['sum_prev'], x_label='Sum layer 7', y_label="Sum layer 6")
    plot_data_base(values, 'Properties')
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, ax=None):
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    x_values = {}
    if convergence and 'CORnet-S_full' in convergence_epoch:
        full_tr = load_scores(conn, [f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'], benchmarks)[
            f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}']
    else:
        full_tr = load_scores(conn, ['CORnet-S_full_epoch_06'], benchmarks)['CORnet-S_full_epoch_06']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        for epoch in epochs:
            if brain:
                full = np.mean(full_tr[2:5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][2:5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][2:5]) / full) * 100
                    scores.append(frac)
            else:
                full = np.mean(full_tr[5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][5]) / full) * 100
                    scores.append(frac)
        if convergence and model in convergence_epoch:
            if brain:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][2:5]) / full) * 100
                scores.append(frac)
            else:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][5]) / full) * 100
                scores.append(frac)
            x_values[name] = list(epochs) + [convergence_epoch[model]]
        else:
            x_values[name] = epochs
        data[name] = scores

    title = 'Brain scores mean vs epochs' if brain else 'Imagenet score vs epochs'
    plot_data_base(data, 'First epochs', x_values, 'Epochs', 'Brain Predictivity [% of standard training]',
                   x_ticks=list(epochs) + [30, 40, 50], log=True,
                   percent=True, special_xaxis=True, legend=False, only_blue=False, palette=red_palette, annotate=True,
                   annotate_pos=1, ax=ax)
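
# `load_scores` and `get_connection` come from the project's results-database
# layer. The indexing above ([2:5] for V4/IT/Behavior, [5] for Imagenet) suggests
# it maps each model name to an array with one score per benchmark; a
# hypothetical stub with that shape:
def load_scores_stub(conn, names, benchmarks):
    # Stand-in only: real scores come from the database behind `conn`.
    return {name: np.zeros(len(benchmarks)) for name in names}
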
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, model_name='resnet'):
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    x_values = {}
    conv_number = convergence_epoch[f'{model_name}_v1_CORnet-S_full']
    full_tr = load_scores(conn, [f'{model_name}_v1_CORnet-S_full_epoch_{conv_number}'], benchmarks)[
        f'{model_name}_v1_CORnet-S_full_epoch_{conv_number}']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        for epoch in epochs:
            if brain:
                full = np.mean(full_tr[2:5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][2:5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][2:5]) / full) * 100
                    scores.append(frac)
            else:
                full = np.mean(full_tr[5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][5]) / full) * 100
                    scores.append(frac)
        if convergence and model in convergence_epoch:
            if brain:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][2:5]) / full) * 100
            else:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][5]) / full) * 100
            scores.append(frac)
            x_values[name] = list(epochs) + [convergence_epoch[model]]
        else:
            x_values[name] = epochs
        data[name] = scores

    title = f'{model_name} Brain scores mean vs epochs' if brain else f'{model_name} Imagenet score vs epochs'
    # define the y label unconditionally so it exists even without a convergence entry
    y = 'mean(V4, IT, Behavior) [% of standard training]' if brain else 'Imagenet [% of standard training]'
    plot_data_base(data, '', x_values, 'Epochs', y, x_ticks=list(epochs) + [10, 20, 30],
                   percent=True, special_xaxis=True, only_blue=False)
def plot_benchmarks_over_epochs(model, epochs=None, benchmarks=benchmarks, selection=[2, 3, 4], ax=None):
    # TODO: Add error bars
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 5, 10, 15, 20)

    names = []
    # for epoch in epochs:
    #     if epoch % 1 == 0:
    #         names.append(f'{model}_epoch_{epoch:02d}')
    #     else:
    #         names.append(f'{model}_epoch_{epoch:.1f}')
    model_dict = load_error_bared(conn, [model, 'CORnet-S_full'], benchmarks, epochs=epochs, convergence=True)
    full = model_dict['CORnet-S_full']

    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for epoch in epochs:
                if epoch % 1 == 0:
                    frac = (model_dict[f'{model}_epoch_{int(epoch):02d}'][i] / full[i]) * 100
                else:
                    frac = (model_dict[f'{model}_epoch_{epoch:.1f}'][i] / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            # append the converged-model score only for selected benchmarks
            end = (np.mean(model_dict[model][i]) / np.mean(full[i])) * 100
            data[benchmarks_labels[i]].append(end)
    data[benchmarks_labels[-1]] = []
    for epoch in epochs:
        if epoch % 1 == 0:
            frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][selection]) / np.mean(full[selection])) * 100
        else:
            frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    end = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(end)
    plot_data_base(data, '', r'\textbf{Epoch}', r'\textbf{Score} [\% of standard training]',
                   list(epochs) + [43],
                   x_ticks=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       43],
                   x_labels=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       'Conv'],
                   percent=True, alpha=0.5, log=True, annotate=True, legend=False, annotate_pos=2, ax=ax,
                   palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]])
def plot_overflow():
    overflow_ratios = (0.05, 0.1, 0.2, 0.3, 0.4, 0.5)
    overflow_models = []

    for i in overflow_ratios:
        overflow_models.append(f'CORnet-S_overflow_{i}')
    # conn = get_connection()
    conn = get_connection('scores_openmind')
    result_overflow_models = load_scores(
        conn, overflow_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(
        conn, ['CORnet-S_random'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(
        conn, ['CORnet-S'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])

    it_data = {
        'overflow_init': [],
        'base_untrained': [],
        'base_trained': []
    }
    behavior_data = {
        'overflow_init': [],
        'base_untrained': [],
        'base_trained': []
    }
    labels = []
    for i in overflow_ratios:
        labels.append(i)
        it_data['overflow_init'].append(
            result_overflow_models[f'CORnet-S_overflow_{i}'][0])
        it_data['base_untrained'].append(
            result_base_random['CORnet-S_random'][0])
        it_data['base_trained'].append(result_base['CORnet-S'][0])
        behavior_data['overflow_init'].append(
            result_overflow_models[f'CORnet-S_overflow_{i}'][1])
        behavior_data['base_untrained'].append(
            result_base_random['CORnet-S_random'][1])
        behavior_data['base_trained'].append(result_base['CORnet-S'][1])

    print(it_data)
    print(behavior_data)
    plot_data_base(it_data, 'IT benchmark overflow_initialization', labels,
                   'Overflow initialization in %', 'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'Behavior benchmark overflow initialization',
                   labels, 'Overflow initialization in %', 'Score', [0.0, 0.6])
def plot_single_benchmarks(models, epochs=None, compare_batchfix=False, run_mean=False):
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    benchmarks_label = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']

    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')

            if compare_batchfix:
                names.append(f'{model}_epoch_{epoch:02d}_BF')
        model_dict[name] = load_scores(conn, names, benchmarks)
    for i in range(6):
        for model, name in models.items():
            scores = []
            for epoch in epochs:
                if epoch % 1 == 0:
                    scores.append(model_dict[name][f'{model}_epoch_{int(epoch):02d}'][i])
                else:
                    scores.append(model_dict[name][f'{model}_epoch_{epoch:.1f}'][i])

            if run_mean:
                # 3-point running mean; re-attach the raw endpoints lost by mode='valid'
                data[name] = [scores[0]] + list(np.convolve(scores, np.ones(3) / 3, mode='valid')) + [scores[-1]]
            else:
                data[name] = scores

            if compare_batchfix:
                scores = []
                for epoch in epochs:
                    # key matches the '_BF' names loaded at the top of the function
                    scores.append(model_dict[name][f'{model}_epoch_{epoch:02d}_BF'][i])
                if run_mean:
                    data[f'{name}_BF'] = np.convolve(scores, np.ones(3) / 3, mode='same')
                else:
                    data[f'{name}_BF'] = scores

        title = f'{benchmarks_label[i]} benchmark vs epochs'
        plot_data_base(data, title, epochs, 'Epoch', 'Score', x_ticks=epochs, log=True)
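
# For reference, the running mean above is a plain 3-point moving average;
# `mode='valid'` drops the two boundary points, which is why the raw first and
# last scores are re-attached around it:
scores = [1.0, 2.0, 3.0, 4.0, 5.0]
smoothed = [scores[0]] + list(np.convolve(scores, np.ones(3) / 3, mode='valid')) + [scores[-1]]
# -> [1.0, 2.0, 3.0, 4.0, 5.0]; linear data is unchanged by the average
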
def plot_over_epoch(models):
    model_dict = {}
    conn = get_connection()
    epochs = (0, 5, 10, 15)
    for model in models:
        names = []
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
        model_dict[model] = load_scores(conn, names, benchmarks=benchmarks)
        # epoch 0 is not stored per model; substitute the untrained baseline scores
        model_dict[model][f'{model}_epoch_00'] = load_scores(conn, ['CORnet-S_random'], benchmarks)['CORnet-S_random']
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    for i in range(6):
        data = {}
        for model in models:
            data[model] = []
            for epoch in epochs:
                data[model].append(model_dict[model][f'{model}_epoch_{epoch:02d}'][i])
        # data['CORnet-S'] = [0] * 3 + [model_dict['CORnet-S']['CORnet-S'][i]]
        plot_data_base(data, f'{benchmarks_labels[i]} Benchmark over epochs', epochs, 'Epoch', 'Score')
# Example 11
def impact_mean_std(type='Sum impact',
                    func=connections,
                    random=True,
                    upper_bound=0.3):
    influences, layer = func(random, False)
    means = []
    stds = []
    relative = []
    for inf in influences:
        means.append(np.mean(inf))
        stds.append(np.std(inf))
        # relative spread (coefficient of variation) per layer
        relative.append(stds[-1] / means[-1])
    plot_data_base({'std': stds},
                   f'{type} ' + ('untrained' if random else 'trained'),
                   layer[:-1],
                   rotate=True,
                   scale_fix=(0.0, upper_bound))
    print(relative)
def weight_std_factor(model_name, random=False):
    model = load_model(model_name, random)
    norm_dists = {}
    # norm_dists['layer'] = []
    # norm_dists['mean'] = []
    norm_dists['value'] = []
    # pytorch model
    layers = []
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            norm_dists['value'] = []
            layers.append(name)
            weights = m.weight.data.cpu().numpy()
            # fan-in of a single kernel: in_channels * kernel_height * kernel_width
            n_l = weights.shape[1] * weights.shape[2] * weights.shape[3]
            for i in range(weights.shape[0]):
                flat = weights[i].flatten()
                mu, std = norm.fit(flat)
                # norm_dists['layer'].append(name)
                norm_dists['value'].append(std * n_l / 2)
                print(f'Std: {std}')
            # plot_histogram(flat, name, model_name)
            plot_data_base(norm_dists, name, x_name='layers', y_name='Value')
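
# For context (illustration only, not from the repository): under He
# initialization the expected weight std of a conv layer is sqrt(2 / fan_in)
# with fan_in = in_channels * kernel_h * kernel_w, so a fitted std can be
# compared against that target:
def he_init_std(m: nn.Conv2d) -> float:
    fan_in = m.in_channels * m.kernel_size[0] * m.kernel_size[1]
    return float(np.sqrt(2.0 / fan_in))
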
# Example 13
def kernel_weight_size(random=True):
    kernel_weights, layer, sizes, weights = get_layer_weigh_list(random)
    _, _, _, weights_trained = get_layer_weigh_list(False)
    layer_sum = []
    layer_sum_2 = []
    kernel_sum = []
    label = []
    for i in range(1, 17):
        layer_sum.append(np.sum(np.abs(weights[i - 1])))
        # layer_sum.append(np.sum(weights[i-1]))
        layer_sum_2.append(np.sum(np.abs(weights_trained[i - 1])))
        # layer_sum_2.append(np.sum(weights_trained[i-1]))
        for j in range(sizes[i]):
            kernel_sum.append(
                np.sum(np.abs(kernel_weights[f'{layer[i]}_kernel{j}'])))
            label.append(f'{layer[i]}_kernel{j}')
    version = 'untrained' if random else 'trained'
    # plot_1_dim_data(layer_sum,name=f'Absolute sum of weights per layer {version}',
    #                 x_name=f'Layer number', y_name='Sum')
    # plot_1_dim_data(kernel_sum,name=f'Absolute sum of weights per kernel {version}' ,
    #                 x_name=f'Kernel name', y_name='Sum of weights')
    # layer_sum comes from the `random` model (untrained by default),
    # layer_sum_2 from the trained one, so the labels below reflect that
    plot_data_base({
        'untrained': layer_sum,
        'trained': layer_sum_2
    },
                   'Sum of weights per layer',
                   layer[:-1],
                   rotate=True,
                   scale_fix=(-100, 82))
    plot_data_base({
        'untrained': layer_sum,
        'trained': layer_sum_2
    },
                   'Absolute sum of weights per layer',
                   layer[:-1],
                   rotate=True,
                   scale_fix=(0, 2000))
def mean_compared(model_name, random):
    model_untrained = load_model(model_name, True)
    model_trained = load_model(model_name, False)
    means_untrained = []
    means_trained = []
    layers = []
    for name, m in model_untrained.named_modules():
        if isinstance(m, nn.Conv2d):
            layers.append(name)
            weights = m.weight.data.cpu().numpy()
            kernel_means = []
            for kernel_no in range(weights.shape[0]):
                kernel = weights[kernel_no]
                kernel_weights = kernel.flatten()
                kernel_means.append(np.mean(np.abs(kernel_weights)))
            means_untrained.append(np.mean(kernel_means))
    for name, m in model_trained.named_modules():
        if isinstance(m, nn.Conv2d):
            # layers.append(name)
            weights = m.weight.data.cpu().numpy()
            kernel_means = []
            for kernel_no in range(weights.shape[0]):
                kernel = weights[kernel_no]
                kernel_weights = kernel.flatten()
                kernel_means.append(np.mean(np.abs(kernel_weights)))
            means_trained.append(np.mean(kernel_means))
    plot_data_base(
        {
            'means_untrained': means_untrained,
            'means_trained': means_trained
        },
        'Mean trained and untrained',
        layers,
        x_name='Layer number',
        y_name='value',
        rotate=True)
def image_scores_single(model, imgs, selection=[], ax=None):
    names = []
    conn = get_connection()
    for img in imgs:
        name = f'{model}_img{img}'
        names.append(name)
    # if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
    #     model = f'{model}_seed42'
    # names.append(f'{model}_epoch_{convergence_epoch[model]}')
    names.append('CORnet-S_full')
    # model_dict = load_scores(conn, names, benchmarks)
    model_dict = load_error_bared(conn, names, benchmarks, convergence=True)
    # normalize against standard training, matching plot_benchmarks_over_epochs
    full = model_dict['CORnet-S_full']
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for j in imgs:
                name1 = f'{model}_img{j}'
                frac = (np.mean(model_dict[name1][i]) / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            frac = (np.mean(model_dict[model][i]) / full[i]) * 100
            data[benchmarks_labels[i]].append(frac)
    data[benchmarks_labels[-1]] = []
    for j in imgs:
        name1 = f'{model}_img{j}'
        frac = (np.mean(model_dict[name1][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    frac = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(frac)
    imgs = list(imgs) + [1280000]  # copy so the caller's list is not mutated
    plot_data_base(data, '', r'\textbf{Images} [Million]', r'\textbf{Score} [\% of standard training]', x_values=imgs,
                   x_ticks=[100, 1000, 10000, 100000, 1280000], x_labels=['100', '1k', '10k', '100k', '1.3M'],
                   million_base=True, palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]], alpha=0.5,
                   use_xticks=True,
                   percent=True, log=True, annotate=True, legend=False, annotate_pos=3, ax=ax)
def plot_single_layer_perturbation():
    norm_dist_models = []
    jumbler_models = []
    fixed_models = []
    fixed_small_models = []
    for i in range(1, 18):
        norm_dist_models.append(f'CORnet-S_norm_dist_L{i}')
        jumbler_models.append(f'CORnet-S_jumbler_L{i}')
        fixed_models.append(f'CORnet-S_fixed_value_L{i}')
        fixed_small_models.append(f'CORnet-S_fixed_value_small_L{i}')
    conn = get_connection()
    result_norm = load_scores(
        conn, norm_dist_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_jumbler = load_scores(
        conn, jumbler_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_fixed = load_scores(
        conn, fixed_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(
        conn, ['CORnet-S'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_fixed_small = load_scores(
        conn, fixed_small_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])

    it_data = {
        'norm_dist': [],
        'jumbler': [],
        'fixed_value': [],
        'fixed_small_value': [],
        'base': []
    }
    behavior_data = {
        'norm_dist': [],
        'jumbler': [],
        'fixed_value': [],
        'fixed_small_value': [],
        'base': []
    }
    labels = []
    for i in range(1, 18):
        labels.append(f'L{i}')
        it_data['norm_dist'].append(result_norm[f'CORnet-S_norm_dist_L{i}'][0])
        it_data['jumbler'].append(result_jumbler[f'CORnet-S_jumbler_L{i}'][0])
        it_data['fixed_value'].append(
            result_fixed[f'CORnet-S_fixed_value_L{i}'][0])
        it_data['base'].append(result_base['CORnet-S'][0])
        it_data['fixed_small_value'].append(
            result_fixed_small[f'CORnet-S_fixed_value_small_L{i}'][0])
        behavior_data['norm_dist'].append(
            result_norm[f'CORnet-S_norm_dist_L{i}'][1])
        behavior_data['jumbler'].append(
            result_jumbler[f'CORnet-S_jumbler_L{i}'][1])
        behavior_data['fixed_value'].append(
            result_fixed[f'CORnet-S_fixed_value_L{i}'][1])
        behavior_data['base'].append(result_base['CORnet-S'][1])
        behavior_data['fixed_small_value'].append(
            result_fixed_small[f'CORnet-S_fixed_value_small_L{i}'][1])

    plot_data_base(it_data, 'IT Benchmark single layer', labels, 'Conv Layers',
                   'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'Behavior Benchmark single layer', labels,
                   'Conv Layers', 'Score', [0.0, 0.6])
def plot_high_low_nullify_separate():
    zero_fractions = (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95)
    high_var_models = []
    high_var_trained_models = []
    low_var_models = []
    low_var_trained_models = []
    for i in zero_fractions:
        high_var_models.append(f'CORnet-S_high_zero_{i}')
        high_var_trained_models.append(f'CORnet-S_trained_high_zero_{i}')
        low_var_models.append(f'CORnet-S_low_zero_{i}')
        low_var_trained_models.append(f'CORnet-S_trained_low_zero_{i}')
    conn = get_connection()
    result_high_var = load_scores(
        conn, high_var_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_high_var_trained = load_scores(
        conn, high_var_trained_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var = load_scores(
        conn, low_var_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var_trained = load_scores(
        conn, low_var_trained_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(
        conn, ['CORnet-S_random'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(
        conn, ['CORnet-S'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])

    it_data = {}
    it_data['high_zero'] = []
    it_data['low_zero'] = []
    # it_data['high_zero_trained'] = []
    # it_data['low_zero_trained'] = []
    it_data['base'] = []
    # it_data['base_trained'] = []
    behavior_data = {}
    behavior_data['high_zero'] = []
    behavior_data['low_zero'] = []
    # behavior_data['high_zero_trained'] = []
    # behavior_data['low_zero_trained'] = []
    behavior_data['base'] = []
    # behavior_data['base_trained'] = []
    labels = []
    for i in zero_fractions:
        labels.append(i)
        # it_data['high_zero'].append(result_high_var[f'CORnet-S_high_zero_{i}'][0])
        it_data['high_zero'].append(
            result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][0])
        # it_data['low_zero'].append(result_low_var[f'CORnet-S_low_zero_{i}'][0])
        it_data['low_zero'].append(
            result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][0])
        # it_data['base'].append(result_base_random[f'CORnet-S_random'][0])
        it_data['base'].append(result_base['CORnet-S'][0])
        # behavior_data['high_zero'].append(result_high_var[f'CORnet-S_high_zero_{i}'][1])
        behavior_data['high_zero'].append(
            result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][1])
        # behavior_data['low_zero'].append(result_low_var[f'CORnet-S_low_zero_{i}'][1])
        behavior_data['low_zero'].append(
            result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][1])
        # behavior_data['base'].append(result_base_random[f'CORnet-S_random'][1])
        behavior_data['base'].append(result_base['CORnet-S'][1])

    plot_data_base(it_data,
                   'IT Benchmark Zero Trained',
                   labels,
                   'Zero values in %',
                   'Score', [0.0, 0.6],
                   base_line=result_base['CORnet-S'][0])
    plot_data_base(behavior_data,
                   'Behavior Benchmark Zero Trained',
                   labels,
                   'Zero values in %',
                   'Score', [0.0, 0.6],
                   base_line=result_base['CORnet-S'][1])
def plot_high_low_nullify():
    zero_fractions = (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95)
    high_var_models = []
    high_var_trained_models = []
    low_var_models = []
    low_var_trained_models = []
    for i in zero_fractions:
        high_var_models.append(f'CORnet-S_high_zero_{i}')
        high_var_trained_models.append(f'CORnet-S_trained_high_zero_{i}')
        low_var_models.append(f'CORnet-S_low_zero_{i}')
        low_var_trained_models.append(f'CORnet-S_trained_low_zero_{i}')
    # conn = get_connection()
    conn = get_connection('scores_openmind')
    result_high_var = load_scores(
        conn, high_var_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_high_var_trained = load_scores(
        conn, high_var_trained_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var = load_scores(
        conn, low_var_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var_trained = load_scores(
        conn, low_var_trained_models,
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(
        conn, ['CORnet-S_random'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(
        conn, ['CORnet-S'],
        ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])

    it_data = {
        'high_zero_untrained': [],
        'low_zero_untrained': [],
        'high_zero_trained': [],
        'low_zero_trained': [],
        'base_untrained': [],
        'base_trained': []
    }
    behavior_data = {
        'high_zero_untrained': [],
        'low_zero_untrained': [],
        'high_zero_trained': [],
        'low_zero_trained': [],
        'base_untrained': [],
        'base_trained': []
    }
    labels = []
    for i in zero_fractions:
        labels.append(i)
        it_data['high_zero_untrained'].append(
            result_high_var[f'CORnet-S_high_zero_{i}'][0])
        it_data['high_zero_trained'].append(
            result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][0])
        it_data['low_zero_untrained'].append(
            result_low_var[f'CORnet-S_low_zero_{i}'][0])
        it_data['low_zero_trained'].append(
            result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][0])
        it_data['base_untrained'].append(
            result_base_random['CORnet-S_random'][0])
        it_data['base_trained'].append(result_base['CORnet-S'][0])
        behavior_data['high_zero_untrained'].append(
            result_high_var[f'CORnet-S_high_zero_{i}'][1])
        behavior_data['high_zero_trained'].append(
            result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][1])
        behavior_data['low_zero_untrained'].append(
            result_low_var[f'CORnet-S_low_zero_{i}'][1])
        behavior_data['low_zero_trained'].append(
            result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][1])
        behavior_data['base_untrained'].append(
            result_base_random['CORnet-S_random'][1])
        behavior_data['base_trained'].append(result_base['CORnet-S'][1])

    plot_data_base(it_data, 'it_benchmark_zero', labels, 'Zero values in %',
                   'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'behavior_benchmark_zero', labels,
                   'Zero values in %', 'Score', [0.0, 0.6])