def plot_models_vs(models, file_name, convergence=False, epoch=0, imagenet=False, gs=None, ax=None, selection=[]):
    """Plot a bar chart comparing groups of models on the brain benchmarks.

    :param models: mapping of group label -> {model_name: model_id}
    :param file_name: output file name for the plot
    :param convergence: if True use converged scores, otherwise scores at `epoch`
    :param epoch: epoch to score at when convergence is False
    :param imagenet: currently unused in this code path (kept for interface stability)
    :param gs: optional matplotlib gridspec to draw into
    :param ax: optional matplotlib axis to draw into
    :param selection: benchmark indices to average over (read-only; the mutable
        default is never modified)
    """
    conn = get_connection()

    # Collect every model id across all groups for one batched score load.
    names = []
    for group in models.values():
        for model in group.values():
            names.append(model)
    model_dict = load_error_bared(conn, names, benchmarks, convergence=convergence, epochs=[0])
    labels = []
    data_set = {}
    err = {}
    # We replace the model id with a more human readable version.
    # (renamed the loop variable -- it used to be `models`, shadowing the parameter)
    for group_label, group in models.items():
        labels.append(group_label)
        for model_name, model in group.items():
            if model_name not in data_set:
                data_set[model_name] = []
                err[model_name] = []
            # Entries before index 6 are scores; entries from index 6 on are
            # error-bar values -- presumably one per benchmark, TODO confirm.
            if convergence:
                data_set[model_name].append(np.mean(model_dict[model][selection]))
                err[model_name].append(np.mean(model_dict[model][6:][selection]))
            else:
                data_set[model_name].append(np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][selection]))
                err[model_name].append(np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][6:][selection]))

    # Reference line: score of the fully trained CORnet-S.
    full_tr = load_error_bared(conn, [f'CORnet-S_full'], benchmarks)[
        f'CORnet-S_full']
    line = np.mean(full_tr[selection])
    if len(selection) == 3:
        y = r"\textbf{Brain Predictivity} "
    else:
        y = r"\textbf{Brain Predictivity}"
    plot_bar_benchmarks(data_set, labels, '', y, file_name, yerr=err, line=line, label=True, grey=False, gs=gs, ax=ax)
def score_over_layers(models, random, labels, bench, convergence=True, ax=None):
    """Plot per-benchmark scores over trained layers, as % of standard training."""
    if bench is not None:
        benchmarks = bench
    conn = get_connection()
    names = []
    # Reference scores: the fully trained CORnet-S at its convergence epoch,
    # falling back to epoch 6 when no convergence epoch is recorded.
    if convergence and 'CORnet-S_full' in convergence_epoch:
        reference = f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'
    else:
        reference = 'CORnet-S_full_epoch_06'
    full_tr = load_scores(conn, [reference], benchmarks)[reference]
    model_dict = load_error_bared(conn, list(chain(models.keys(), random.keys())), benchmarks, convergence=convergence)
    if len(benchmarks) < 6:
        benchmarks_labels = ['V4', 'IT', 'Behavior', 'Imagenet']
    else:
        benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    data = {}
    err = {}
    x_ticks = {}
    for idx in range(len(benchmarks)):
        label = benchmarks_labels[idx]
        # x positions are the per-model layer counts; identical for every benchmark.
        x_ticks[label] = [layer_best_2[layer] for layer in models.values()]
        # Scores and error bars normalized to the standard-training reference.
        data[label] = [(model_dict[m][idx] / full_tr[idx]) * 100 for m in models]
        err[label] = [(model_dict[m][len(benchmarks):][idx] / full_tr[idx]) * 100 for m in models]
    plot_data_double(data, data2=None, err=err, name=f'Artificial Genome + Critical Training',
                     x_name='Number of trained layers',
                     y_name=r'Benchmark Score [% of standard training]',
                     x_ticks=x_ticks, x_ticks_2=[], percent=True, ax=ax, pal=red_palette, annotate_pos=1)
def plot_models_benchmarks(models, file_name, benchmarks, convergence=True, gs=None, ax=None):
    """Plot per-benchmark bar scores for a set of models.

    :param models: mapping of model id -> human readable description
    :param file_name: output file name for the plot
    :param benchmarks: benchmark identifiers to score on
    :param convergence: NOTE(review): accepted but not forwarded -- the
        load_error_bared call below uses its default, unlike the other
        functions in this module; confirm whether it should be passed through.
    :param gs: optional matplotlib gridspec to draw into
    :param ax: optional matplotlib axis to draw into
    """
    conn = get_connection()
    model_dict = load_error_bared(conn, models.keys(), benchmarks)
    if len(benchmarks) < 6:
        benchmarks_labels = ['V4', 'IT', 'Behavior', 'Imagenet']
    else:
        benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior']  # 'Imagenet'
    data_set = {}
    err = {}
    # We replace the model id with a more human readable version.
    # (renamed `id` -> `model_id`: `id` shadows the builtin)
    for model_id, desc in models.items():
        data_set[desc] = model_dict[model_id][:5]  # leading entries are scores
        err[desc] = model_dict[model_id][6:-1]  # trailing entries are error bars
    plot_bar_benchmarks(data_set, benchmarks_labels, '', r'\textbf{Scores}', file_name, yerr=err, gs=gs, ax=ax)
def score_over_layers_avg(models_resnet, random, models_alexnet={}, random_alexnet={}, imagenet=False,
                          convergence=False,
                          model_name='resnet',
                          layers_numbers=[layers_number, layers_number, layers_number_mobilenet,
                                          layers_number_mobilenet],
                          gs=None, ax=None, selection=[]):
    """Plot mean benchmark score over trained layers for several model families.

    NOTE(review): a later definition of `score_over_layers_avg` in this module
    shadows this one -- confirm which version is actually in use.

    :param models_resnet: mapping model_id -> layer key for the transfer models
    :param random: mapping model_id -> layer key for the baseline (KN+DT) models
    :param models_alexnet: accepted but never read in this body -- the batched
        load below uses module-level `models_mobilenet` / `random_mobilenet`
        instead; presumably a leftover from a rename, TODO confirm
    :param random_alexnet: accepted but never read (see above)
    :param imagenet: switch between Imagenet and Brain-Score axis labels
    :param convergence: forwarded to the score loading
    :param model_name: names the fully trained reference model and the title
    :param layers_numbers: per-family mapping of layer key -> layer count,
        indexed by position in the zip below
    :param gs: optional matplotlib gridspec to draw into
    :param ax: optional matplotlib axis to draw into
    :param selection: benchmark indices to average over (read-only; the mutable
        default is never modified)
    """
    conn = get_connection()
    full = 0  # score of the fully trained reference model; set inside the loop
    # NOTE(review): reads module-level `models_mobilenet` / `random_mobilenet`,
    # not the `models_alexnet` / `random_alexnet` parameters -- TODO confirm.
    model_dict = load_error_bared(conn, list(
        chain(models_resnet.keys(), models_mobilenet.keys(), random.keys(), random_mobilenet.keys())), benchmarks,
                                  convergence=convergence)
    data = {}
    err = {}
    layers = {}
    labels = {}
    idx = 0
    # NOTE(review): only two model sets are zipped against four labels, so the
    # Alexnet labels are never consumed -- TODO confirm.
    for models, label in zip([random, models_resnet],  # , random_mobilenet, models_mobilenet
                             ['Resnet50 KN+DT', 'Resnet50 Transfer AG+CT', 'Alexnet KN+DT', 'Alexnet Transfer AG+CT']):
        data[label] = []
        layers[label] = []
        layers_number = layers_numbers[idx]
        idx += 1
        for model, layer in models.items():
            layers[label].append(layers_number[layer])
            if model == f'{model_name}_v1_CORnet-S_full':
                # Reference model: defines the 100% mark for all other entries.
                full = np.mean(model_dict[model][selection])
                data[label].append(100)
            else:
                percent = (np.mean(model_dict[model][selection]) / full) * 100
                data[label].append(percent)
            # NOTE(review): `err[label]` is overwritten with a scalar on every
            # iteration and `err` is never passed to plot_data_double below --
            # dead code? TODO confirm.
            full_err = (np.mean(model_dict[model][:6][selection]) / full) * 100
            err[label] = full_err
        if 'Alexnet' in label:
            labels[label] = models.values()
        else:
            # Strip the layer suffix after the first '.' for display.
            labels[label] = [value.split('.')[0] for value in models.values()]

    if imagenet:
        title = f'{model_name} Imagenet score over layers'
        y = 'Imagenet [% of standard training]'
    else:
        title = f'{model_name} Brain-Score Benchmark mean(V4, IT, Behavior) over layers'
        if len(selection) == 3:
            y = r"mean(V4, IT, Behavior) [% of standard training]"
        else:
            y = r"mean(V1,V2,V4,IT,Behavior) [% of standard training]"
    plot_data_double(data, {}, '', x_name='Number of trained layers [% of all layers]', y_name=y, x_ticks=layers,
                     x_ticks_2=[], percent=False, percent_x=True,
                     pal=['#424949'] + [green_palette[1]] + ['#ABB2B9'] + [green_palette[0]], data_labels=labels, gs=gs,
                     ax=ax)
def plot_benchmarks_over_epochs(model, epochs=None, benchmarks=benchmarks, selection=[2, 3, 4], ax=None):
    """Plot benchmark scores of `model` over training epochs, relative to the
    fully trained CORnet-S.

    :param model: model identifier; epoch variants f'{model}_epoch_XX' are derived
    :param epochs: training epochs to plot; defaults to [0, 5, 10, 15, 20]
    :param benchmarks: benchmark identifiers (defaults to the module-level list)
    :param selection: benchmark indices used both to pick which per-benchmark
        curves to draw and to compute the 'Mean' curve (read-only)
    :param ax: optional matplotlib axis to draw into
    """
    # TODO: Add error bars
    conn = get_connection()
    if epochs is None:
        # Bugfix: this was a tuple, which made `epochs + [43]` below raise
        # TypeError; a list concatenates cleanly.
        epochs = [0, 5, 10, 15, 20]

    model_dict = load_error_bared(conn, [model, 'CORnet-S_full'], benchmarks, epochs=epochs, convergence=True)
    full = model_dict['CORnet-S_full']

    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for epoch in epochs:
                # Integer epochs are zero-padded in the key, fractional epochs
                # keep one decimal place.
                if epoch % 1 == 0:
                    frac = (model_dict[f'{model}_epoch_{epoch:02d}'][i] / full[i]) * 100
                else:
                    frac = (model_dict[f'{model}_epoch_{epoch:.1f}'][i] / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            # Converged score as the final data point.
            # Bugfix: these two lines were outside the `if i in selection:` guard
            # and raised KeyError for unselected benchmarks; now guarded, matching
            # the equivalent loop in image_scores_single.
            end = (np.mean(model_dict[model][i]) / np.mean(full[i])) * 100
            data[benchmarks_labels[i]].append(end)
    # 'Mean' curve: average over the selected benchmarks.
    data[benchmarks_labels[-1]] = []
    for epoch in epochs:
        if epoch % 1 == 0:
            frac = (np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][selection]) / np.mean(full[selection])) * 100
        else:
            frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    end = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(end)
    # x position 43 holds the convergence ('Conv') data point.
    plot_data_base(data, f'', r'\textbf{Epoch}', r'\textbf{Score} [\% of standard training]',
                   epochs + [43],
                   x_ticks=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       43],
                   x_labels=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       'Conv'],
                   percent=True, alpha=0.5, log=True, annotate=True, legend=False, annotate_pos=2, ax=ax,
                   palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]])
def image_epoch_heatmap(model, imgs, epochs, selection=[], title=r'\textbf{Standard training epochs/images trade-off}',
                        ax=None):
    """Heatmap of scores for image-count/epoch combinations, relative to the
    fully trained CORnet-S.

    Rows are image counts (last row = all images); columns are epochs
    (last column = convergence).

    :param model: base model identifier; variants are f'{model}_img{img}'
    :param imgs: list of training-image counts
    :param epochs: list of integer epochs (zero-padded in score keys)
    :param selection: benchmark indices to average over (read-only)
    :param title: heatmap title
    :param ax: optional matplotlib axis to draw into
    """
    conn = get_connection()
    names = [f'{model}_img{img}' for img in imgs]
    names.append(model)  # converged base model (all images)
    names.append('CORnet-S_full')  # normalization reference
    model_dict = load_error_bared(conn, names, epochs=epochs, benchmarks=benchmarks)
    full = np.mean(model_dict['CORnet-S_full'][selection])
    # One extra row/column for the 'all images' / 'convergence' entries.
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    for i in range(len(imgs)):
        for j in range(len(epochs)):
            name1 = f'{model}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            matrix[i, j] = np.mean(model_dict[name1][selection]) / full
        # Last column: converged score for this image count.
        name = f'{model}_img{imgs[i]}'
        matrix[i, -1] = np.mean(model_dict[name][selection]) / full

    # Last row: base model trained on all images, per epoch.
    for j in range(len(epochs)):
        name1 = f'{model}_epoch_{epochs[j]:02d}'
        matrix[-1, j] = np.mean(model_dict[name1][selection]) / full
    # Bottom-right cell: converged base model on all images.
    matrix[-1, -1] = np.mean(model_dict[model][selection]) / full
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}', title=title, annot=True, ax=ax,
                 cbar=False, cmap='YlOrRd', percent=False,
                 fmt='.0%', vmin=0, vmax=1, yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'], alpha=0.8)
def image_scores_single(model, imgs, selection=[], ax=None):
    """Plot scores over training-image counts, relative to the converged model.

    :param model: base model identifier; image variants are f'{model}_img{img}'
    :param imgs: image counts to plot. Bugfix: no longer mutated -- the
        full-dataset x value (1.28M) is appended to a copy, not to the
        caller's list.
    :param selection: benchmark indices used to pick curves and compute 'Mean'
        (read-only)
    :param ax: optional matplotlib axis to draw into
    """
    conn = get_connection()
    names = [f'{model}_img{img}' for img in imgs]
    names.append('CORnet-S_full')
    model_dict = load_error_bared(conn, names, benchmarks, convergence=True)
    # NOTE(review): `model` itself is not in `names`, yet it is read from
    # model_dict below -- presumably load_error_bared(convergence=True)
    # resolves it; confirm against its implementation.
    full = model_dict[model]
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for j in imgs:
                name1 = f'{model}_img{j}'
                frac = (np.mean(model_dict[name1][i]) / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            # Final point: the model trained on the full image set (100% by construction).
            frac = (np.mean(model_dict[model][i]) / full[i]) * 100
            data[benchmarks_labels[i]].append(frac)
    # 'Mean' curve over the selected benchmarks.
    data[benchmarks_labels[-1]] = []
    for j in imgs:
        name1 = f'{model}_img{j}'
        frac = (np.mean(model_dict[name1][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    frac = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(frac)
    # Full ImageNet training set (~1.28M images) as the last x position.
    x_values = imgs + [1280000]
    plot_data_base(data, '', r'\textbf{Images} [Million]', r'\textbf{Score} [\% of standard training]',
                   x_values=x_values,
                   x_ticks=[100, 1000, 10000, 100000, 1280000], x_labels=['100', '1k', '10k', '100k', '1.3M'],
                   million_base=True, palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]], alpha=0.5,
                   use_xticks=True,
                   percent=True, log=True, annotate=True, legend=False, annotate_pos=3, ax=ax)
def score_over_layers_avg(all_models, random, all_labels=[], imagenet=False, convergence=False, ax=None, selection=[]):
    """Plot averaged benchmark scores over trained layers, as % of standard training."""
    conn = get_connection()
    full = 0  # reference score of CORnet-S_full; assigned inside the loops
    # Gather every model id for one batched score load.
    names = []
    for model_set in all_models:
        names.extend(model_set.keys())
    names.extend(random.keys())
    model_dict = load_error_bared(conn, names, benchmarks, convergence=convergence)

    # Baseline series built from the `random` models.
    data2 = {'Score': []}
    err2 = {'Score': []}
    layers2 = []
    for model, layer in random.items():
        layers2.append(layer_best_2[layer])
        score = np.mean(model_dict[model][selection])
        error = np.mean(model_dict[model][6:][selection])
        if model == 'CORnet-S_full':
            full = score  # defines the 100% mark for everything below
            data2['Score'].append(100)
        else:
            data2['Score'].append((score / full) * 100)
        err2['Score'].append((error / full) * 100)

    data = {}
    x_ticks = {}
    labels = {}
    err = {}
    for model_set, name in zip(all_models, all_labels):
        data[name] = []
        err[name] = []
        layers = []
        for model, layer in model_set.items():
            # NOTE: `postfix` is computed but never used below.
            if convergence and model in convergence_epoch:
                postfix = f'_epoch_{convergence_epoch[model]:02d}'
            else:
                postfix = f'_epoch_06'
            layers.append(layer_best_2[layer])
            score = np.mean(model_dict[model][selection])
            if model == 'CORnet-S_full':
                full = score
                data[name].append(100)
            else:
                data[name].append((score / full) * 100)
            err[name].append((np.mean(model_dict[model][6:][selection]) / full) * 100)
        x_ticks[name] = layers
        labels[name] = [name]

    if imagenet:
        title = f'Imagenet over layers'
        y = r"Imagenet}[% of standard training]"
    else:
        title = f'Brain-Score Benchmark mean(V4, IT, Behavior) over layers'
        # Both selection sizes used the same label text in the original branches.
        y = r"Brain Predictivity [% of standard training]"

    plot_data_double(data, data2, '', err=err, err2=err2, x_name='Number of trained layers',
                     y_name=y, x_ticks=x_ticks,
                     x_ticks_2=layers2, percent=True, annotate_pos=0, pal=my_palette, ax=ax)
# Example #9 (stray scraper artifact -- original read "Пример #9" with a vote count)
def get_full(conn, convergence):
    """Return the error-bared benchmark scores of the fully trained CORnet-S."""
    scores = load_error_bared(conn, ['CORnet-S_full'], benchmarks, convergence)
    return scores['CORnet-S_full']
# Example #10 (stray scraper artifact -- original read "Пример #10" with a vote count)
def plot_num_params(imagenet=False,
                    entry_models=[],
                    all_labels=[],
                    convergence=False,
                    ax=None,
                    selection=[],
                    log=False,
                    layer_random=layer_random,
                    pal=None,
                    percent=True,
                    ylim=None):
    """Plot benchmark score against the number of trained parameters.

    :param imagenet: if True, label the y axis for Imagenet performance
    :param entry_models: list of {model_id: ...} dicts, one scatter series each
        (read-only; the mutable default is never modified)
    :param all_labels: one display label per entry in `entry_models`
    :param convergence: forwarded to the baseline score loading
    :param ax: optional matplotlib axis to draw into
    :param selection: benchmark indices to average over (read-only)
    :param log: log-scale the x axis
    :param layer_random: baseline mapping model_id -> layer (defaults to the
        module-level `layer_random`, bound at definition time)
    :param pal: color palette; defaults to `blue_palette`
    :param percent: express scores as % of standard training
    :param ylim: optional y-axis limits
    """
    conn = get_connection()
    full_score = get_full(conn, convergence)
    full = np.mean(full_score[selection])
    data = {'Score': []}
    err = {'Score': []}
    model_dict = load_error_bared(conn,
                                  layer_random.keys(),
                                  benchmarks,
                                  convergence=convergence)

    # Baseline series: scores of the layer_random models.
    for model, layer in layer_random.items():
        if percent:
            frac = (np.mean(model_dict[model][selection]) / full) * 100
            percent_err = (np.mean(model_dict[model][6:][selection]) /
                           full) * 100
        else:
            frac = np.mean(model_dict[model][selection])
            percent_err = np.mean(model_dict[model][6:][selection])
        print(f'Model {model} has score {frac}')  # typo fix: was 'MOdel'
        data['Score'].append(frac)
        err['Score'].append(percent_err)
    data2 = {}
    err2 = {}
    labels = {}
    params = {}
    for entry_model, name in zip(entry_models, all_labels):
        data2[name] = []
        params[name] = []
        err2[name] = []
        # Parameter counts, with naming suffixes stripped before the lookup.
        for model in entry_model.keys():
            print(model)
            if model.endswith('BF'):
                model = model.replace('_BF', '')
            if model.endswith('seed42'):
                model = model.replace('_seed42', '')
            if model == "CORnet-S_random":
                params[name].append(0)  # random init trains no parameters
            else:
                params[name].append(get_params(model))

        # NOTE(review): unlike the baseline load above, this call does not
        # forward `convergence` -- confirm whether that is intentional.
        model_dict = load_error_bared(conn, entry_model.keys(), benchmarks)
        for model in entry_model.keys():
            if percent:
                frac = (np.mean(model_dict[model][selection]) / full) * 100
                percent_err = (np.mean(model_dict[model][6:][selection]) /
                               full) * 100
            else:
                frac = np.mean(model_dict[model][selection])
                percent_err = np.mean(model_dict[model][6:][selection])
            print(f'Model {model} has score {frac}')  # typo fix: was 'MOdel'
            data2[name].append(frac)
            err2[name].append(percent_err)
        # Legend labels: use the part in parentheses when present.
        if '(' in name:
            short = name.split('(')[1][:-1]
        else:
            short = name
        if len(entry_model) == 1:
            labels[name] = [short, f'{short} Transfer']
        else:
            labels[name] = [short]

    # Parameter counts for the baseline series.
    params2 = []
    for model in layer_random.keys():
        if model.endswith('BF'):
            model = model.replace('_BF', '')
        if model == "CORnet-S_random":
            params2.append(0)
        else:
            params2.append(get_params(model))
    if imagenet:
        y = r'\textbf{Imagenet performance}'
    else:
        # Both selection sizes used the same label text in the original branches.
        y = r"\textbf{Brain Predictivity}"
    if percent:
        y = f'{y} [\% of standard training]'

    if pal is None:
        pal = blue_palette
    plot_data_double(data2,
                     data,
                     '',
                     err=err2,
                     err2=err,
                     x_name=r'\textbf{Number of trained parameters} [Million]',
                     x_labels=None,
                     ylim=ylim,
                     y_name=y,
                     x_ticks=params,
                     pal=pal,
                     percent=percent,
                     x_ticks_2=params2,
                     data_labels=labels,
                     ax=ax,
                     million=True,
                     log=log,
                     annotate_pos=0)