def score_over_layers(models, random, labels, bench, convergence=True, ax=None):
    """Plot per-benchmark scores, as percent of the fully trained model, over layers.

    :param models: dict mapping model id -> trained layer name (x position).
    :param random: dict of random-init model ids; their scores are loaded too.
    :param labels: unused here; kept for interface compatibility.
    :param bench: list of benchmark identifiers; falls back to the module-level
        ``benchmarks`` when None.
    :param convergence: use the converged epoch of the full model as baseline.
    :param ax: optional matplotlib axis to draw into.
    """
    # Bug fix: the original did ``benchmarks = bench`` which made ``benchmarks``
    # local to the whole function, so calling with ``bench=None`` raised
    # UnboundLocalError at first use. Use a separate local name that falls back
    # to the module-level ``benchmarks``.
    benchmark_list = bench if bench is not None else benchmarks
    conn = get_connection()
    # Baseline: scores of the fully trained model (converged epoch if known).
    if convergence and 'CORnet-S_full' in convergence_epoch:
        full_name = f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'
    else:
        full_name = 'CORnet-S_full_epoch_06'
    full_tr = load_scores(conn, [full_name], benchmark_list)[full_name]
    model_dict = load_error_bared(conn, list(chain(models.keys(), random.keys())), benchmark_list,
                                  convergence=convergence)
    if len(benchmark_list) < 6:
        benchmarks_labels = ['V4', 'IT', 'Behavior', 'Imagenet']
    else:
        benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    data = {}
    err = {}
    x_ticks = {}
    for i in range(len(benchmark_list)):
        data[benchmarks_labels[i]] = []
        err[benchmarks_labels[i]] = []
        layers = []
        for model, layer in models.items():
            layers.append(layer_best_2[layer])
            # Score and error relative to the fully trained reference, in percent.
            # Entries past len(benchmark_list) hold the error values.
            data[benchmarks_labels[i]].append((model_dict[model][i] / full_tr[i]) * 100)
            err[benchmarks_labels[i]].append((model_dict[model][len(benchmark_list):][i] / full_tr[i]) * 100)
        x_ticks[benchmarks_labels[i]] = layers
    plot_data_double(data, data2=None, err=err, name=f'Artificial Genome + Critical Training',
                     x_name='Number of trained layers',
                     y_name=r'Benchmark Score [% of standard training]',
                     x_ticks=x_ticks, x_ticks_2=[], percent=True, ax=ax, pal=red_palette, annotate_pos=1)
def image_scores(models, imgs, labels, ax=None, selection=[]):
    """Plot scores (percent of the fully trained model) vs. number of training images.

    :param models: iterable of base model identifiers.
    :param imgs: image counts to plot. NOTE(review): mutated below
        (1200000 appended), which leaks back into the caller's list.
    :param labels: display names, zipped positionally with ``models``.
    :param ax: optional matplotlib axis.
    :param selection: benchmark indices averaged into the score.
    """
    names = []
    conn = get_connection()
    for model in models:
        for img in imgs:
            name = f'{model}_img{img}'
            names.append(f'{name}_epoch_{convergence_images[name]}')
        # Special case: this model's converged run is stored under a seeded id.
        if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
            model = f'{model}_seed42'
        names.append(f'{model}_epoch_{convergence_epoch[model]}')
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    data2 = {}
    # Baseline: mean score of the fully trained model over the selected benchmarks.
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    for i in imgs:
        for model, name in zip(models, labels):
            # NOTE(review): resetting ``data2[name]`` inside the image loop means
            # only the last image count's two values survive — confirm whether
            # this initialization belongs outside the ``for i in imgs`` loop.
            data2[name] = []
            name1 = f'{model}_img{i}'
            frac = (np.mean(model_dict[f'{name1}_epoch_{convergence_images[name1]}'][selection]) / full) * 100
            data2[name].append(frac)
            if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
                model = f'{model}_seed42'
            frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]}'][selection]) / full) * 100
            data2[name].append(frac)

    if len(selection) == 1:
        title = 'Imagenet score vs number of weights'
        y = r'Imagenet [% of standard training]'
    else:
        title = f'Brain scores mean vs number of weights'
        y = r'Brain Predictivity [% of standard training]'
    imgs.append(1200000)
    plot_data_double(data2, {}, '', x_name='Number of images in million', y_name=y,
                     x_ticks={'IT init, selective training': imgs},
                     x_ticks_2=imgs, percent=True, log=True, ax=ax, million=True)
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, ax=None):
    """Plot early-epoch scores relative to the fully trained CORnet-S model.

    NOTE(review): a second ``plot_first_epochs`` is defined later in this file
    and shadows this definition at import time — confirm which one is intended.

    :param models: dict mapping model id -> display name.
    :param epochs: epochs to plot; fractional values (0.1..0.9) denote
        sub-epoch checkpoints.
    :param brain: True -> mean over benchmark indices 2..4 (V4/IT/Behavior);
        False -> Imagenet (index 5).
    :param convergence: additionally plot each model's converged score.
    :param ax: optional matplotlib axis.
    """
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    x_values = {}
    # Baseline: scores of the fully trained model (converged epoch if known).
    if convergence and 'CORnet-S_full' in convergence_epoch:
        full_tr = load_scores(conn, [f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'], benchmarks)[
            f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}']
    else:
        full_tr = load_scores(conn, ['CORnet-S_full_epoch_06'], benchmarks)['CORnet-S_full_epoch_06']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            # Integer epochs are zero-padded (epoch_05); fractional keep one decimal.
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        for epoch in epochs:
            if brain:
                # Mean over V4/IT/Behavior, percent of full training.
                full = np.mean(full_tr[2:5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][2:5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][2:5]) / full) * 100
                    scores.append(frac)
            else:
                # Imagenet only (index 5).
                full = np.mean(full_tr[5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][5]) / full) * 100
                    scores.append(frac)
        if convergence and model in convergence_epoch:
            # ``full`` here is whatever the last epoch iteration left behind.
            if brain:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][2:5]) / full) * 100
                scores.append(frac)
            else:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][5]) / full) * 100
                scores.append(frac)
            # NOTE(review): with the default tuple ``epochs``, tuple + list
            # raises TypeError here — confirm callers always pass a list.
            x_values[name] = epochs + [convergence_epoch[model]]
        else:
            x_values[name] = epochs
        data[name] = scores

    title = f'Brain scores mean vs epochs' if brain else 'Imagenet score vs epochs'
    plot_data_base(data, 'First epochs', x_values, 'Epochs', 'Brain Predictivity [% of standard training]',
                   x_ticks=epochs + [30, 40, 50], log=True,
                   percent=True, special_xaxis=True, legend=False, only_blue=False, palette=red_palette, annotate=True,
                   annotate_pos=1, ax=ax)
def score_layer_depth(values, brain=True):
    """Scatter-plot scores at epoch 5 against the number of trained weights per layer.

    :param values: dict mapping model id -> layer name (looked up in the
        module-level ``layers`` list to find its weight count).
    :param brain: True -> mean over brain benchmarks; False -> Imagenet (index 5).
    """
    names = []
    conn = get_connection()
    for k, v in values.items():
        names.append(f'{k}_epoch_05')
    # Random baselines are stored without an epoch suffix for the two
    # fully random variants.
    for k, v in random_scores.items():
        if k != 'CORnet-S_random' and k != 'CORnet-S_train_random':
            names.append(f'{k}_epoch_05')
        else:
            names.append(k)
    model_dict = load_scores(conn, names,
                             ['movshon.FreemanZiemba2013.V1-pls',
                              'movshon.FreemanZiemba2013.V2-pls',
                              'dicarlo.Majaj2015.V4-pls',
                              'dicarlo.Majaj2015.IT-pls',
                              'dicarlo.Rajalingham2018-i2n',
                              'fei-fei.Deng2009-top1'])
    # Per-layer weight counts of CORnet-S; ``acc`` accumulates the number of
    # weights that remain trainable when cutting at each layer (starting from
    # the full model including the 512000-weight readout).
    weight_num = [9408, 36864, 8192, 16384, 65536, 2359296, 65536, 32768, 65536, 262144, 9437184, 262144, 131072,
                  262144, 1048576, 37748736, 1048576, 512000]
    acc = [52860096 + 512000]
    for i in weight_num:
        acc.append(acc[-1] - i)
    weights = []
    results = []
    for model, l in values.items():
        index = layers.index(l)
        weights.append(acc[index])
        res = model_dict[f'{model}_epoch_05']
        if brain:
            # NOTE(review): uses indices 2:4 here but 0:4 for the random models
            # below — confirm the asymmetry is intentional.
            results.append(np.mean(res[2:4]))
            # if index < 7:
            #     results.append(np.mean(res[0:1]))
            # else:
            #     results.append(np.mean(res[0:2]))
        else:
            results.append(res[5])
    rand_names = []
    for model, l in random_scores.items():
        index = layers.index(l)
        weights.append(acc[index])
        if model != 'CORnet-S_random' and model != 'CORnet-S_train_random':
            res = model_dict[f'{model}_epoch_05']
        else:
            res = model_dict[model]

        if brain:
            results.append(np.mean(res[0:4]))
            # if index < 7:
            #     results.append(np.mean(res[0:1]))
            # else:
            #     results.append(np.mean(res[0:2]))
        else:
            results.append(res[5])
        rand_names.append(f'Random {l}')
    title = f'Brain scores mean vs number of weights' if brain else 'Imagenet score vs number of weights'
    scatter_plot(weights, results, x_label='Num of weights', y_label='Score', labels=list(values.values()) + rand_names,
                 title=title)
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, model_name='resnet'):
    """Plot early-epoch scores relative to the fully trained base model.

    NOTE(review): this redefines ``plot_first_epochs`` from earlier in the
    file; only this definition survives at import time.

    :param models: dict mapping model id -> display name.
    :param epochs: epochs to plot; fractional values denote sub-epoch checkpoints.
    :param brain: True -> mean over benchmark indices 2..4; False -> Imagenet.
    :param convergence: additionally plot each model's converged score.
    :param model_name: base architecture prefix used for the reference model.
    """
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    # Work on a list so that ``epochs + [...]`` below cannot raise TypeError
    # when the default tuple (or a caller's tuple) is used.
    epochs = list(epochs)
    data = {}
    x_values = {}
    # Bug fix: ``y`` was only assigned inside the convergence branch, so the
    # plot call raised NameError whenever no model reached that branch.
    # Set the axis label up front instead.
    if brain:
        y = 'mean(V4, IT, Behavior) [% of standard training]'
    else:
        y = 'Imagenet [% of standard training]'
    # Baseline: the fully trained reference model at its converged epoch.
    conv_number = convergence_epoch[f'{model_name}_v1_CORnet-S_full']
    full_tr = load_scores(conn, [f'{model_name}_v1_CORnet-S_full_epoch_{conv_number}'], benchmarks)[
        f'{model_name}_v1_CORnet-S_full_epoch_{conv_number}']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            # Integer epochs are zero-padded; fractional ones keep one decimal.
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        for epoch in epochs:
            if brain:
                full = np.mean(full_tr[2:5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][2:5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][2:5]) / full) * 100
                    scores.append(frac)
            else:
                full = np.mean(full_tr[5])
                if epoch % 1 == 0:
                    frac = (np.mean(model_dict[f'{model}_epoch_{int(epoch):02d}'][5]) / full) * 100
                    scores.append(frac)
                else:
                    frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][5]) / full) * 100
                    scores.append(frac)
        if convergence and model in convergence_epoch:
            # Append the converged score as the final data point.
            if brain:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][2:5]) / full) * 100
            else:
                frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]:02d}'][5]) / full) * 100
            scores.append(frac)
            x_values[name] = epochs + [convergence_epoch[model]]
        else:
            x_values[name] = epochs
        data[name] = scores

    plot_data_base(data, '', x_values, 'Epochs', y, x_ticks=epochs + [10, 20, 30],
                   percent=True, special_xaxis=True, only_blue=False)
def plot_models_benchmarks(models, file_name, benchmarks, convergence=True, gs=None, ax=None):
    """Bar plot of per-benchmark scores with error bars for a set of models.

    :param models: dict mapping model id -> human-readable bar label.
    :param file_name: output file name for the plot.
    :param benchmarks: benchmark identifiers to load scores for.
    :param convergence: accepted for interface compatibility; not used here.
    :param gs: optional gridspec to draw into.
    :param ax: optional matplotlib axis.
    """
    conn = get_connection()
    scores = load_error_bared(conn, models.keys(), benchmarks)
    if len(benchmarks) < 6:
        benchmarks_labels = ['V4', 'IT', 'Behavior', 'Imagenet']
    else:
        benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior']  # 'Imagenet'
    # Replace model ids with the readable descriptions; the first five entries
    # are the scores, entries 6..-2 the corresponding error values.
    data_set = {desc: scores[model_id][:5] for model_id, desc in models.items()}
    err = {desc: scores[model_id][6:-1] for model_id, desc in models.items()}
    plot_bar_benchmarks(data_set, benchmarks_labels, '', r'\textbf{Scores}', file_name, yerr=err, gs=gs, ax=ax)
def score_over_layers_avg(models_resnet, random, models_alexnet={}, random_alexnet={}, imagenet=False,
                          convergence=False,
                          model_name='resnet',
                          layers_numbers=[layers_number, layers_number, layers_number_mobilenet,
                                          layers_number_mobilenet],
                          gs=None, ax=None, selection=[]):
    """Plot averaged scores (percent of full training) over the fraction of trained layers.

    NOTE(review): loads scores for the module-level ``models_mobilenet`` /
    ``random_mobilenet`` dicts, not the ``models_alexnet`` / ``random_alexnet``
    parameters — confirm which pair is intended. The zip below iterates only
    the first two model dicts even though four labels are listed.

    :param models_resnet: dict mapping model id -> layer key.
    :param random: dict of random-baseline model ids -> layer key.
    :param selection: benchmark indices averaged into the score.
    """
    conn = get_connection()
    full = 0
    model_dict = load_error_bared(conn, list(
        chain(models_resnet.keys(), models_mobilenet.keys(), random.keys(), random_mobilenet.keys())), benchmarks,
                                  convergence=convergence)
    data = {}
    err = {}
    layers = {}
    labels = {}
    idx = 0
    # zip truncates to the two provided dicts; the Alexnet labels are unused.
    for models, label in zip([random, models_resnet],  # , random_mobilenet, models_mobilenet
                             ['Resnet50 KN+DT', 'Resnet50 Transfer AG+CT', 'Alexnet KN+DT', 'Alexnet Transfer AG+CT']):
        data[label] = []
        layers[label] = []
        layers_number = layers_numbers[idx]
        idx += 1
        for model, layer in models.items():
            layers[label].append(layers_number[layer])
            if model == f'{model_name}_v1_CORnet-S_full':
                # The reference model defines the 100% baseline for this series.
                full = np.mean(model_dict[model][selection])
                data[label].append(100)
            else:
                percent = (np.mean(model_dict[model][selection]) / full) * 100
                data[label].append(percent)
            # NOTE(review): ``err[label]`` is overwritten each iteration rather
            # than appended — only the last model's error survives; confirm.
            full_err = (np.mean(model_dict[model][:6][selection]) / full) * 100
            err[label] = full_err
        if 'Alexnet' in label:
            labels[label] = models.values()
        else:
            labels[label] = [value.split('.')[0] for value in models.values()]

    if imagenet:
        title = f'{model_name} Imagenet score over layers'
        y = 'Imagenet [% of standard training]'
    else:
        title = f'{model_name} Brain-Score Benchmark mean(V4, IT, Behavior) over layers'
        if len(selection) == 3:
            y = r"mean(V4, IT, Behavior) [% of standard training]"
        else:
            y = r"mean(V1,V2,V4,IT,Behavior) [% of standard training]"
    plot_data_double(data, {}, '', x_name='Number of trained layers [% of all layers]', y_name=y, x_ticks=layers,
                     x_ticks_2=[], percent=False, percent_x=True,
                     pal=['#424949'] + [green_palette[1]] + ['#ABB2B9'] + [green_palette[0]], data_labels=labels, gs=gs,
                     ax=ax)
def plot_models_vs(models, file_name, convergence=False, epoch=0, imagenet=False, gs=None, ax=None, selection=[]):
    """Grouped bar plot comparing variants of models against each other.

    :param models: dict mapping group label -> {variant label: model id}.
    :param file_name: output file name for the plot.
    :param convergence: use converged scores; otherwise scores at ``epoch``.
    :param epoch: epoch to read when ``convergence`` is False.
    :param imagenet: accepted for interface compatibility; not used here.
    :param gs: optional gridspec to draw into.
    :param ax: optional matplotlib axis.
    :param selection: benchmark indices averaged into each bar.
    """
    conn = get_connection()

    names = []
    for group in models.values():
        names.extend(group.values())
    model_dict = load_error_bared(conn, names, benchmarks, convergence=convergence, epochs=[0])
    labels = []
    data_set = {}
    err = {}
    # We replace the model id with a more human readable version.
    # Renamed the loop target: the original rebound ``models`` while iterating
    # ``models.items()``, shadowing the parameter.
    for group_label, group in models.items():
        labels.append(group_label)
        for model_name, model in group.items():
            if model_name not in data_set:
                data_set[model_name] = []
                err[model_name] = []
            if convergence:
                # Entries past index 5 hold the error values.
                data_set[model_name].append(np.mean(model_dict[model][selection]))
                err[model_name].append(np.mean(model_dict[model][6:][selection]))
            else:
                data_set[model_name].append(np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][selection]))
                err[model_name].append(np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][6:][selection]))

    # Horizontal reference line: the fully trained model's mean over the
    # same benchmark selection.
    full_tr = load_error_bared(conn, [f'CORnet-S_full'], benchmarks)[
        f'CORnet-S_full']
    line = np.mean(full_tr[selection])
    if len(selection) == 3:
        y = r"\textbf{Brain Predictivity} "
    else:
        y = r"\textbf{Brain Predictivity}"
    plot_bar_benchmarks(data_set, labels, '', y, file_name, yerr=err, line=line, label=True, grey=False, gs=gs, ax=ax)
def plot_model_avg_benchmarks(models, file_name):
    """Bar plot of mean brain score and Imagenet accuracy at epoch 6.

    :param models: dict mapping model id -> human-readable bar label.
    :param file_name: output file name for the plot.
    """
    conn = get_connection()
    epoch = 6
    names = [f'{model}_epoch_{epoch:02d}' for model in models]
    scores = load_scores(conn, names, benchmarks)
    benchmarks_labels = ['Brain Predictivity', 'Imagenet']
    data_set = {}
    # Replace each model id with its readable description; average the first
    # five benchmarks, keep Imagenet (index 5) separately.
    for model_id, desc in models.items():
        data = scores[f'{model_id}_epoch_{epoch:02d}']
        data_set[desc] = [np.mean(data[0:5]), data[5]]
        print(f'Mean of brain benchmark model {desc}, {np.mean(data[0:5])}')
    plot_bar_benchmarks(data_set, benchmarks_labels, '', 'Scores', file_name)
def plot_benchmarks_over_epochs(model, epochs=None, benchmarks=benchmarks, selection=[2, 3, 4], ax=None):
    """Plot selected benchmarks (percent of full training) over training epochs.

    :param model: base model identifier.
    :param epochs: epochs to plot; defaults to (0, 5, 10, 15, 20).
    :param benchmarks: benchmark identifiers (defaults to the module-level list).
    :param selection: benchmark indices to plot individually and to average
        into the 'Mean' series.
    :param ax: optional matplotlib axis.
    """
    # TODO: Add error bars
    conn = get_connection()
    if epochs is None:
        epochs = (0, 5, 10, 15, 20)
    # Work on a list: the default is a tuple and ``epochs + [43]`` below
    # would raise TypeError on a tuple.
    epochs = list(epochs)

    model_dict = load_error_bared(conn, [model, 'CORnet-S_full'], benchmarks, epochs=epochs, convergence=True)
    full = model_dict['CORnet-S_full']

    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for epoch in epochs:
                if epoch % 1 == 0:
                    frac = (model_dict[f'{model}_epoch_{epoch:02d}'][i] / full[i]) * 100
                else:
                    frac = (model_dict[f'{model}_epoch_{epoch:.1f}'][i] / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            # Converged score as the final point. Bug fix: this block sat
            # outside the ``if i in selection`` guard and raised KeyError for
            # benchmarks that were not selected.
            end = (np.mean(model_dict[model][i]) / np.mean(full[i])) * 100
            data[benchmarks_labels[i]].append(end)
    # 'Mean' series: average over the selected benchmarks per epoch.
    data[benchmarks_labels[-1]] = []
    for epoch in epochs:
        if epoch % 1 == 0:
            frac = (np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][selection]) / np.mean(full[selection])) * 100
        else:
            frac = (np.mean(model_dict[f'{model}_epoch_{epoch:.1f}'][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    end = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(end)
    plot_data_base(data, f'', r'\textbf{Epoch}', r'\textbf{Score} [\% of standard training]',
                   epochs + [43],
                   x_ticks=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       43],
                   x_labels=[value for value in epochs if value not in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 15]] + [
                       'Conv'],
                   percent=True, alpha=0.5, log=True, annotate=True, legend=False, annotate_pos=2, ax=ax,
                   palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]])
def plot_models_benchmark_vs_public(models, file_name):
    """Compare each model's benchmark scores against its public-benchmark scores.

    :param models: dict mapping model id -> human-readable bar label.
    :param file_name: output file name for the plot.
    """
    conn = get_connection()
    epoch = 6
    names = [f'{model}_epoch_{epoch:02d}' for model in models]
    scores = load_scores(conn, names, benchmarks)
    scores_public = load_scores(conn, names, benchmarks_public)
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    data_set = {}
    # Replace each model id with its readable description; the public scores
    # get a parallel ' public' entry per model.
    for model_id, desc in models.items():
        key = f'{model_id}_epoch_{epoch:02d}'
        data_set[desc] = scores[key]
        data_set[f'{desc} public'] = scores_public[key]
        print(f'Mean of brain benchmark model {desc}, {np.mean(data_set[desc][2:5])}')
    plot_bar_benchmarks(data_set, benchmarks_labels, 'Model scores in epoch 6', 'Score [% of standard training]',
                        file_name, grey=True)
def plot_single_benchmarks(models, epochs=None, compare_batchfix=False, run_mean=False):
    """Plot every benchmark separately over training epochs for several models.

    :param models: dict mapping model id -> display name.
    :param epochs: epochs to plot; fractional values denote sub-epoch checkpoints.
    :param compare_batchfix: additionally plot the batch-fixed ('_BF') runs.
    :param run_mean: smooth each series with a 3-point running mean.
    """
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    benchmarks_label = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']

    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{epoch:02d}')
                if compare_batchfix:
                    # Bug fix: only integer epochs have batch-fixed runs; the
                    # original formatted fractional epochs with ``:02d`` which
                    # raises ValueError.
                    names.append(f'{model}_epoch_{epoch:02d}_BF')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        model_dict[name] = load_scores(conn, names, benchmarks)
    for i in range(6):
        for model, name in models.items():
            scores = []
            for epoch in epochs:
                if epoch % 1 == 0:
                    scores.append(model_dict[name][f'{model}_epoch_{int(epoch):02d}'][i])
                else:
                    scores.append(model_dict[name][f'{model}_epoch_{epoch:.1f}'][i])

            if run_mean:
                # Bug fix: list + ndarray performs element-wise addition, not
                # concatenation — convert the convolution result to a list.
                data[name] = [scores[0]] + list(np.convolve(scores, np.ones((3,)) / 3, mode='valid')) + [scores[-1]]
            else:
                data[name] = scores

            if compare_batchfix:
                scores_bf = []
                for epoch in epochs:
                    if epoch % 1 == 0:
                        # Bug fix: look the scores up under the same key they
                        # were stored with above (``..._epoch_NN_BF``); the
                        # original used ``..._BF_epoch_NN`` and raised KeyError.
                        scores_bf.append(model_dict[name][f'{model}_epoch_{int(epoch):02d}_BF'][i])
                if run_mean:
                    data[f'{name}_BF'] = np.convolve(scores_bf, np.ones((3,)) / 3, mode='same')
                else:
                    data[f'{name}_BF'] = scores_bf

        title = f'{benchmarks_label[i]} benchmark vs epochs'
        plot_data_base(data, title, epochs, 'Epoch', 'Score', x_ticks=epochs, log=True)
def delta_heatmap(model1, model2, imgs, epochs, selection=[], title='', ax=None):
    """Heatmap of score differences between two models over image counts and epochs.

    Rows are image counts (last row: all images), columns are epochs
    (last column: convergence). Each cell is ``calc_dif`` of the two models'
    scores relative to the fully trained model.

    :param model1: first model identifier.
    :param model2: second model identifier.
    :param imgs: image counts (rows).
    :param epochs: epochs (columns).
    :param selection: benchmark indices averaged into each score.
    """
    names = []
    conn = get_connection()
    for model in [model1, model2]:
        # NOTE(review): both branches assign the same value — looks like a
        # leftover from a removed special case (cf. ``_seed42`` handling in
        # ``image_scores``); confirm.
        if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
            model_spec = model
        else:
            model_spec = model
        for img in imgs:
            name = f'{model}_img{img}'
            for epoch in epochs:
                names.append(f'{name}_epoch_{epoch:02d}')
            # NOTE(review): no ``:02d`` here, but the lookups below use
            # ``:02d`` — keys may mismatch for single-digit convergence epochs.
            names.append(f'{name}_epoch_{convergence_images[name]}')
        names.append(f'{model_spec}_epoch_{convergence_epoch[model_spec]}')
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
        names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    # Baseline: mean score of the fully trained model over the selection.
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    data = {}
    for i in range(len(imgs)):
        for j in range(len(epochs)):
            name1 = f'{model1}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            name2 = f'{model2}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            matrix[i, j] = calc_dif(name1, name2, model_dict, full, selection)
        # Last column: converged runs for this image count.
        name = f'{model1}_img{imgs[i]}'
        name = f'{name}_epoch_{convergence_images[name]:02d}'
        name2 = f'{model2}_img{imgs[i]}'
        name2 = f'{name2}_epoch_{convergence_images[name2]:02d}'
        matrix[i, -1] = calc_dif(name, name2, model_dict, full, selection)
    names.append(f'{model1}_epoch_{convergence_epoch[model1]:02d}')
    # Last row: full image set per epoch.
    for j in range(len(epochs)):
        name1 = f'{model1}_epoch_{epochs[j]:02d}'
        name2 = f'{model2}_epoch_{epochs[j]:02d}'
        matrix[-1, j] = calc_dif(name1, name2, model_dict, full, selection)
    # NOTE(review): hard-codes the cluster2 model instead of using ``model1``
    # — confirm this is intentional for the bottom-right cell.
    name = f'CORnet-S_cluster2_v2_IT_trconv3_bi_epoch_{convergence_epoch["CORnet-S_cluster2_v2_IT_trconv3_bi"]:02d}'
    name2 = f'{model2}_epoch_{convergence_epoch[model2]:02d}'
    matrix[-1, -1] = calc_dif(name, name2, model_dict, full, selection)
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}',
                 title=title, annot=True, ax=ax,
                 cbar=False, cmap='RdYlGn', percent=False, alpha=0.6,
                 fmt='.0%', vmin=-0.30, vmax=0.30, yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'])
def image_epoch_heatmap(model, imgs, epochs, selection=[], title=r'\textbf{Standard training epochs/images trade-off}',
                        ax=None):
    """Heatmap of scores (fraction of full training) over image counts and epochs.

    Rows are image counts (last row: all images), columns are epochs
    (last column: convergence).

    :param model: base model identifier.
    :param imgs: image counts (rows).
    :param epochs: epochs (columns).
    :param selection: benchmark indices averaged into each score.
    """
    conn = get_connection()
    names = [f'{model}_img{img}' for img in imgs]
    names.append(model)
    names.append('CORnet-S_full')
    model_dict = load_error_bared(conn, names, epochs=epochs, benchmarks=benchmarks)
    # Baseline: mean score of the fully trained model over the selection.
    full = np.mean(model_dict['CORnet-S_full'][selection])
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    for row, img in enumerate(imgs):
        for col, epoch in enumerate(epochs):
            key = f'{model}_img{img}_epoch_{epoch:02d}'
            matrix[row, col] = np.mean(model_dict[key][selection]) / full
        # Last column: the converged run for this image count.
        matrix[row, -1] = np.mean(model_dict[f'{model}_img{img}'][selection]) / full
    # Last row: the full image set per epoch, converged in the corner.
    for col, epoch in enumerate(epochs):
        matrix[-1, col] = np.mean(model_dict[f'{model}_epoch_{epoch:02d}'][selection]) / full
    matrix[-1, -1] = np.mean(model_dict[model][selection]) / full
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}', title=title, annot=True, ax=ax,
                 cbar=False, cmap='YlOrRd', percent=False,
                 fmt='.0%', vmin=0, vmax=1, yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'], alpha=0.8)
def plot_over_epoch(models):
    """Plot each benchmark's raw score over epochs (0, 5, 10, 15) for several models.

    :param models: iterable of model identifiers.
    """
    model_dict = {}
    conn = get_connection()
    epochs = (0, 5, 10, 15)
    for model in models:
        names = []
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
        model_dict[model] = load_scores(conn, names, benchmarks=benchmarks)
    # NOTE(review): uses the loop variable ``model`` after the loop (last model
    # only), and the entry is never read below — looks like dead/leftover code;
    # confirm before removing.
    model_dict[f'{model}_epoch_00'] = load_scores(conn, ['CORnet-S_random'], benchmarks)
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    for i in range(6):
        data = {}
        for model in models:
            data[model] = []
            for epoch in epochs:
                data[model].append(model_dict[model][f'{model}_epoch_{epoch:02d}'][i])
        # data['CORnet-S'] = [0] * 3 + [model_dict['CORnet-S']['CORnet-S'][i]]
        plot_data_base(data, f'{benchmarks_labels[i]} Benchmark over epochs', epochs, 'Score over epochs', 'Score')
def image_scores_single(model, imgs, selection=[], ax=None):
    """Plot one model's benchmark scores over the number of training images.

    :param model: base model identifier.
    :param imgs: image counts to plot. NOTE(review): mutated below (1280000
        appended), which leaks back into the caller's list.
    :param selection: benchmark indices to plot and to average for 'Mean'.
    :param ax: optional matplotlib axis.
    """
    names = []
    conn = get_connection()
    for img in imgs:
        name = f'{model}_img{img}'
        names.append(name)
    # if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
    #     model = f'{model}_seed42'
    # names.append(f'{model}_epoch_{convergence_epoch[model]}')
    names.append('CORnet-S_full')
    # model_dict = load_scores(conn, names, benchmarks)
    model_dict = load_error_bared(conn, names, benchmarks, convergence=True)
    # NOTE(review): the baseline is the model's own full-image run, not the
    # 'CORnet-S_full' scores loaded above (which go unused) — confirm intent.
    full = model_dict[model]
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Mean']
    data = {}
    for i in range(len(benchmarks) - 1):
        if i in selection:
            data[benchmarks_labels[i]] = []
            for j in imgs:
                name1 = f'{model}_img{j}'
                frac = (np.mean(model_dict[name1][i]) / full[i]) * 100
                data[benchmarks_labels[i]].append(frac)
            # Full-image run as the final point (always 100% by construction).
            frac = (np.mean(model_dict[model][i]) / full[i]) * 100
            data[benchmarks_labels[i]].append(frac)
    # 'Mean' series: average over the selected benchmarks.
    data[benchmarks_labels[-1]] = []
    for j in imgs:
        name1 = f'{model}_img{j}'
        frac = (np.mean(model_dict[name1][selection]) / np.mean(full[selection])) * 100
        data[benchmarks_labels[-1]].append(frac)
    frac = (np.mean(model_dict[model][selection]) / np.mean(full[selection])) * 100
    data[benchmarks_labels[-1]].append(frac)
    imgs.append(1280000)
    plot_data_base(data, '', r'\textbf{Images} [Million]', r'\textbf{Score} [\% of standard training]', x_values=imgs,
                   x_ticks=[100, 1000, 10000, 100000, 1280000], x_labels=['100', '1k', '10k', '100k', '1.3M'],
                   million_base=True, palette=grey_palette[:len(benchmarks_labels) - 1] + [blue_palette[0]], alpha=0.5,
                   use_xticks=True,
                   percent=True, log=True, annotate=True, legend=False, annotate_pos=3, ax=ax)
# Beispiel #17
# 0
def plot_num_params_images(imagenet=False,
                           entry_models=[],
                           all_labels=[],
                           images=[],
                           convergence=False,
                           ax=None,
                           selection=[],
                           log=False,
                           layer_random=layer_random):
    """Scatter scores (percent of full training) against trained-parameter counts.

    One series per (entry-model group, image count) plus a 'Full' series per
    group at convergence.

    :param imagenet: switches the y-axis label to Imagenet performance.
    :param entry_models: list of dicts mapping model id -> layer.
    :param all_labels: display names, zipped with ``entry_models``; must
        contain a parenthesised short name, e.g. '... (KN+DT)'.
    :param images: image counts to plot per model.
    :param convergence: passed to ``get_full`` for the baseline score.
    :param selection: benchmark indices averaged into each score.
    :param log: log-scale the x axis.
    :param layer_random: random-baseline models, always plotted first.
    """
    conn = get_connection()
    # Baseline: mean score of the fully trained model over the selection.
    full = np.mean(get_full(conn, convergence)[selection])
    data2 = {}
    labels = []
    params = {}
    for entry_model, name in itertools.chain(
            [(layer_random, 'Kaiming Normal + Downstream Training (KN+DT)')],
            zip(entry_models, all_labels)):
        # Short label is the text inside the parentheses, e.g. 'KN+DT'.
        short = name.split('(')[1][:-1]
        for img in images:
            series = f'{short} {img} Imgs'
            data2[series] = []
            params[series] = []
        data2[f'{short} Full'] = []
        params[f'{short} Full'] = []
        mod_params = get_model_params(entry_model.keys())
        names = []
        for model in entry_model.keys():
            if model == "CORnet-S_random":
                names.append(model)
            else:
                names.append(f'{model}_epoch_{convergence_epoch[model]}')
                model = model.split('_seed42')[0]
                for img in images:
                    model_img = f'{model}_img{img}'
                    # Fall back to epoch 20 when no convergence epoch is recorded.
                    conv = convergence_images[
                        model_img] if model_img in convergence_images else 20
                    names.append(f'{model_img}_epoch_{conv:02d}')

        model_dict = load_scores(conn, names, benchmarks)
        for model in names:
            percent = (np.mean(model_dict[model][selection]) / full) * 100
            if 'img' not in model:
                series = f'{short} Full'
                base_model = model.partition('_epoch')[0]
            else:
                img = model.split('_')[-3].partition('g')[2]
                base_model = model.partition('_img')[0]
                series = f'{short} {img} Imgs'
            data2[series].append(percent)
            params[series].append(mod_params[base_model])
        # Bug fix: the original extended ``labels`` once per scored model name
        # (inside the loop above), duplicating the legend entries many times.
        # Add one set of labels per entry-model group instead.
        labels = labels + [f'{ep} Images' for ep in images] + ['Convergence']

    if imagenet:
        title = f'Imagenet score vs number of parameter'
        y = r'Imagenet performance [% of standard training]'
    else:
        title = f'Brain-Score Benchmark mean(V4, IT, Behavior) vs number of parameter'
        if len(selection) == 3:
            y = r"Brain Predictivity [% of standard training]"
        else:
            y = r"Brain Predictivity [% of standard training]"
    col = grey_palette[:len(images) + 1] + blue_palette[:len(
        images) + 1] + green_palette[:len(images) +
                                     1] + grey_palette[:len(images) + 1]
    plot_data_double(data2, {},
                     '',
                     x_name='Number of trained parameters [Million]',
                     x_labels=[],
                     y_name=y,
                     x_ticks=params,
                     pal=col,
                     data_labels=labels,
                     ylim=[0, 100],
                     x_ticks_2=[],
                     percent=True,
                     ax=ax,
                     million=True,
                     annotate_pos=0,
                     log=log)
def score_over_layers_avg(all_models, random, all_labels=[], imagenet=False, convergence=False, ax=None, selection=[]):
    """Plot benchmark scores, averaged over the benchmarks in `selection` and
    normalized to the fully trained CORnet-S, against the number of trained layers.

    Args:
        all_models: list of dicts mapping model name -> layer identifier; each
            dict becomes one plotted series, labeled via `all_labels`.
        random: dict of baseline models (model name -> layer) drawn as a
            separate 'Score' series.
        all_labels: one display label per entry in `all_models`.
        imagenet: if True, label the y-axis as Imagenet performance.
        convergence: if True, load scores at each model's convergence epoch.
        ax: optional matplotlib axis to draw on.
        selection: benchmark indices to average over.
    """
    conn = get_connection()
    data = {}
    err2 = {}
    full = 0  # mean score of 'CORnet-S_full'; set when that model is seen
    names = []
    for models in all_models:
        names.extend(models.keys())
    names.extend(random.keys())
    model_dict = load_error_bared(conn, names, benchmarks, convergence=convergence)
    # Baseline series from the random-init models.
    data2 = {'Score': []}
    err2['Score'] = []
    layers2 = []
    for model, layer in random.items():
        layers2.append(layer_best_2[layer])
        if model == 'CORnet-S_full':
            # Reference model defines the 100% mark.
            full = np.mean(model_dict[model][selection])
            full_err = (np.mean(model_dict[model][6:][selection]) / full) * 100
            data2['Score'].append(100)
            err2['Score'].append(full_err)
        else:
            # NOTE: assumes 'CORnet-S_full' precedes the other entries in
            # `random`; otherwise `full` is still 0 here — TODO confirm.
            percent = (np.mean(model_dict[model][selection]) / full) * 100
            percent_error = (np.mean(model_dict[model][6:][selection]) / full) * 100
            data2['Score'].append(percent)
            err2['Score'].append(percent_error)
    x_ticks = {}
    labels = {}
    err = {}
    for models, name in zip(all_models, all_labels):
        data[name] = []
        err[name] = []
        layers = []
        for model, layer in models.items():
            layers.append(layer_best_2[layer])
            if model == 'CORnet-S_full':
                full = np.mean(model_dict[model][selection])
                data[name].append(100)
            else:
                percent = (np.mean(model_dict[model][selection]) / full) * 100
                data[name].append(percent)
            # Error values are stored after the first 6 entries of the score row.
            full_err = (np.mean(model_dict[model][6:][selection]) / full) * 100
            err[name].append(full_err)
        x_ticks[name] = layers
        labels[name] = [name]

    if imagenet:
        # Fixed a stray '}' that was embedded in this label.
        y = r"Imagenet [% of standard training]"
    else:
        # Both selection sizes previously produced the same label; collapsed.
        y = r"Brain Predictivity [% of standard training]"

    plot_data_double(data, data2, '', err=err, err2=err2, x_name='Number of trained layers',
                     y_name=y, x_ticks=x_ticks,
                     x_ticks_2=layers2, percent=True, annotate_pos=0, pal=my_palette, ax=ax)
# Beispiel #19 (score: 0)
def plot_performance(imagenet=True,
                     entry_models=[best_brain_avg],
                     all_labels=[],
                     convergence=False,
                     ax=None,
                     selection=[],
                     log=False):
    """Plot benchmark score (% of the fully trained model) against per-epoch
    training time, for the random baselines and each entry-model family.

    Args:
        imagenet: if True, y-axis is Imagenet performance, else Brain Predictivity.
        entry_models: list of dicts mapping model name -> layer identifier.
        all_labels: one display label per entry in `entry_models`
            (must contain '(...)'; the parenthesized part becomes the short label).
        convergence: load scores at each model's convergence epoch if available.
        ax: optional matplotlib axis.
        selection: benchmark indices to average over.
        log: use a logarithmic x-axis.
    """
    conn = get_connection()
    names = []
    for model in random_scores.keys():
        if convergence and model in convergence_epoch:
            postfix = f'_epoch_{convergence_epoch[model]:02d}'
        else:
            postfix = f'_epoch_06'
        if model != "CORnet-S_random":
            names.append(f'{model}{postfix}')
        else:
            # The random baseline is stored without an epoch suffix.
            names.append(model)
    performance = load_model_parameter(conn)
    model_dict = load_scores(conn, names, benchmarks)
    # Reference score of the fully trained model — loop-invariant, so computed
    # once instead of on every iteration. The epoch is zero-padded to match
    # the naming convention used when `names` was built above (the unpadded
    # lookup only worked for 2-digit epochs).
    high = np.mean(model_dict[
        f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}']
                   [selection])
    time2 = []
    data2 = {'Score': []}

    for model, layer in random_scores.items():
        if model == "CORnet-S_random":
            postfix = ''
        elif convergence and model in convergence_epoch:
            postfix = f'_epoch_{convergence_epoch[model]:02d}'
        else:
            postfix = f'_epoch_06'
        perc = (np.mean(model_dict[f'{model}{postfix}'][selection]) /
                high) * 100
        if layer in performance:
            data2['Score'].append(perc)
            time2.append(performance[layer])

    data = {}
    time = {}
    labels = {}
    for entry_model, name in zip(entry_models, all_labels):
        names = []
        for model in entry_model.keys():
            if convergence and model in convergence_epoch:
                postfix = f'_epoch_{convergence_epoch[model]:02d}'
            else:
                postfix = f'_epoch_06'
            names.append(f'{model}{postfix}')
        model_dict = load_scores(conn, names, benchmarks)
        time[name] = []
        data[name] = []
        for model, layer in entry_model.items():
            if convergence and model in convergence_epoch:
                postfix = f'_epoch_{convergence_epoch[model]:02d}'
            else:
                postfix = f'_epoch_06'
            perc = (np.mean(model_dict[f'{model}{postfix}'][selection]) /
                    high) * 100
            # Only models with a known per-layer timing are plotted.
            if layer in performance:
                data[name].append(perc)
                time[name].append(performance[layer])
        short = name.split('(')[1][:-1]
        labels[name] = [
            f'{value.split(".")[0]}_{short}' for value in entry_model.values()
        ]

    if imagenet:
        y = r'Imagenet performance [% of standard training]'
    else:
        # Removed a stray ')' from the 3-benchmark label; both branches are
        # otherwise identical, so they are collapsed.
        y = r"Brain Predictivity [% of standard training]"
    plot_data_double(data,
                     data2,
                     '',
                     x_name='Training time [Milliseconds/Epoch]',
                     x_labels=[],
                     y_name=y,
                     x_ticks=time,
                     x_ticks_2=time2,
                     percent=True,
                     data_labels=labels,
                     ax=ax,
                     log=log)
# Beispiel #20 (score: 0)
def plot_num_params(imagenet=False,
                    entry_models=[],
                    all_labels=[],
                    convergence=False,
                    ax=None,
                    selection=[],
                    log=False,
                    layer_random=layer_random,
                    pal=None,
                    percent=True,
                    ylim=None):
    """Plot benchmark score against the number of trained parameters.

    The `layer_random` baselines form one series ('Score'); every dict in
    `entry_models` forms an additional series labeled via `all_labels`.

    Args:
        imagenet: y-axis reads Imagenet performance instead of Brain Predictivity.
        entry_models: list of dicts mapping model name -> layer identifier.
        all_labels: one display label per entry in `entry_models`.
        convergence: load baseline scores at their convergence epoch.
        ax: optional matplotlib axis.
        selection: benchmark indices to average over.
        log: logarithmic x-axis.
        layer_random: baseline models, model name -> layer identifier.
        pal: color palette; defaults to `blue_palette`.
        percent: express scores as % of the fully trained model.
        ylim: optional y-axis limits.
    """
    conn = get_connection()
    full_score = get_full(conn, convergence)
    full = np.mean(full_score[selection])
    data = {'Score': []}
    err = {'Score': []}
    model_dict = load_error_bared(conn,
                                  layer_random.keys(),
                                  benchmarks,
                                  convergence=convergence)

    for model, layer in layer_random.items():
        if percent:
            frac = (np.mean(model_dict[model][selection]) / full) * 100
            percent_err = (np.mean(model_dict[model][6:][selection]) /
                           full) * 100
        else:
            frac = np.mean(model_dict[model][selection])
            percent_err = np.mean(model_dict[model][6:][selection])
        print(f'Model {model} has score {frac}')  # fixed 'MOdel' typo
        data['Score'].append(frac)
        err['Score'].append(percent_err)
    data2 = {}
    err2 = {}
    labels = {}
    params = {}
    for entry_model, name in zip(entry_models, all_labels):
        data2[name] = []
        params[name] = []
        err2[name] = []
        for model in entry_model.keys():
            print(model)
            # Parameter counts are keyed by the base model name, without the
            # '_BF' / '_seed42' suffixes.
            if model.endswith('BF'):
                model = model.replace('_BF', '')
            if model.endswith('seed42'):
                model = model.replace('_seed42', '')
            if model == "CORnet-S_random":
                # The untrained baseline has zero trained parameters.
                params[name].append(0)
            else:
                params[name].append(get_params(model))

        model_dict = load_error_bared(conn, entry_model.keys(), benchmarks)
        for model in entry_model.keys():
            if percent:
                frac = (np.mean(model_dict[model][selection]) / full) * 100
                percent_err = (np.mean(model_dict[model][6:][selection]) /
                               full) * 100
            else:
                frac = np.mean(model_dict[model][selection])
                percent_err = np.mean(model_dict[model][6:][selection])
            print(f'Model {model} has score {frac}')  # fixed 'MOdel' typo
            data2[name].append(frac)
            err2[name].append(percent_err)
        # Short label: text inside parentheses, when present.
        if '(' in name:
            short = name.split('(')[1][:-1]
        else:
            short = name
        if len(entry_model) == 1:
            labels[name] = [short, f'{short} Transfer']
        else:
            labels[name] = [short]

    params2 = []
    for model in layer_random.keys():
        if model.endswith('BF'):
            model = model.replace('_BF', '')
        if model == "CORnet-S_random":
            params2.append(0)
        else:
            params2.append(get_params(model))
    if imagenet:
        y = r'\textbf{Imagenet performance}'
    else:
        # Both selection sizes previously produced the same label; collapsed.
        y = r"\textbf{Brain Predictivity}"
    if percent:
        # Raw-string concatenation avoids the invalid '\%' escape sequence
        # that an f-string would warn about; the output is unchanged.
        y = y + r' [\% of standard training]'

    if pal is None:
        pal = blue_palette
    plot_data_double(data2,
                     data,
                     '',
                     err=err2,
                     err2=err,
                     x_name=r'\textbf{Number of trained parameters} [Million]',
                     x_labels=None,
                     ylim=ylim,
                     y_name=y,
                     x_ticks=params,
                     pal=pal,
                     percent=percent,
                     x_ticks_2=params2,
                     data_labels=labels,
                     ax=ax,
                     million=True,
                     log=log,
                     annotate_pos=0)
# Beispiel #21 (score: 0)
def plot_num_params_epochs(imagenet=False,
                           entry_models=[],
                           all_labels=[],
                           epochs=[],
                           convergence=False,
                           ax=None,
                           selection=[],
                           log=False,
                           layer_random=layer_random):
    """Plot benchmark score vs number of trained parameters, with one series
    per (model family, epoch) plus one per family at convergence.

    Args:
        imagenet: y-axis reads Imagenet performance instead of Brain Predictivity.
        entry_models: list of dicts mapping model name -> layer identifier.
        all_labels: one display label per entry in `entry_models`
            (must contain '(...)'; the parenthesized part becomes the short label).
        epochs: integer epochs to plot as individual series.
        convergence: additionally include each model's convergence epoch.
        ax: optional matplotlib axis.
        selection: benchmark indices to average over.
        log: logarithmic x-axis.
        layer_random: baseline family prepended to `entry_models`.
    """
    conn = get_connection()
    full = np.mean(get_full(conn, convergence)[selection])
    data2 = {}
    labels = []
    params = {}
    for entry_model, name in itertools.chain(
        [(layer_random, 'Kaiming Normal + Downstream Training (KN+DT)')],
            zip(entry_models, all_labels)):
        short = name.split('(')[1][:-1]
        # Pre-create one series per requested epoch plus a convergence series.
        for epoch in epochs:
            name_epoch = f'{short} Epoch {epoch:02d}'
            data2[name_epoch] = []
            params[name_epoch] = []
        data2[f'{short} Convergence'] = []
        params[f'{short} Convergence'] = []
        mod_params = get_model_params(entry_model.keys())
        names = []
        for model in entry_model.keys():
            if model == "CORnet-S_random":
                names.append(model)
            else:
                conv = convergence_epoch[
                    model] if model in convergence_epoch else 100
                for epoch in epochs:
                    if epoch < conv:
                        names.append(f'{model}_epoch_{epoch:02d}')
                if convergence and model in convergence_epoch:
                    names.append(f'{model}_epoch_{conv:02d}')

        model_dict = load_scores(conn, names, benchmarks)
        for model in names:
            if '_epoch_' not in model:
                # e.g. 'CORnet-S_random' carries no epoch suffix; previously
                # this crashed on int('random') below.
                continue
            epoch = model.split('_')[-1]
            base_model = model.partition('_epoch')[0]
            percent = (np.mean(model_dict[model][selection]) / full) * 100
            # Membership guard: models absent from convergence_epoch used the
            # `conv = 100` fallback above and would raise KeyError here.
            if base_model in convergence_epoch and \
                    int(epoch) == convergence_epoch[base_model]:
                name_epoch = f'{short} Convergence'
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
            if int(epoch) in epochs:
                name_epoch = f'{short} Epoch {epoch}'
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
        labels = labels + [f'Epoch {ep}' for ep in epochs] + ['Convergence']

    if imagenet:
        y = r'Imagenet performance [% of standard training]'
    else:
        # Both selection sizes previously produced the same label; collapsed.
        y = r"Brain Predictivity [% of standard training]"
    col = grey_palette[:len(epochs) + 1] + blue_palette[:len(
        epochs) + 1] + green_palette[:len(epochs) +
                                     1] + grey_palette[:len(epochs) + 1]
    plot_data_double(
        data2,
        {},
        '',
        x_name='Number of trained parameters [Million]',
        x_labels=[],
        y_name=y,
        data_labels=labels,
        x_ticks=params,
        pal=col,
        x_ticks_2=[],
        percent=True,
        ax=ax,
        million=True,
        ylim=[0, 100],
        annotate_pos=0,
        log=log,
    )
# Beispiel #22 (score: 0)
def image_epoch_score(models,
                      imgs,
                      epochs,
                      selection=[],
                      axes=None,
                      percent=True,
                      make_trillions=False,
                      with_weights=True):
    """Plot brain score against a training-amount value (images * epochs *
    parameter count / 1e6) on a broken x-axis (axes[0] for x == 0, axes[1] for x > 0).

    Args:
        models: dict mapping model name -> plot label.
        imgs: image-set sizes the models were trained with.
        epochs: epochs to include; fractional epochs use a one-decimal suffix.
        selection: benchmark indices to average over.
        axes: pair of matplotlib axes for the broken-axis layout.
        percent: express scores as % of the fully trained model.
        make_trillions: forwarded to plotting (see inline note on tick labels).
        with_weights: weight the x-value by the model's parameter count.
    """
    names = []
    conn = get_connection()
    params = {}
    data = {}
    for model, label in models.items():
        data[label] = []
        params[label] = []
        # NOTE(review): both branches assign the same value, so this special
        # case for 'CORnet-S_cluster2_v2_IT_trconv3_bi' is currently a no-op.
        if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
            model1 = model
        else:
            model1 = model
        for img in imgs:
            name = f'{model}_img{img}'
            for epoch in epochs:
                # Integer epochs are zero-padded; fractional ones keep one decimal.
                if epoch % 1 == 0:
                    names.append(f'{name}_epoch_{epoch:02d}')
                else:
                    names.append(f'{name}_epoch_{epoch:.1f}')
            if name in convergence_images:
                names.append(f'{name}_epoch_{convergence_images[name]}')
        names.append(f'{model1}_epoch_{convergence_epoch[model1]:02d}')
    if with_weights:
        parameter = get_model_params(models, False)
    else:
        # A weight of 1 disables the parameter-count factor in the x-value.
        parameter = {x: 1 for x in models}
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    # Reference: mean score of the fully trained model over `selection`.
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    high_x = 0
    high_y = 0
    val = 0
    for model in names:
        if percent:
            frac = (np.mean(model_dict[model][selection]) / full) * 100
        else:
            frac = np.mean(model_dict[model][selection])
        if frac > 0.0:  # skip entries with no stored score
            if 'img' not in model:
                # Full-imagenet run: x-value = 1,280,000 images * epoch * Mparams.
                base_model = model.partition('_epoch')[0]
                epoch = float(model.partition('_epoch_')[2])
                data[models[base_model]].append(frac)
                score = (1280000 * epoch * (parameter[base_model] / 1000000)
                         )  #
                print(
                    f'Model {base_model} in epoch {epoch} with full imagenet set '
                    f'leads to score {score} with brain score {frac}')
                params[models[base_model]].append(score)
            else:
                # Reduced-image run: parse image count and epoch out of the name.
                # NOTE(review): this rebinding shadows the `imgs` parameter.
                base_model = model.partition('_img')[0]
                imgs = int(model.partition('_img')[2].partition('_')[0])
                epoch = float(
                    model.partition('_img')[2].partition('_epoch_')[2])
                score = (imgs * epoch * (parameter[base_model] / 1000000)
                         )  # (parameter[base_model] / 1000000) *
                data[models[base_model]].append(frac)
                params[models[base_model]].append(score)
                print(
                    f'Model {base_model} in epoch {epoch} with {imgs} images '
                    f'leads to score {score} with brain score {frac}')
        # NOTE(review): `percent` is the boolean flag, so this compares
        # True/False against high_y; presumably `frac` was intended — confirm.
        if percent > high_y:
            high_y = percent
    if len(selection) == 3:
        y = r"\textbf{Brain Predictivity}"
    else:
        y = r"\textbf{Brain Predictivity}[\% of standard training]"  # [\% of standard training]
    for i, ax in enumerate(axes):
        # Split every series into x == 0 vs x > 0 points for the broken axis.
        zero_indices = {
            key: np.array([tick == 0 for tick in xticks])
            for key, xticks in params.items()
        }
        if i == 0:  # axis plotting the x=0 value
            ax_data = {
                key: np.array(values)[zero_indices[key]].tolist()
                for key, values in data.items()
            }
            xticks = {
                key: np.array(values)[zero_indices[key]].tolist()
                for key, values in params.items()
            }
            xticklabels = np.array([0])
            ylabel = y
        else:  # axis plotting everything x>0
            ax_data = {
                key: np.array(values)[~zero_indices[key]].tolist()
                for key, values in data.items()
            }
            xticks = {
                key: np.array(values)[~zero_indices[key]].tolist()
                for key, values in params.items()
            }
            # when make_trillions==True, this should actually be *10^12, but due to downstream hacks we leave it at ^6
            xticklabels = np.array([.001, .01, .1, 1, 10, 100, 1000]) * pow(
                10, 6)
            ax.spines['left'].set_visible(False)
            ylabel = ''
        kwargs = dict(trillion=True) if make_trillions else dict(
            trillion=True, million_base=True)
        plot_data_double(ax_data, {},
                         '',
                         x_name='',
                         x_labels=xticklabels,
                         scatter=True,
                         percent=percent,
                         alpha=0.8,
                         ylim=[0, 100],
                         y_name=ylabel,
                         x_ticks=xticks,
                         pal=[
                             '#2CB8B8', '#186363', '#ABB2B9', '#ABB2B9',
                             '#ABB2B9', '#259C9C', '#36E3E3', '#9AC3C3'
                         ],
                         log=True,
                         x_ticks_2={},
                         ax=ax,
                         **kwargs,
                         annotate_pos=0)

        # adopted from https://stackoverflow.com/a/32186074/2225200
        d = .015  # how big to make the diagonal lines in axes coordinates
        kwargs = dict(transform=ax.transAxes, color='#dedede', clip_on=False)
        if i == 0:
            m = 1 / .05
            ax.plot((1 - d * m, 1 + d * m), (-d, +d), **kwargs)
        else:
            # NOTE(review): this update is a no-op — the same transform is
            # already set two lines above.
            kwargs.update(transform=ax.transAxes)
            ax.plot((-d, +d), (-d, +d), **kwargs)
            # remove yticks. We can't `ax.yaxis.set_visible(False)` altogether since that would also remove the grid
            for tic in ax.yaxis.get_major_ticks():
                tic.tick1On = tic.tick2On = False
            ax.set_yticklabels([])
        axes[0].set_ylim(axes[1].get_ylim())