def score_over_layers(models, random, labels, bench, convergence=True, ax=None):
    """Plot benchmark scores (as % of the fully trained model) over the number of trained layers."""
    if bench is not None:
        benchmarks = bench
    conn = get_connection()
    # Normalize against the fully trained CORnet-S, preferring its convergence epoch.
    if convergence and 'CORnet-S_full' in convergence_epoch:
        full_name = f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'
        full_tr = load_scores(conn, [full_name], benchmarks)[full_name]
    else:
        full_tr = load_scores(conn, ['CORnet-S_full_epoch_06'], benchmarks)['CORnet-S_full_epoch_06']
    model_dict = load_error_bared(conn, list(chain(models.keys(), random.keys())), benchmarks,
                                  convergence=convergence)
    if len(benchmarks) < 6:
        benchmarks_labels = ['V4', 'IT', 'Behavior', 'Imagenet']
    else:
        benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    data = {}
    err = {}
    x_ticks = {}
    for i in range(len(benchmarks)):
        data[benchmarks_labels[i]] = []
        err[benchmarks_labels[i]] = []
        x_ticks[benchmarks_labels[i]] = []
        layers = []
        for model, layer in models.items():
            layers.append(layer_best_2[layer])
            # Score and error bar as a percentage of the fully trained model.
            frac = (model_dict[model][i] / full_tr[i]) * 100
            frac_err = (model_dict[model][len(benchmarks):][i] / full_tr[i]) * 100
            data[benchmarks_labels[i]].append(frac)
            err[benchmarks_labels[i]].append(frac_err)
        x_ticks[benchmarks_labels[i]] = layers
    plot_data_double(data, data2=None, err=err, name='Artificial Genome + Critical Training',
                     x_name='Number of trained layers',
                     y_name=r'Benchmark Score [% of standard training]',
                     x_ticks=x_ticks, x_ticks_2=[], percent=True, ax=ax, pal=red_palette,
                     annotate_pos=1)
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, ax=None):
    """Plot brain/Imagenet scores over the first training epochs, normalized to full training."""
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    x_values = {}
    if convergence and 'CORnet-S_full' in convergence_epoch:
        full_name = f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'
        full_tr = load_scores(conn, [full_name], benchmarks)[full_name]
    else:
        full_tr = load_scores(conn, ['CORnet-S_full_epoch_06'], benchmarks)['CORnet-S_full_epoch_06']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            # Whole epochs are stored zero-padded ('_epoch_02'), fractional ones with one decimal ('_epoch_0.2').
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{int(epoch):02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        # Benchmark indices 2:5 are V4, IT and Behavior; index 5 is Imagenet.
        full = np.mean(full_tr[2:5]) if brain else np.mean(full_tr[5])
        for epoch in epochs:
            if epoch % 1 == 0:
                key = f'{model}_epoch_{int(epoch):02d}'
            else:
                key = f'{model}_epoch_{epoch:.1f}'
            res = model_dict[key][2:5] if brain else model_dict[key][5]
            scores.append((np.mean(res) / full) * 100)
        if convergence and model in convergence_epoch:
            key = f'{model}_epoch_{convergence_epoch[model]:02d}'
            res = model_dict[key][2:5] if brain else model_dict[key][5]
            scores.append((np.mean(res) / full) * 100)
            # epochs may be a tuple; convert before concatenating.
            x_values[name] = list(epochs) + [convergence_epoch[model]]
        else:
            x_values[name] = list(epochs)
        data[name] = scores
    title = 'Brain scores mean vs epochs' if brain else 'Imagenet score vs epochs'
    plot_data_base(data, 'First epochs', x_values, 'Epochs',
                   'Brain Predictivity [% of standard training]',
                   x_ticks=list(epochs) + [30, 40, 50], log=True, percent=True,
                   special_xaxis=True, legend=False, only_blue=False, palette=red_palette,
                   annotate=True, annotate_pos=1, ax=ax)
# Note: this variant for other base architectures (model_name, e.g. 'resnet') shares its name
# with the function above; if both live in the same module, this definition shadows the first.
def plot_first_epochs(models, epochs=None, brain=True, convergence=True, model_name='resnet'):
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    x_values = {}
    conv_number = convergence_epoch[f'{model_name}_v1_CORnet-S_full']
    full_name = f'{model_name}_v1_CORnet-S_full_epoch_{conv_number}'
    full_tr = load_scores(conn, [full_name], benchmarks)[full_name]
    # Hoisted out of the loop so `y` is defined even when no model reaches convergence.
    y = 'mean(V4, IT, Behavior) [% of standard training]' if brain else 'Imagenet [% of standard training]'
    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{int(epoch):02d}')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        if convergence and model in convergence_epoch:
            names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        scores = []
        full = np.mean(full_tr[2:5]) if brain else np.mean(full_tr[5])
        for epoch in epochs:
            if epoch % 1 == 0:
                key = f'{model}_epoch_{int(epoch):02d}'
            else:
                key = f'{model}_epoch_{epoch:.1f}'
            res = model_dict[key][2:5] if brain else model_dict[key][5]
            scores.append((np.mean(res) / full) * 100)
        if convergence and model in convergence_epoch:
            key = f'{model}_epoch_{convergence_epoch[model]:02d}'
            res = model_dict[key][2:5] if brain else model_dict[key][5]
            scores.append((np.mean(res) / full) * 100)
            x_values[name] = list(epochs) + [convergence_epoch[model]]
        else:
            x_values[name] = list(epochs)
        data[name] = scores
    title = (f'{model_name} Brain scores mean vs epochs' if brain
             else f'{model_name} Imagenet score vs epochs')
    plot_data_base(data, '', x_values, 'Epochs', y,
                   x_ticks=list(epochs) + [10, 20, 30], percent=True,
                   special_xaxis=True, only_blue=False)
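# The two functions above (and several below) rebuild the same epoch-suffix strings by hand.
# A small helper like the following sketch could centralize that convention; `epoch_key` is a
# hypothetical name, not part of the original module, and the existing functions are left
# untouched so their behavior is unchanged.
def epoch_key(model, epoch):
    """Return the score-table key for `model` at `epoch`.

    Whole epochs are zero-padded ints ('..._epoch_06'); fractional epochs keep one decimal
    place ('..._epoch_0.2'), matching the naming used throughout this module.
    """
    if epoch % 1 == 0:
        return f'{model}_epoch_{int(epoch):02d}'
    return f'{model}_epoch_{epoch:.1f}'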
def image_scores(models, imgs, labels, ax=None, selection=[]):
    """Plot scores as a function of the number of training images, normalized to full training."""
    names = []
    conn = get_connection()
    for model in models:
        for img in imgs:
            name = f'{model}_img{img}'
            names.append(f'{name}_epoch_{convergence_images[name]}')
        if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
            model = f'{model}_seed42'
        names.append(f'{model}_epoch_{convergence_epoch[model]}')
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    data2 = {}
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    # Collect one curve per model: one point per image count, plus the full-dataset point.
    # (Initializing the lists inside the image loop, as an earlier version did, resets them
    # on every iteration and keeps only the last image count.)
    for model, name in zip(models, labels):
        data2[name] = []
        for i in imgs:
            name1 = f'{model}_img{i}'
            frac = (np.mean(model_dict[f'{name1}_epoch_{convergence_images[name1]}'][selection]) / full) * 100
            data2[name].append(frac)
        if model == 'CORnet-S_cluster2_v2_IT_trconv3_bi':
            model = f'{model}_seed42'
        frac = (np.mean(model_dict[f'{model}_epoch_{convergence_epoch[model]}'][selection]) / full) * 100
        data2[name].append(frac)
    if len(selection) == 1:
        title = 'Imagenet score vs number of weights'
        y = r'Imagenet [% of standard training]'
    else:
        title = 'Brain scores mean vs number of weights'
        y = r'Brain Predictivity [% of standard training]'
    imgs.append(1200000)  # add the full-Imagenet point (~1.2M images)
    plot_data_double(data2, {}, '', x_name='Number of images in million', y_name=y,
                     x_ticks={'IT init, selective training': imgs}, x_ticks_2=imgs,
                     percent=True, log=True, ax=ax, million=True)
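# Minimal usage sketch for `image_scores`, assuming scores for these runs already exist in
# the database. The image counts are hypothetical placeholders; the model id and label are
# the ones this module already references.
if False:  # example only
    image_scores(
        models=['CORnet-S_cluster2_v2_IT_trconv3_bi'],
        imgs=[100, 1000, 10000, 100000],          # training-set sizes to plot (hypothetical)
        labels=['IT init, selective training'],   # legend label per model
        selection=[2, 3, 4],                      # V4, IT, Behavior benchmark indices
    )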
def plot_overflow():
    overflow_models = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5):
        overflow_models.append(f'CORnet-S_overflow_{i}')
    # conn = get_connection()
    conn = get_connection('scores_openmind')
    result_overflow_models = load_scores(conn, overflow_models,
                                         ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(conn, ['CORnet-S_random'],
                                     ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(conn, ['CORnet-S'],
                              ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    it_data = {'overflow_init': [], 'base_untrained': [], 'base_trained': []}
    behavior_data = {'overflow_init': [], 'base_untrained': [], 'base_trained': []}
    labels = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5):
        labels.append(i)
        it_data['overflow_init'].append(result_overflow_models[f'CORnet-S_overflow_{i}'][0])
        it_data['base_untrained'].append(result_base_random['CORnet-S_random'][0])
        it_data['base_trained'].append(result_base['CORnet-S'][0])
        behavior_data['overflow_init'].append(result_overflow_models[f'CORnet-S_overflow_{i}'][1])
        behavior_data['base_untrained'].append(result_base_random['CORnet-S_random'][1])
        behavior_data['base_trained'].append(result_base['CORnet-S'][1])
    print(it_data)
    print(behavior_data)
    plot_data_base(it_data, 'IT benchmark overflow initialization', labels,
                   'Overflow initialization in %', 'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'Behavior benchmark overflow initialization', labels,
                   'Overflow initialization in %', 'Score', [0.0, 0.6])
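# The IT + behavior benchmark pair above is re-declared verbatim in several functions below
# (plot_single_layer_perturbation, plot_high_low_nullify, ...). A module-level constant such
# as this sketch would keep the identifiers in one place; `IT_BEHAVIOR_BENCHMARKS` is a
# hypothetical name, and the existing functions are left unchanged.
IT_BEHAVIOR_BENCHMARKS = ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n']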
def plot_models_benchmark_vs_public(models, file_name):
    conn = get_connection()
    epoch = 6
    names = []
    for model in models.keys():
        names.append(f'{model}_epoch_{epoch:02d}')
    model_dict = load_scores(conn, names, benchmarks)
    model_dict_pub = load_scores(conn, names, benchmarks_public)
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    data_set = {}
    # Replace the model id with a more human-readable description.
    for id, desc in models.items():
        data_set[desc] = model_dict[f'{id}_epoch_{epoch:02d}']
        data_set[f'{desc} public'] = model_dict_pub[f'{id}_epoch_{epoch:02d}']
        print(f'Mean of brain benchmark model {desc}, {np.mean(data_set[desc][2:5])}')
    plot_bar_benchmarks(data_set, benchmarks_labels, 'Model scores in epoch 6',
                        'Score [% of standard training]', file_name, grey=True)
def score_layer_depth(values, brain=True):
    names = []
    conn = get_connection()
    for k, v in values.items():
        names.append(f'{k}_epoch_05')
    for k, v in random_scores.items():
        if k != 'CORnet-S_random' and k != 'CORnet-S_train_random':
            names.append(f'{k}_epoch_05')
        else:
            names.append(k)
    model_dict = load_scores(conn, names,
                             ['movshon.FreemanZiemba2013.V1-pls', 'movshon.FreemanZiemba2013.V2-pls',
                              'dicarlo.Majaj2015.V4-pls', 'dicarlo.Majaj2015.IT-pls',
                              'dicarlo.Rajalingham2018-i2n', 'fei-fei.Deng2009-top1'])
    # Per-layer weight counts; `acc` accumulates the number of weights remaining after each layer.
    weight_num = [9408, 36864, 8192, 16384, 65536, 2359296, 65536, 32768, 65536, 262144,
                  9437184, 262144, 131072, 262144, 1048576, 37748736, 1048576, 512000]
    acc = [52860096 + 512000]
    for i in weight_num:
        acc.append(acc[-1] - i)
    weights = []
    results = []
    for model, l in values.items():
        index = layers.index(l)
        weights.append(acc[index])
        res = model_dict[f'{model}_epoch_05']
        if brain:
            results.append(np.mean(res[2:4]))
            # if index < 7:
            #     results.append(np.mean(res[0:1]))
            # else:
            #     results.append(np.mean(res[0:2]))
        else:
            results.append(res[5])
    rand_names = []
    for model, l in random_scores.items():
        index = layers.index(l)
        weights.append(acc[index])
        if model != 'CORnet-S_random' and model != 'CORnet-S_train_random':
            res = model_dict[f'{model}_epoch_05']
        else:
            res = model_dict[model]
        if brain:
            results.append(np.mean(res[0:4]))
            # if index < 7:
            #     results.append(np.mean(res[0:1]))
            # else:
            #     results.append(np.mean(res[0:2]))
        else:
            results.append(res[5])
        rand_names.append(f'Random {l}')
    title = 'Brain scores mean vs number of weights' if brain else 'Imagenet score vs number of weights'
    scatter_plot(weights, results, x_label='Num of weights', y_label='Score',
                 labels=list(values.values()) + rand_names, title=title)
def plot_over_epoch(models):
    model_dict = {}
    conn = get_connection()
    epochs = (0, 5, 10, 15)
    for model in models:
        names = []
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
        model_dict[model] = load_scores(conn, names, benchmarks=benchmarks)
        model_dict[f'{model}_epoch_00'] = load_scores(conn, ['CORnet-S_random'], benchmarks)
    benchmarks_labels = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    for i in range(6):
        data = {}
        for model in models:
            data[model] = []
            for epoch in epochs:
                data[model].append(model_dict[model][f'{model}_epoch_{epoch:02d}'][i])
        # data['CORnet-S'] = [0] * 3 + [model_dict['CORnet-S']['CORnet-S'][i]]
        plot_data_base(data, f'{benchmarks_labels[i]} Benchmark over epochs', epochs,
                       'Score over epochs', 'Score')
def plot_model_avg_benchmarks(models, file_name):
    conn = get_connection()
    epoch = 6
    names = []
    for model in models.keys():
        names.append(f'{model}_epoch_{epoch:02d}')
    model_dict = load_scores(conn, names, benchmarks)
    benchmarks_labels = ['Brain Predictivity', 'Imagenet']
    data_set = {}
    # Replace the model id with a more human-readable description.
    for id, desc in models.items():
        data = model_dict[f'{id}_epoch_{epoch:02d}']
        data_set[desc] = [np.mean(data[0:5]), data[5]]
        print(f'Mean of brain benchmark model {desc}, {np.mean(data[0:5])}')
    plot_bar_benchmarks(data_set, benchmarks_labels, '', 'Scores', file_name)
def plot_single_benchmarks(models, epochs=None, compare_batchfix=False, run_mean=False):
    model_dict = {}
    conn = get_connection()
    if epochs is None:
        epochs = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 6)
    data = {}
    benchmarks_label = ['V1', 'V2', 'V4', 'IT', 'Behavior', 'Imagenet']
    for model, name in models.items():
        names = []
        for epoch in epochs:
            if epoch % 1 == 0:
                names.append(f'{model}_epoch_{int(epoch):02d}')
                # BF variants are only built for whole epochs, where the ':02d' format is valid.
                if compare_batchfix:
                    names.append(f'{model}_epoch_{int(epoch):02d}_BF')
            else:
                names.append(f'{model}_epoch_{epoch:.1f}')
        model_dict[name] = load_scores(conn, names, benchmarks)
    for i in range(6):
        for model, name in models.items():
            scores = []
            for epoch in epochs:
                if epoch % 1 == 0:
                    scores.append(model_dict[name][f'{model}_epoch_{int(epoch):02d}'][i])
                else:
                    scores.append(model_dict[name][f'{model}_epoch_{epoch:.1f}'][i])
            if run_mean:
                # Pad the 3-wide running mean with the raw endpoints so the curve spans all epochs.
                data[name] = [scores[0]] + list(np.convolve(scores, np.ones((3,)) / 3, mode='valid')) + [scores[-1]]
            else:
                data[name] = scores
            if compare_batchfix:
                scores = []
                for epoch in epochs:
                    if epoch % 1 == 0:
                        # Key must match the name stored above ('..._epoch_XX_BF').
                        scores.append(model_dict[name][f'{model}_epoch_{int(epoch):02d}_BF'][i])
                if run_mean:
                    data[f'{name}_BF'] = np.convolve(scores, np.ones((3,)) / 3, mode='same')
                else:
                    data[f'{name}_BF'] = scores
        title = f'{benchmarks_label[i]} benchmark vs epochs'
        plot_data_base(data, title, epochs, 'Epoch', 'Score', x_ticks=epochs, log=True)
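# The endpoint-padded running mean above is easy to get wrong (list + ndarray concatenation
# raised a TypeError in an earlier version). A standalone helper, as a sketch; `running_mean`
# is a hypothetical name and is not wired into the functions above.
def running_mean(scores, window=3):
    """Smooth `scores` with a centered moving average, keeping the raw endpoint values.

    With the default window of 3 the output has the same length as the input.
    """
    kernel = np.ones(window) / window
    smoothed = np.convolve(scores, kernel, mode='valid').tolist()
    return [scores[0]] + smoothed + [scores[-1]]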
def delta_heatmap(model1, model2, imgs, epochs, selection=[], title='', ax=None):
    """Heatmap of score differences between model1 and model2 over image counts and epochs."""
    names = []
    conn = get_connection()
    for model in [model1, model2]:
        for img in imgs:
            name = f'{model}_img{img}'
            for epoch in epochs:
                names.append(f'{name}_epoch_{epoch:02d}')
            # Zero-pad the convergence epoch so the key matches the lookups below.
            names.append(f'{name}_epoch_{convergence_images[name]:02d}')
        names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
        for epoch in epochs:
            names.append(f'{model}_epoch_{epoch:02d}')
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    matrix = np.zeros([len(imgs) + 1, len(epochs) + 1])
    for i in range(len(imgs)):
        for j in range(len(epochs)):
            name1 = f'{model1}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            name2 = f'{model2}_img{imgs[i]}_epoch_{epochs[j]:02d}'
            matrix[i, j] = calc_dif(name1, name2, model_dict, full, selection)
        name = f'{model1}_img{imgs[i]}'
        name = f'{name}_epoch_{convergence_images[name]:02d}'
        name2 = f'{model2}_img{imgs[i]}'
        name2 = f'{name2}_epoch_{convergence_images[name2]:02d}'
        matrix[i, -1] = calc_dif(name, name2, model_dict, full, selection)
    for j in range(len(epochs)):
        name1 = f'{model1}_epoch_{epochs[j]:02d}'
        name2 = f'{model2}_epoch_{epochs[j]:02d}'
        matrix[-1, j] = calc_dif(name1, name2, model_dict, full, selection)
    name = f'CORnet-S_cluster2_v2_IT_trconv3_bi_epoch_{convergence_epoch["CORnet-S_cluster2_v2_IT_trconv3_bi"]:02d}'
    name2 = f'{model2}_epoch_{convergence_epoch[model2]:02d}'
    matrix[-1, -1] = calc_dif(name, name2, model_dict, full, selection)
    plot_heatmap(matrix, r'\textbf{Epochs}', r'\textbf{Images}', title=title, annot=True, ax=ax,
                 cbar=False, cmap='RdYlGn', percent=False, alpha=0.6, fmt='.0%',
                 vmin=-0.30, vmax=0.30,
                 yticklabels=imgs + ['All'], xticklabels=epochs + ['Convergence'])
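# `calc_dif` is defined elsewhere in the repository. For reference, a minimal sketch that is
# consistent with how it is called above (difference of two normalized mean scores, plotted
# with fmt='.0%' and vmin/vmax of +/-0.30); this is an assumption about its behavior, not
# the original implementation, hence the distinct name.
def calc_dif_sketch(name1, name2, model_dict, full, selection):
    """Return the score difference between two runs as a fraction of the full model's score."""
    score1 = np.mean(model_dict[name1][selection]) / full
    score2 = np.mean(model_dict[name2][selection]) / full
    return score1 - score2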
def plot_num_params_epochs(imagenet=False, entry_models=[], all_labels=[], epochs=[],
                           convergence=False, ax=None, selection=[], log=False,
                           layer_random=layer_random):
    conn = get_connection()
    full = np.mean(get_full(conn, convergence)[selection])
    data2 = {}
    labels = []
    params = {}
    for entry_model, name in itertools.chain(
            [(layer_random, 'Kaiming Normal + Downstream Training (KN+DT)')],
            zip(entry_models, all_labels)):
        # Use the abbreviation in parentheses, e.g. 'KN+DT', as the series prefix.
        short = name.split('(')[1][:-1]
        for epoch in epochs:
            name_epoch = f'{short} Epoch {epoch:02d}'
            data2[name_epoch] = []
            params[name_epoch] = []
        data2[f'{short} Convergence'] = []
        params[f'{short} Convergence'] = []
        mod_params = get_model_params(entry_model.keys())
        names = []
        for model in entry_model.keys():
            if model == "CORnet-S_random":
                names.append(model)
            else:
                conv = convergence_epoch[model] if model in convergence_epoch else 100
                for epoch in epochs:
                    if epoch < conv:
                        names.append(f'{model}_epoch_{epoch:02d}')
                if convergence and model in convergence_epoch:
                    names.append(f'{model}_epoch_{conv:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        for model in names:
            epoch = model.split('_')[-1]
            if not epoch.isdigit():
                # Names without an epoch suffix (e.g. 'CORnet-S_random') carry no epoch to bin by.
                continue
            base_model = model.partition('_epoch')[0]
            percent = (np.mean(model_dict[model][selection]) / full) * 100
            if base_model in convergence_epoch and int(epoch) == convergence_epoch[base_model]:
                name_epoch = f'{short} Convergence'
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
            if int(epoch) in epochs:
                name_epoch = f'{short} Epoch {epoch}'
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
        labels = labels + [f'Epoch {ep}' for ep in epochs] + ['Convergence']
    if imagenet:
        title = 'Imagenet score vs number of parameter'
        y = r'Imagenet performance [% of standard training]'
    else:
        title = 'Brain Predictivity vs number of parameter'
        y = r'Brain Predictivity [% of standard training]'
    col = (grey_palette[:len(epochs) + 1] + blue_palette[:len(epochs) + 1]
           + green_palette[:len(epochs) + 1] + grey_palette[:len(epochs) + 1])
    plot_data_double(data2, {}, '', x_name='Number of trained parameters [Million]', x_labels=[],
                     y_name=y, data_labels=labels, x_ticks=params, pal=col, x_ticks_2=[],
                     percent=True, ax=ax, million=True, ylim=[0, 100], annotate_pos=0, log=log)
def plot_single_layer_perturbation():
    norm_dist_models = []
    jumbler_models = []
    fixed_models = []
    fixed_small_models = []
    for i in range(1, 18):
        norm_dist_models.append(f'CORnet-S_norm_dist_L{i}')
        jumbler_models.append(f'CORnet-S_jumbler_L{i}')
        fixed_models.append(f'CORnet-S_fixed_value_L{i}')
        fixed_small_models.append(f'CORnet-S_fixed_value_small_L{i}')
    conn = get_connection()
    result_norm = load_scores(conn, norm_dist_models,
                              ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_jumbler = load_scores(conn, jumbler_models,
                                 ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_fixed = load_scores(conn, fixed_models,
                               ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(conn, ['CORnet-S'],
                              ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_fixed_small = load_scores(conn, fixed_small_models,
                                     ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    it_data = {'norm_dist': [], 'jumbler': [], 'fixed_value': [], 'fixed_small_value': [], 'base': []}
    behavior_data = {'norm_dist': [], 'jumbler': [], 'fixed_value': [], 'fixed_small_value': [], 'base': []}
    labels = []
    for i in range(1, 18):
        labels.append(f'L{i}')
        it_data['norm_dist'].append(result_norm[f'CORnet-S_norm_dist_L{i}'][0])
        it_data['jumbler'].append(result_jumbler[f'CORnet-S_jumbler_L{i}'][0])
        it_data['fixed_value'].append(result_fixed[f'CORnet-S_fixed_value_L{i}'][0])
        it_data['base'].append(result_base['CORnet-S'][0])
        it_data['fixed_small_value'].append(result_fixed_small[f'CORnet-S_fixed_value_small_L{i}'][0])
        behavior_data['norm_dist'].append(result_norm[f'CORnet-S_norm_dist_L{i}'][1])
        behavior_data['jumbler'].append(result_jumbler[f'CORnet-S_jumbler_L{i}'][1])
        behavior_data['fixed_value'].append(result_fixed[f'CORnet-S_fixed_value_L{i}'][1])
        behavior_data['base'].append(result_base['CORnet-S'][1])
        behavior_data['fixed_small_value'].append(result_fixed_small[f'CORnet-S_fixed_value_small_L{i}'][1])
    plot_data_base(it_data, 'IT Benchmark single layer', labels, 'Conv Layers', 'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'Behavior Benchmark single layer', labels, 'Conv Layers', 'Score', [0.0, 0.6])
def plot_high_low_nullify_separate():
    high_var_models = []
    high_var_trained_models = []
    low_var_models = []
    low_var_trained_models = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95):
        high_var_models.append(f'CORnet-S_high_zero_{i}')
        high_var_trained_models.append(f'CORnet-S_trained_high_zero_{i}')
        low_var_models.append(f'CORnet-S_low_zero_{i}')
        low_var_trained_models.append(f'CORnet-S_trained_low_zero_{i}')
    conn = get_connection()
    result_high_var = load_scores(conn, high_var_models,
                                  ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_high_var_trained = load_scores(conn, high_var_trained_models,
                                          ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var = load_scores(conn, low_var_models,
                                 ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var_trained = load_scores(conn, low_var_trained_models,
                                         ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(conn, ['CORnet-S_random'],
                                     ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(conn, ['CORnet-S'],
                              ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    it_data = {'high_zero': [], 'low_zero': [], 'base': []}
    # it_data['high_zero_trained'] = []
    # it_data['low_zero_trained'] = []
    # it_data['base_trained'] = []
    behavior_data = {'high_zero': [], 'low_zero': [], 'base': []}
    # behavior_data['high_zero_trained'] = []
    # behavior_data['low_zero_trained'] = []
    # behavior_data['base_trained'] = []
    labels = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95):
        labels.append(i)
        # it_data['high_zero'].append(result_high_var[f'CORnet-S_high_zero_{i}'][0])
        it_data['high_zero'].append(result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][0])
        # it_data['low_zero'].append(result_low_var[f'CORnet-S_low_zero_{i}'][0])
        it_data['low_zero'].append(result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][0])
        # it_data['base'].append(result_base_random[f'CORnet-S_random'][0])
        it_data['base'].append(result_base['CORnet-S'][0])
        # behavior_data['high_zero'].append(result_high_var[f'CORnet-S_high_zero_{i}'][1])
        behavior_data['high_zero'].append(result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][1])
        # behavior_data['low_zero'].append(result_low_var[f'CORnet-S_low_zero_{i}'][1])
        behavior_data['low_zero'].append(result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][1])
        # behavior_data['base'].append(result_base_random[f'CORnet-S_random'][1])
        behavior_data['base'].append(result_base['CORnet-S'][1])
    plot_data_base(it_data, 'IT Benchmark Zero Trained', labels, 'Zero values in %', 'Score',
                   [0.0, 0.6], base_line=result_base['CORnet-S'][0])
    plot_data_base(behavior_data, 'Behavior Benchmark Zero Trained', labels, 'Zero values in %', 'Score',
                   [0.0, 0.6], base_line=result_base['CORnet-S'][1])
def plot_high_low_nullify():
    high_var_models = []
    high_var_trained_models = []
    low_var_models = []
    low_var_trained_models = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95):
        high_var_models.append(f'CORnet-S_high_zero_{i}')
        high_var_trained_models.append(f'CORnet-S_trained_high_zero_{i}')
        low_var_models.append(f'CORnet-S_low_zero_{i}')
        low_var_trained_models.append(f'CORnet-S_trained_low_zero_{i}')
    # conn = get_connection()
    conn = get_connection('scores_openmind')
    result_high_var = load_scores(conn, high_var_models,
                                  ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_high_var_trained = load_scores(conn, high_var_trained_models,
                                          ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var = load_scores(conn, low_var_models,
                                 ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_low_var_trained = load_scores(conn, low_var_trained_models,
                                         ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base_random = load_scores(conn, ['CORnet-S_random'],
                                     ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    result_base = load_scores(conn, ['CORnet-S'],
                              ['dicarlo.Majaj2015.IT-pls', 'dicarlo.Rajalingham2018-i2n'])
    it_data = {'high_zero_untrained': [], 'low_zero_untrained': [], 'high_zero_trained': [],
               'low_zero_trained': [], 'base_untrained': [], 'base_trained': []}
    behavior_data = {'high_zero_untrained': [], 'low_zero_untrained': [], 'high_zero_trained': [],
                     'low_zero_trained': [], 'base_untrained': [], 'base_trained': []}
    labels = []
    for i in (0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95):
        labels.append(i)
        it_data['high_zero_untrained'].append(result_high_var[f'CORnet-S_high_zero_{i}'][0])
        it_data['high_zero_trained'].append(result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][0])
        it_data['low_zero_untrained'].append(result_low_var[f'CORnet-S_low_zero_{i}'][0])
        it_data['low_zero_trained'].append(result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][0])
        it_data['base_untrained'].append(result_base_random['CORnet-S_random'][0])
        it_data['base_trained'].append(result_base['CORnet-S'][0])
        behavior_data['high_zero_untrained'].append(result_high_var[f'CORnet-S_high_zero_{i}'][1])
        behavior_data['high_zero_trained'].append(result_high_var_trained[f'CORnet-S_trained_high_zero_{i}'][1])
        behavior_data['low_zero_untrained'].append(result_low_var[f'CORnet-S_low_zero_{i}'][1])
        behavior_data['low_zero_trained'].append(result_low_var_trained[f'CORnet-S_trained_low_zero_{i}'][1])
        behavior_data['base_untrained'].append(result_base_random['CORnet-S_random'][1])
        behavior_data['base_trained'].append(result_base['CORnet-S'][1])
    plot_data_base(it_data, 'it_benchmark_zero', labels, 'Zero values in %', 'Score', [0.0, 0.6])
    plot_data_base(behavior_data, 'behavior_benchmark_zero', labels, 'Zero values in %', 'Score', [0.0, 0.6])
def plot_num_params_images(imagenet=False, entry_models=[], all_labels=[], images=[],
                           convergence=False, ax=None, selection=[], log=False,
                           layer_random=layer_random):
    conn = get_connection()
    full = np.mean(get_full(conn, convergence)[selection])
    data2 = {}
    labels = []
    params = {}
    for entry_model, name in itertools.chain(
            [(layer_random, 'Kaiming Normal + Downstream Training (KN+DT)')],
            zip(entry_models, all_labels)):
        short = name.split('(')[1][:-1]
        for img in images:
            name_epoch = f'{short} {img} Imgs'
            data2[name_epoch] = []
            params[name_epoch] = []
        data2[f'{short} Full'] = []
        params[f'{short} Full'] = []
        mod_params = get_model_params(entry_model.keys())
        names = []
        for model in entry_model.keys():
            if model == "CORnet-S_random":
                names.append(model)
            else:
                names.append(f'{model}_epoch_{convergence_epoch[model]}')
                model = model.split('_seed42')[0]
                for img in images:
                    model_img = f'{model}_img{img}'
                    conv = convergence_images[model_img] if model_img in convergence_images else 20
                    names.append(f'{model_img}_epoch_{conv:02d}')
        model_dict = load_scores(conn, names, benchmarks)
        for model in names:
            percent = (np.mean(model_dict[model][selection]) / full) * 100
            if 'img' not in model:
                name_epoch = f'{short} Full'
                base_model = model.partition('_epoch')[0]
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
            else:
                # Recover the image count from names of the form '<base>_img<count>_epoch_<XX>'.
                img = model.split('_')[-3].partition('g')[2]
                base_model = model.partition('_img')[0]
                name_epoch = f'{short} {img} Imgs'
                data2[name_epoch].append(percent)
                params[name_epoch].append(mod_params[base_model])
        labels = labels + [f'{ep} Images' for ep in images] + ['Convergence']
    if imagenet:
        title = 'Imagenet score vs number of parameter'
        y = r'Imagenet performance [% of standard training]'
    else:
        title = 'Brain-Score Benchmark mean(V4, IT, Behavior) vs number of parameter'
        y = r'Brain Predictivity [% of standard training]'
    col = (grey_palette[:len(images) + 1] + blue_palette[:len(images) + 1]
           + green_palette[:len(images) + 1] + grey_palette[:len(images) + 1])
    plot_data_double(data2, {}, '', x_name='Number of trained parameters [Million]', x_labels=[],
                     y_name=y, x_ticks=params, pal=col, data_labels=labels, ylim=[0, 100],
                     x_ticks_2=[], percent=True, ax=ax, million=True, annotate_pos=0, log=log)
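# The image count above is recovered positionally ('..._img1000_epoch_10' -> split('_')[-3]
# -> 'img1000' -> partition('g')[2] -> '1000'), which silently breaks if a run name gains or
# loses an underscore-separated field. A regex states the intent directly; `_IMG_RE` and
# `parse_image_count` are hypothetical additions, not used by the function above.
import re

_IMG_RE = re.compile(r'_img(\d+)_')

def parse_image_count(name):
    """Return the image count embedded in a run name, or None if there is none."""
    match = _IMG_RE.search(name)
    return int(match.group(1)) if match else None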
def image_epoch_score(models, imgs, epochs, selection=[], axes=None, percent=True,
                      make_trillions=False, with_weights=True):
    """Plot score against total training compute (images * epochs * parameters) on a broken x-axis."""
    names = []
    conn = get_connection()
    params = {}
    data = {}
    for model, label in models.items():
        data[label] = []
        params[label] = []
        for img in imgs:
            name = f'{model}_img{img}'
            for epoch in epochs:
                if epoch % 1 == 0:
                    names.append(f'{name}_epoch_{int(epoch):02d}')
                else:
                    names.append(f'{name}_epoch_{epoch:.1f}')
            if name in convergence_images:
                names.append(f'{name}_epoch_{convergence_images[name]}')
        names.append(f'{model}_epoch_{convergence_epoch[model]:02d}')
    if with_weights:
        parameter = get_model_params(models, False)
    else:
        parameter = {x: 1 for x in models}
    names.append('CORnet-S_full_epoch_43')
    model_dict = load_scores(conn, names, benchmarks)
    full = np.mean(model_dict['CORnet-S_full_epoch_43'][selection])
    for model in names:
        if percent:
            frac = (np.mean(model_dict[model][selection]) / full) * 100
        else:
            frac = np.mean(model_dict[model][selection])
        if frac > 0.0:
            if 'img' not in model:
                base_model = model.partition('_epoch')[0]
                if base_model not in models:
                    continue  # reference runs (e.g. CORnet-S_full) are only used for normalization
                epoch = float(model.partition('_epoch_')[2])
                data[models[base_model]].append(frac)
                # Compute budget: full Imagenet (~1.28M images) * epochs * parameters in millions.
                score = 1280000 * epoch * (parameter[base_model] / 1000000)
                print(f'Model {base_model} in epoch {epoch} with full imagenet set '
                      f'leads to score {score} with brain score {frac}')
                params[models[base_model]].append(score)
            else:
                base_model = model.partition('_img')[0]
                n_imgs = int(model.partition('_img')[2].partition('_')[0])
                epoch = float(model.partition('_img')[2].partition('_epoch_')[2])
                score = n_imgs * epoch * (parameter[base_model] / 1000000)
                data[models[base_model]].append(frac)
                params[models[base_model]].append(score)
                print(f'Model {base_model} in epoch {epoch} with {n_imgs} images '
                      f'leads to score {score} with brain score {frac}')
    if len(selection) == 3:
        y = r'\textbf{Brain Predictivity}'  # [\% of standard training]
    else:
        y = r'\textbf{Brain Predictivity} [\% of standard training]'
    for i, ax in enumerate(axes):
        zero_indices = {key: np.array([tick == 0 for tick in xticks])
                        for key, xticks in params.items()}
        if i == 0:
            # Left axis: only the x=0 points.
            ax_data = {key: np.array(values)[zero_indices[key]].tolist()
                       for key, values in data.items()}
            xticks = {key: np.array(values)[zero_indices[key]].tolist()
                      for key, values in params.items()}
            xticklabels = np.array([0])
            ylabel = y
        else:
            # Right axis: everything x > 0, on a log scale.
            ax_data = {key: np.array(values)[~zero_indices[key]].tolist()
                       for key, values in data.items()}
            xticks = {key: np.array(values)[~zero_indices[key]].tolist()
                      for key, values in params.items()}
            # When make_trillions==True this should actually be *10^12, but due to
            # downstream hacks we leave it at ^6.
            xticklabels = np.array([.001, .01, .1, 1, 10, 100, 1000]) * pow(10, 6)
            ax.spines['left'].set_visible(False)
            ylabel = ''
        kwargs = dict(trillion=True) if make_trillions else dict(trillion=True, million_base=True)
        plot_data_double(ax_data, {}, '', x_name='', x_labels=xticklabels, scatter=True,
                         percent=percent, alpha=0.8, ylim=[0, 100], y_name=ylabel,
                         x_ticks=xticks,
                         pal=['#2CB8B8', '#186363', '#ABB2B9', '#ABB2B9', '#ABB2B9',
                              '#259C9C', '#36E3E3', '#9AC3C3'],
                         log=True, x_ticks_2={}, ax=ax, **kwargs, annotate_pos=0)
        # Broken-axis markers, adapted from https://stackoverflow.com/a/32186074/2225200.
        d = .015  # how big to make the diagonal lines in axes coordinates
        kwargs = dict(transform=ax.transAxes, color='#dedede', clip_on=False)
        if i == 0:
            m = 1 / .05
            ax.plot((1 - d * m, 1 + d * m), (-d, +d), **kwargs)
        else:
            kwargs.update(transform=ax.transAxes)
            ax.plot((-d, +d), (-d, +d), **kwargs)
            # Remove y ticks. We can't `ax.yaxis.set_visible(False)` altogether since that
            # would also remove the grid.
            for tic in ax.yaxis.get_major_ticks():
                tic.tick1On = tic.tick2On = False
            ax.set_yticklabels([])
    axes[0].set_ylim(axes[1].get_ylim())
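# `image_epoch_score` expects a pair of axes that together form a broken x-axis (the left
# one only for the zero-compute points). A minimal setup sketch, with hypothetical image
# counts; the width ratio is a plausible choice, not prescribed by the function.
if False:  # example only
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(1, 2, sharey=True, gridspec_kw={'width_ratios': [1, 20]})
    image_epoch_score({'CORnet-S_cluster2_v2_IT_trconv3_bi': 'IT init, selective training'},
                      imgs=[100, 1000, 10000], epochs=[0, 1, 3, 6], selection=[2, 3, 4],
                      axes=axes)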
def plot_performance(imagenet=True, entry_models=[best_brain_avg], all_labels=[],
                     convergence=False, ax=None, selection=[], log=False):
    conn = get_connection()
    names = []
    for model in random_scores.keys():
        if convergence and model in convergence_epoch:
            postfix = f'_epoch_{convergence_epoch[model]:02d}'
        else:
            postfix = '_epoch_06'
        if model != "CORnet-S_random":
            names.append(f'{model}{postfix}')
        else:
            names.append(model)
    performance = load_model_parameter(conn)
    model_dict = load_scores(conn, names, benchmarks)
    # Zero-pad the epoch so the key matches the names built above.
    high = np.mean(model_dict[f'CORnet-S_full_epoch_{convergence_epoch["CORnet-S_full"]:02d}'][selection])
    time2 = []
    data2 = {'Score': []}
    for model, layer in random_scores.items():
        if model == "CORnet-S_random":
            postfix = ''
        elif convergence and model in convergence_epoch:
            postfix = f'_epoch_{convergence_epoch[model]:02d}'
        else:
            postfix = '_epoch_06'
        perc = (np.mean(model_dict[f'{model}{postfix}'][selection]) / high) * 100
        if layer in performance:
            data2['Score'].append(perc)
            time2.append(performance[layer])
    data = {}
    time = {}
    labels = {}
    for entry_model, name in zip(entry_models, all_labels):
        names = []
        for model in entry_model.keys():
            if convergence and model in convergence_epoch:
                postfix = f'_epoch_{convergence_epoch[model]:02d}'
            else:
                postfix = '_epoch_06'
            names.append(f'{model}{postfix}')
        model_dict = load_scores(conn, names, benchmarks)
        time[name] = []
        data[name] = []
        for model, layer in entry_model.items():
            if convergence and model in convergence_epoch:
                postfix = f'_epoch_{convergence_epoch[model]:02d}'
            else:
                postfix = '_epoch_06'
            perc = (np.mean(model_dict[f'{model}{postfix}'][selection]) / high) * 100
            if layer in performance:
                data[name].append(perc)
                time[name].append(performance[layer])
        short = name.split('(')[1][:-1]
        labels[name] = [f'{value.split(".")[0]}_{short}' for value in entry_model.values()]
    if imagenet:
        title = 'Imagenet score vs training time'
        y = r'Imagenet performance [% of standard training]'
    else:
        title = 'Brain-Score Benchmark mean(V4, IT, Behavior) vs training time'
        y = r'Brain Predictivity [% of standard training]'
    plot_data_double(data, data2, '', x_name='Training time [Milliseconds/Epoch]', x_labels=[],
                     y_name=y, x_ticks=time, x_ticks_2=time2, percent=True, data_labels=labels,
                     ax=ax, log=log)