def plot_corr_correct(data, model, layer):
    plt.figure()
    ax = plt.gca()
    plt.xlim((-10, 110))
    layer_names = util.get_model_layers(model)
    layer_names_short = util.get_model_layers(model, True)
    ind = layer_names.index(layer)
    layer1 = layer_names_short[ind]
    # y-axis limits tuned per layer (hyperplane distances vary in scale)
    if layer in ('conv5_1ex', 'conv4_3ex'):
        plt.ylim((-0.1, 1.1))
    else:  # e.g. fc7ex, conv5_2ex (VGG16)
        plt.ylim((-2, 4))
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    plt.plot([xmin, xmax], [0, 0], c='k')
    plt.plot([0, 0], [ymin, ymax], c='k')
    plt.scatter(data.model_comp['animal']['human_acc'][0],
                data.model_comp['animal'][model + '_' + layer][0],
                c='b',
                label='Animal')
    # Non-animal hyperplane distances are negated so that larger values mean
    # higher model confidence in the correct class for both categories.
    plt.scatter(data.model_comp['nonanimal']['human_acc'][0],
                -1 * data.model_comp['nonanimal'][model + '_' + layer][0],
                c='g',
                label='Non-Animal')
    plt.plot([50, 50], [ymin, ymax],
             c='r',
             ls='dashed',
             label='Chance')
    # Relabel x-axis ticks with their absolute integer values
    x_min = 0
    x_max = 101
    plt.xticks(np.arange(x_min, x_max, 50))
    xlocs, xlabs = plt.xticks()
    plt.xticks(np.arange(x_min, x_max, 50), abs(xlocs).astype(int))
    plt.legend(loc="upper left")
    plt.title('Fitting Human to ' + model + ' ' + layer1 + ' Accuracy')
    plt.xlabel('Human Accuracy (%)')
    plt.ylabel('Distance from Hyperplane')

    # Get correlation and significance
    corrl, p_val = data.model_corrs[model + '_' + layer]
    box = ax.get_position()
    ax.set_position(
        [box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    plt.figtext(0.5,
                0.05,
                'Correlation: %.3f (%s), p-value: %.2E' %
                (corrl, data.corr_type, decimal.Decimal(str(p_val))),
                horizontalalignment='center')

    # Save figure
    plt.savefig(util.at_plot_path('corr_%s_%s.pdf' % (model, layer)))
    plt.savefig(util.at_plot_path('corr_%s_%s.png' % (model, layer)))
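# Minimal usage sketch for plot_corr_correct (hypothetical values; assumes the
# Data pipeline from eval_correlation_experiment() below has already been run):
#   data = data_loader.Data()
#   data.load_multi([30, 31, 32, 33, 34])
#   data.eval_participants()
#   data.load_model_data('VGG16', 'svm', range(16))
#   data.calc_model_correlation('VGG16', "Spearman's rho")
#   plot_corr_correct(data, 'VGG16', 'fc7ex')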
def calc_model_correlation(self,
                           model,
                           corr,
                           adjust_for_true_label=True,
                           verbose=True):
    layer_names = util.get_model_layers(model)
    for layer in layer_names:
        self.calc_model_layer_correlation(
            model,
            layer,
            corr,
            adjust_for_true_label=adjust_for_true_label,
            verbose=verbose)
    self.corr_type = corr
def eval_correlation_experiment():
    # Configuration: analysis flags and experiment parameters
    do_eval = True
    save_eval = False
    do_overview = True
    do_easy_hard_plot = False
    do_correlations = False
    do_bootstrap = False
    do_model_eval = True
    model_names_compare = ['VGG16_ft70000', 'VGG16', 'VGG19', 'AlexNet']
    model_names_specific = ['VGG16_ft70000']
    corrs = ["Spearman's rho", "Pearson's r", "Kendall's tau"]
    adjust_corr_for_true_label = True
    if adjust_corr_for_true_label:
        data_filename = 'last_data.p'
    else:
        data_filename = 'last_data_unadjusted.p'

    corr = corrs[0]
    experiment_ids = [30, 31, 32, 33, 34]
    classifier_type = 'svm'
    train_batches = range(16)
    axis_types = ['rf']  #['rf', 'idx']
    bootstrap_count = 300
    bootstrap_size = 180

    # Data evaluation
    if do_eval:
        data = data_loader.Data()
        data.load_multi(experiment_ids)
        data.eval_participants()
        if do_model_eval:
            for model_name in model_names_compare:
                data.load_model_data(model_name, classifier_type,
                                     train_batches)
                data.calc_model_correlation(model_name, corr,
                                            adjust_corr_for_true_label)
        # Bootstrapping on correlations
        if do_bootstrap:
            data.bootstrap(
                experiment_ids,
                model_names_compare,
                classifier_type,
                train_batches,
                corr_type=corr,
                adjust_corr_for_true_label=adjust_corr_for_true_label,
                bootstrap_count=bootstrap_count,
                bootstrap_size=bootstrap_size)
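            # Presumably fills data.model_corrs_bootstrapped and
            # data.human_acc_bootstrapped, which overviewPlot() reads
            # when use_bootstrap_value is True.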
        if save_eval: pickle.dump(data, open(data_filename, 'wb'))
    else:
        data = pickle.load(open(data_filename, 'rb'))

    # Info: Human accuracy
    #eval_human_behaviour(data)

    # Plots
    if do_overview:
        plot_corr_errs = [
            m in model_names_specific for m in model_names_compare
        ]
        #plot_correlation_overview.overviewPlot(data, model_names_compare, axis_type='ridx', use_bootstrap_value=do_bootstrap, plot_corr_errs=plot_corr_errs, use_subplots=True)
        plot_correlation_overview.overviewPlot(
            data,
            model_names_compare,
            axis_type='ridx',
            use_bootstrap_value=do_bootstrap,
            plot_corr_errs=plot_corr_errs,
            use_subplots=True,
            is_unadjusted=not adjust_corr_for_true_label)
        plot_correlation_overview.overviewPlot(
            data,
            model_names_specific,
            axis_type='names',
            use_bootstrap_value=do_bootstrap,
            plot_corr_errs=do_bootstrap,
            use_subplots=False,
            is_unadjusted=not adjust_corr_for_true_label)
    if do_correlations:
        for model_name in model_names_specific:
            layer_names = util.get_model_layers(model_name)
            for layer in layer_names:
                plot_correlations.plot_corr_correct(data,
                                                    model=model_name,
                                                    layer=layer)
    if do_easy_hard_plot:
        data.load_im2path(experiment_ids)
        easy_hard_mod(data, model_names_specific[0], 'fc7ex')
def plot_model_performance(model, classifier_type, train_batches, set_index, set_name, filter_on, filter_category):

    # txt files
    # vehicles_list = '/home/michele/python/rapid_categorization/imagenet_request/n04524313_whole_tree_vehicle.txt'
    # animal_list = '/home/michele/python/rapid_categorization/imagenet_request/whole_tree_animal.txt'
    # structure_list = '/home/michele/python/rapid_categorization/imagenet_request/structure_wnid_tree.txt'
    # if filter_on == 'filter_on':
    #     if filter_category == 'vehicles':
    #         txt_file = vehicles_list
    #     elif filter_category == 'animals':
    #         txt_file = animal_list
    #     elif filter_category == 'structure':
    #         txt_file = structure_list
    #     elif filter_category == 'distractors':
    #         exclude_file_animal = animal_list
    #         exclude_file_structure = structure_list
    #         exclude_file_vehicles = vehicles_list
    #
    #     if not filter_category == 'distractors':
    #         with open(txt_file, 'r') as f:
    #             tree_list = list(f)
    #             tree_list[:] = [x.strip().strip('-') for x in tree_list]
    #     elif filter_category == 'distractors':
    #         with open(exclude_file_animal, 'r') as f:
    #             exc_animal_list = list(f)
    #             exc_animal_list[:] = [x.strip().strip('-') for x in exc_animal_list]
    #         with open(exclude_file_structure, 'r') as f:
    #             exc_structure_list = list(f)
    #             exc_structure_list[:] = [x.strip().strip('-') for x in exc_structure_list]
    #         with open(exclude_file_vehicles, 'r') as f:
    #             exc_vehicle_list = list(f)
    #             exc_vehicle_list[:] = [x.strip().strip('-') for x in exc_structure_list]

    # Hard code set_index to be 16
    layer_names = util.get_model_layers(model)
    # print layer_names
    print "layers len: ", len(layer_names)
    acc_list = []
    for layer in layer_names:
        inputfn = util.get_predictions_filename(model, layer, classifier_type, train_batches, set_index, set_name)
        modeldata = np.load(inputfn)

        # if not filter_category == 'distractors':
        #     zipped = [x for x in zip(modeldata['source_filenames'], modeldata['pred_labels'], modeldata['true_labels'])
        #               if x[0].split('_')[0] in tree_list]
        # elif filter_category == 'distractors':
        #     zipped = [x for x in zip(modeldata['source_filenames'], modeldata['pred_labels'], modeldata['true_labels'])
        #               if not x[0].split('_')[0] in exc_animal_list and not x[0].split('_')[0] in exc_structure_list
        #               and not x[0].split('_')[0] in exc_vehicle_list]

        # ternary operator
        # acc = reduce(lambda accum, data: accum + 1 if data[1] == data[2] else accum, zipped, 0)/ float(len(zipped))

        acc = float(sum(modeldata['pred_labels'] == modeldata['true_labels'])) / float(
            len(modeldata['pred_labels']))

        acc_list += [acc]

        # Incorrectly predicted images for this layer (only the final layer's
        # list survives the loop and is used for the collage below)
        mislabeled = [x for x in zip(modeldata['source_filenames'], modeldata['pred_labels'], modeldata['true_labels'],
                                     modeldata['hyper_dist']) if x[1] != x[2]]

    print acc_list
    print "acc list len: ", len(acc_list)
    x = np.linspace(0, 1, len(layer_names))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # if filter_on == 'filter_on':
    #     ax.set_title('VGG16 Model accuracy on %s images' %(filter_category))
    ax.set_title('%s Model Accuracy on %s Dataset setidx=%d' % (model, set_name, set_index))
    ax.set_xlabel('Relative Layer Depth')
    ax.set_ylabel('Accuracy (fraction correct)')
    # Plot accuracy points
    ax.plot(x, acc_list, 'o')
    # Plot polyfit
    p1_fit = np.polyfit(x, acc_list, 2)
    p1_fn = np.poly1d(p1_fit)
    xs = np.linspace(x[0], x[-1])
    print len(xs)
    ax.plot(xs, p1_fn(xs), 'b')
    plt.show()

    # Show image collage and hyperplane distances of all incorrectly predicted images
    plt.figure()
    for i_zipped in xrange(len(mislabeled)):
        image_name = mislabeled[i_zipped][0]
        image_hyper_dist = mislabeled[i_zipped][3]
        image_path = os.path.join(TURK_images_root, image_name + '.png')
        ax = plt.subplot(10, 9, i_zipped + 1)
        # Display the mislabeled image (assumes image_path points to a readable PNG)
        ax.imshow(plt.imread(image_path))
        ax.axis('off')
        plt.title(str(image_hyper_dist), fontsize=6)


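    # NOTE: the code below appears to come from a separate, cell-based script
    # (see the '## ----- cell N ----- ##' markers); it relies on variables such
    # as n_images, image_fold_path, image_list, output, image, ax,
    # image_dir_root and image_folder that are defined in cells not shown here.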
    nan_idx = 0

    for i_image in xrange(n_images):

        ## ----- cell 7 ----- ##
        curr_image_path = image_fold_path + '/' + image_list[i_image]
        print '## Running image ##', curr_image_path

        output_prob = output['prob'][0]  # the output probability vector for the first image in the batch

        # print 'predicted class is:', output_prob.argmax()

        if output_prob.argmax() == 0:
            print 'Condition met'

            predicted_prob = output_prob[output_prob.argmax()]

            ## ----- cell 9 ----- ##
            # load ImageNet labels
            labels = ['non-animal', 'animal']

            output_label = labels[output_prob.argmax()]
            # print 'output label:', output_label
            # print 'output probs: ', output_prob

            image_class_output_title = image_list[i_image] + '\n' + output_label + '\n' + str(predicted_prob)
            # print image_class_output_title


            im = ax.imshow(image)
            plt.title(image_class_output_title, fontsize=6)
            ax.axis('off')
            nan_idx += 1

    # plt.tight_layout()

    result_title = image_dir_root + image_folder + '_nan_filtered.png'

    plt.savefig(result_title)
def load_model_data(self, model, classifier_type, train_batches):
    layer_names = util.get_model_layers(model)
    for layer in layer_names:
        self.load_model_layer_data(model, layer, classifier_type,
                                   train_batches)
def mid_mod_ims(data, model_name, num, classifier_type, train_batches,
                set_index, set_name):
    n_layers = 15  # hard-coded; assumed to match len(util.get_model_layers(model_name)) for VGG16
    # Calculate stats per image for the sample population
    model_comp = {}  # data for model comparison
    im2lab = Data.im2lab  # im2lab = data.im2lab  # Changed from Jonah's script (line 2 of 2); check 'data' in mid_mod_ims
    im2key = {}  # maps image to index in model comp array
    im2key_ana = {}
    animal_ind = 0
    nonanimal_ind = 0
    layer_names = util.get_model_layers(model_name)
    # store image names with index in dict to hold model comparisons
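    # im2key indexes each image within its own category; im2key_ana builds a
    # combined index with non-animal images offset by a hard-coded 150
    # (presumably the number of animal images).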
    for im in im2lab.keys():
        if im2lab[im] == 'animal':
            im2key[im] = animal_ind
            im2key_ana[im] = animal_ind
            animal_ind += 1
        else:
            im2key[im] = nonanimal_ind
            im2key_ana[im] = nonanimal_ind + 150
            nonanimal_ind += 1
    # Easy/hard scores for the model
    model_comp['animal'] = np.ones([1, animal_ind])
    model_comp['nonanimal'] = np.ones([1, nonanimal_ind])
    model_dist = {}
    model_dist['animal'] = np.ones([n_layers, animal_ind])
    model_dist['nonanimal'] = np.ones([n_layers, nonanimal_ind])
    modeldata = None
    for i_layer, layer_name in enumerate(layer_names):
        # calculate features
        modeldata = np.load(
            util.get_predictions_filename(model_name,
                                          layer_name,
                                          classifier_type=classifier_type,
                                          train_batches=train_batches,
                                          set_index=set_index,
                                          set_name=set_name))
        for index in range(len(modeldata['source_filenames'])):
            impath = modeldata['source_filenames'][index]
            imname = impath.split('/')[-1] + '.jpg'
            model_dist[im2lab[imname]][i_layer][
                im2key[imname]] = modeldata['hyper_dist'][index]
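    # Intermediate layers (conv4_2 through conv5_2) used to rank images by
    # their mean signed hyperplane distance.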
    inds = [
        layer_names.index('conv4_2ex'),
        layer_names.index('conv4_3ex'),
        layer_names.index('conv5_1ex'),
        layer_names.index('conv5_2ex')
    ]
    for index in range(len(modeldata['source_filenames'])):
        temp = []
        impath = modeldata['source_filenames'][index]
        imname = impath.split('/')[-1] + '.jpg'
        for ind in inds:
            if im2lab[imname] == 'animal':
                temp.append(model_dist[im2lab[imname]][ind, im2key[imname]])
            else:
                d = model_dist[im2lab[imname]][ind, im2key[imname]] * (-1)
                temp.append(d)

        model_comp[im2lab[imname]][0][im2key[imname]] = np.mean(temp)

    mc_a_inds = np.argsort(model_comp['animal'][0])
    mc_na_inds = np.argsort(model_comp['nonanimal'][0])

    key2im = {}
    key2im['animal'] = {}
    key2im['nonanimal'] = {}
    for index in range(len(modeldata['source_filenames'])):
        temp = []
        impath = modeldata['source_filenames'][index]
        imname = impath.split('/')[-1] + '.jpg'
        cat = im2lab[imname]
        key = im2key[imname]
        key2im[cat][key] = imname

    top_a = []
    bottom_a = []
    top_na = []
    bottom_na = []

    for im in range(num):
        a = key2im['animal'][mc_a_inds[im]]
        bottom_a.append(a)
        na = key2im['nonanimal'][mc_na_inds[im]]
        bottom_na.append(na)

    for im in range(-1, -1 - num, -1):
        a = key2im['animal'][mc_a_inds[im]]
        top_a.append(a)
        na = key2im['nonanimal'][mc_na_inds[im]]
        top_na.append(na)


#
#    #Plot easy animal
#    row = 1
#    col = 8
#    count = 0
#    f, subp =  plt.subplots(row, col,squeeze=False)
#    for r in range(row):
#        for c in range(col):
#            im_name = top_a[count]
#            count +=1
#            im_path = os.path.join(base_path,im_name)
#            imn = im_name.split('.')[0]+'.jpg'
#            img = Image.open(im_path)
#            im_mat = np.asarray(img.convert('L'))
#            subp[r][c].imshow(im_mat,cmap='gray')
#            subp[r][c].get_xaxis().set_ticks([])
#            subp[r][c].get_yaxis().set_ticks([])
#            subp[r][c].set_title(imn)
#
#            hypd = model_comp['animal'][0][im2key[imn]]
#            subp[r][c].set_xlabel('Model Conf: '+str(np.round(hypd,1)))
#    f.suptitle('Easiest Animals for VGG16 Intermediate Levels')
#
#     #Plot hard animal
#    row = 1
#    col = 8
#    count = 0
#    f, subp =  plt.subplots(row, col,squeeze=False)
#    for r in range(row):
#        for c in range(col):
#            im_name = bottom_a[count]
#            count +=1
#            im_path = os.path.join(base_path,im_name)
#            imn = im_name.split('.')[0]+'.jpg'
#            img = Image.open(im_path)
#            im_mat = np.asarray(img.convert('L'))
#            subp[r][c].imshow(im_mat,cmap='gray')
#            subp[r][c].get_xaxis().set_ticks([])
#            subp[r][c].get_yaxis().set_ticks([])
#            subp[r][c].set_title(imn)
#
#            hypd = model_comp['animal'][0][im2key[imn]]
#            subp[r][c].set_xlabel('Model Conf: '+str(np.round(hypd,1)))
#    f.suptitle('Hardest Animals for VGG16 Intermediate Levels')
#
#    #Plot easy nonanimal
#    row = 1
#    col = 8
#    count = 0
#    f, subp =  plt.subplots(row, col,squeeze=False)
#    for r in range(row):
#        for c in range(col):
#            im_name = top_na[count]
#            count +=1
#            im_path = os.path.join(base_path,im_name)
#            imn = im_name.split('.')[0]+'.jpg'
#            img = Image.open(im_path)
#            im_mat = np.asarray(img.convert('L'))
#            subp[r][c].imshow(im_mat,cmap='gray')
#            subp[r][c].get_xaxis().set_ticks([])
#            subp[r][c].get_yaxis().set_ticks([])
#            subp[r][c].set_title(imn)
#
#            hypd = model_comp['nonanimal'][0][im2key[imn]]
#            subp[r][c].set_xlabel('Model Conf: '+str(np.round(hypd,1)))
#    f.suptitle('Easiest Non-Animals for VGG16 Intermediate Levels')
#
#     #Plot hard nonanimal
#    row = 1
#    col = 8
#    count = 0
#    f, subp =  plt.subplots(row, col,squeeze=False)
#    for r in range(row):
#        for c in range(col):
#            im_name = bottom_na[count]
#            count +=1
#            im_path = os.path.join(base_path,im_name)
#            imn = im_name.split('.')[0]+'.jpg'
#            img = Image.open(im_path)
#            im_mat = np.asarray(img.convert('L'))
#            subp[r][c].imshow(im_mat,cmap='gray')
#            subp[r][c].get_xaxis().set_ticks([])
#            subp[r][c].get_yaxis().set_ticks([])
#            subp[r][c].set_title(imn)
#
#            hypd = model_comp['nonanimal'][0][im2key[imn]]
#            subp[r][c].set_xlabel('Model Conf: '+str(np.round(hypd,1)))
#    f.suptitle('Hardest Non-Animals for VGG16 Intermediate Levels')

    # Background scatter: model confidence for every image at every layer
    lvs = []
    confids = []

    for cat in model_dist:
        for lev in range(n_layers):
            for im in range(len(model_dist[cat][lev])):
                lvs.append(lev)
                if cat == 'animal':
                    confids.append(model_dist[cat][lev, im])
                else:
                    d = (-1) * model_dist[cat][lev, im]
                    confids.append(d)

    # Plot of model confidence across layers for the easiest/hardest images
    model_dist['nonanimal'] = model_dist['nonanimal'] * (-1)
    combo_easy = np.concatenate((top_a, top_na))
    combo_hard = np.concatenate((bottom_a, bottom_na))
    mod_perf_easy = np.ones([n_layers, len(combo_easy)])
    for lyr in range(n_layers):
        for i in range(len(combo_easy)):
            im = combo_easy[i]
            key = im2key[im]
            mod_perf_easy[lyr, i] = model_dist[im2lab[im]][lyr][im2key[im]]
    mod_perf_hard = np.ones([n_layers, len(combo_hard)])
    for lyr in range(n_layers):
        for i in range(len(combo_hard)):
            im = combo_hard[i]
            key = im2key[im]
            mod_perf_hard[lyr, i] = model_dist[im2lab[im]][lyr][im2key[im]]

    fig, ax1 = plt.subplots()
    ax1.set_ylim([-2, 5])
    ax1.plot([-0.5, n_layers - 0.5], [0, 0], color='0', ls='-')
    ax1.scatter(lvs, confids, color='0.8', label='All Images')
    ax1.plot([7.8, 7.8], [-2, 5], color='0.5', ls='--')
    ax1.plot([11.2, 11.2], [-2, 5],
             color='0.5',
             ls='--',
             label='Intermediate Levels')
    ax1.plot(xrange(n_layers),
             np.mean(mod_perf_easy, 1),
             'go',
             label='Easiest for Mid-Layers')
    ax1.set_ylabel('Model Confidence')
    plt.xticks(rotation=5)
    p1_fit = np.polyfit(xrange(n_layers), np.mean(mod_perf_easy, 1), 3)
    p1_fn = np.poly1d(p1_fit)
    xs = np.linspace(0, n_layers - 1)
    ax1.plot(xs, p1_fn(xs), 'g')

    ax1.plot(xrange(n_layers),
             np.mean(mod_perf_hard, 1),
             'yo',
             label='Hardest for Mid-Layers')

    p2_fit = np.polyfit(xrange(n_layers), np.mean(mod_perf_hard, 1), 3)
    p2_fn = np.poly1d(p2_fit)
    xs = np.linspace(0, n_layers - 1)
    ax1.plot(xs, p2_fn(xs), 'y')

    ax1.set_xlim([-0.5, n_layers - 0.5])
    plt.xticks(range(n_layers),
               util.get_model_layers(model_name, short=True),
               rotation=70)
    plt.sca(ax1)
    ax1.set_xlabel('Layer (Increasing Complexity)')
    plt.title('Model Confidence in Mid-Layer Extreme Images (%s)' % model_name)
    ax1.legend(loc="upper left")
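    # Note: unlike plot_corr_correct above, this function does not save or show
    # the figure; the caller is presumably expected to call plt.show() or
    # plt.savefig() afterwards.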
def overviewPlot(data, model_names, axis_type, use_bootstrap_value, plot_corr_errs, use_subplots, is_unadjusted):
    acc_title = 'Model Accuracy'
    corr_title = 'Correlation with Humans'
    if is_unadjusted:
        save_prefix = 'unadjusted_'
        unadjusted_suffix = ' (Uncorrected)'
    else:
        save_prefix = ''
        unadjusted_suffix = ''
    if len(model_names) == 1:
        acc_title = util.get_model_human_name(model_names[0], True)+' ' + acc_title
        save_prefix += model_names[0]
    else:
        save_prefix += 'comp'
    if not isinstance(plot_corr_errs, list):
        plot_corr_errs = [plot_corr_errs] * len(model_names)
    if use_subplots:
        fig, axs = plt.subplots(1, 2)
        ax1 = axs[0]
        ax2 = axs[1]
        ax2.yaxis.tick_right()
        acc_tick_color = 'k'
        corr_tick_color = 'k'
        ax1.set_title(acc_title)
        ax2.set_title(corr_title)
        fig.set_size_inches(8, 6)
        fig.subplots_adjust(bottom=0.1, top=0.8)
        fig.suptitle('Model Comparison' + unadjusted_suffix, fontsize=14, weight='bold')
        if not is_unadjusted:
            ax1.text(-0.05, 1.03, '(a)', horizontalalignment='left', verticalalignment='bottom', transform=ax1.transAxes, fontsize=14, weight='bold')
            # Somehow cannot align this with ax2
            ax1.text(1.10, 1.03, '(b)', horizontalalignment='left', verticalalignment='bottom', transform=ax1.transAxes, fontsize=14, weight='bold')
    else:
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        axs = [ax1] # Only need to modify one x axis
        acc_tick_color = 'b'
        corr_tick_color = 'r'
        fig.set_size_inches(8 + is_unadjusted * 2, 6)
        fig.subplots_adjust(bottom=0.2, top=0.92)
        plt.title(acc_title + ' and ' + corr_title + unadjusted_suffix, fontsize=14, weight='bold', y=1.02)
        if not is_unadjusted:
            ax1.text(-0.07, 1.04, '(c)', horizontalalignment='left', verticalalignment='bottom', transform=ax1.transAxes, fontsize=14, weight='bold')
    n_layers_max = 0
    x_max = 0
    x_indent = 0.5
    for model_name,plot_corr_err in zip(model_names, plot_corr_errs):
        # Get layer info
        layer_names = util.get_model_layers(model_name)
        layer_names_short = util.get_model_layers(model_name, True)
        n_layers = len(layer_names)
        n_layers_max = max(n_layers_max, n_layers)
        # Collect correlation and accuracy
        corrs = np.ones((1, n_layers))
        # corr_errs is always allocated: it is filled from the bootstrap when
        # available and used for the peak-correlation range printed below.
        corr_errs = np.ones((2, n_layers))
        accs = np.ones((1, n_layers))
        for ind,layer_name in enumerate(layer_names):
            lidx = model_name + '_' + layer_name
            if use_bootstrap_value and lidx in data.model_corrs_bootstrapped:
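                # 2.5th/97.5th percentiles give a 95% bootstrap confidence
                # interval; the plotted correlation is the bootstrap median.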
                corrs_all = data.model_corrs_bootstrapped[lidx]
                corr_errs[0][ind] = np.percentile(corrs_all, 2.5)
                corr_errs[1][ind] = np.percentile(corrs_all, 97.5)
                corrs[0][ind] = np.median(corrs_all)
            else:
                if use_bootstrap_value:
                    print 'Warning: %s not bootstrapped!' % lidx
                corrs[0][ind] = data.model_corrs[lidx][0]
            accs[0][ind] = data.model_accs[lidx]
        # Define x axis
        if axis_type == 'rf':
            x = util.get_model_rf_sizes(model_name)
        elif axis_type == 'ridx':
            x = np.linspace(0, 1, n_layers)
            x_indent /= 5
        else:
            x = xrange(n_layers)
        x_max = max(x_max, x[-1])
        # Plot accuracy
        marker = util.get_model_plot_marker(model_name)
        acc_color = util.get_model_plot_color(model_name, plot_type='acc' if not use_subplots else None)
        ax1.plot(x,accs[0][:]*100,marker,color=acc_color)
        ax1.plot([], [], '-'+marker, color=acc_color, label=util.get_model_human_name(model_name))
        p1_fit = np.polyfit(x,accs[0][:]*100,2)
        p1_fn = np.poly1d(p1_fit)
        xs = np.linspace(x[0], x[-1])
        ax1.plot(xs,p1_fn(xs),color=acc_color)
        # Plot correlation
        corr_color = util.get_model_plot_color(model_name, plot_type='corr' if not use_subplots else None)
        if plot_corr_err:
            corr_err_color = util.get_model_plot_color(model_name, plot_type='corr' if not use_subplots else None, brighten=True)
            fits = [None]*2
            for ierr in (0,1):
                p2_fit = np.polyfit(x, corr_errs[ierr][:], 3)
                p2_fn = np.poly1d(p2_fit)
                fits[ierr] = p2_fn(xs)
            ax2.fill_between(xs, fits[0], fits[1], where=fits[1] >= fits[0], color='none', facecolor=corr_err_color, interpolate=True)
        ax2.plot(x,corrs[0][:],marker,color=corr_color)
        p2_fit = np.polyfit(x,corrs[0][:],3)
        p2_fn = np.poly1d(p2_fit)
        ax2.plot(xs,p2_fn(xs),color=corr_color)
        peak_corr_index = np.argmax(corrs[0])
        corr_range = (corr_errs[1][peak_corr_index] - corr_errs[0][peak_corr_index])/2
        print '%s peak correlation: %.3f pm %.3f' % (model_name, corrs[0][peak_corr_index], corr_range)
    # Plot human accuracy
    if use_bootstrap_value:
        hum_acc_errs = np.percentile(data.human_acc_bootstrapped, [2.5, 97.5])
        hum_acc = np.median(data.human_acc_bootstrapped) * 100
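        # Dashed lines mark the 95% bootstrap interval of human accuracy.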
        for hum_acc_err in hum_acc_errs:
            ax1.plot([-x_indent, x_max + x_indent], [hum_acc_err*100, hum_acc_err*100], color='0.5', ls='--')
    else:
        hum_acc = int(np.mean(data.hum_im_acc) * 100)
    ax1.plot([-x_indent, x_max + x_indent], [hum_acc, hum_acc], color='0.5', ls='-')
    # Accuracy axis
    ax1.set_ylim([60 if use_subplots else 48, 100])
    ax1.set_ylabel('Accuracy (%)', color=acc_tick_color)
    if use_subplots:
        ax1.text(x_max * 1.9 / 4, hum_acc - 3, 'Human Accuracy', color='.5')
    else:
        ax1.text(x_max*3/4, hum_acc+3, 'Human Accuracy', color='.5')
    for tick in ax1.get_yticklabels():
        tick.set_color(acc_tick_color)
    # Correlation axis
    ax2.set_ylim([0, 0.45 if (use_subplots and not is_unadjusted) else 1])
    ax2.yaxis.set_label_position("right")
    ax2.set_ylabel(data.corr_type + ' Correlation', color=corr_tick_color)
    if not use_subplots:
        for tick in ax2.get_yticklabels():
            tick.set_color(corr_tick_color)
    # x axis
    for ax in axs:
        ax.set_xlim([-x_indent, x_max + x_indent])
        if axis_type == 'rf':
            ax.set_xlabel('Receptive field size')
        elif axis_type == 'idx':
            ax.set_xlabel('Layer index')
        elif axis_type == 'ridx':
            ax.set_xlabel('Relative layer depth')
        else:
            ax.set_xticks(x)
            ax.set_xticklabels(layer_names_short, rotation=70)
            ax.set_xlabel('Layer (Increasing Complexity)')

    # Legend
    if use_subplots:
        ax1.legend(bbox_to_anchor=(0., 1.07, 2.2, 0.102), ncol=4, mode='expand', borderaxespad=0)

    # Done
    fn = util.at_plot_path(save_prefix + '_' + axis_type + '_overview.pdf')
    plt.savefig(fn)
    plt.savefig(util.at_plot_path(save_prefix + '_' + axis_type + '_overview.png'))
    print 'Saved to %s' % fn