Example #1
def plot_roc(l, d):
    mkdir(OUT_ROOT)
    top_n_root = os.path.join(fs_root(), 'top_n')

    checkpoints = \
        [
            # Trained on cold
            'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

            # Trained on small oxford
            'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

            # Trained on large oxford
            'ha0_lotriplet_vl64',

            # Trained on Pittsburgh
            'pittsnetvlad',

            # ImageNet
            'offtheshelf',

            # III with hard positives
            'ha6_loevil_triplet_muTrue_vl64'
        ]

    queries = [
        'oxford_night',
        'freiburg_cloudy',
        'oxford_overcast',
        'freiburg_sunny',
        'oxford_snow',
        'pittsburgh_query',
        'oxford_sunny',

    ]

    titles = [
        'Oxford RobotCar, night',
        'Cold Freiburg, cloudy',
        'Oxford RobotCar, overcast',
        'Cold Freiburg, sunny',
        'Oxford RobotCar, snow',
        'Pittsburgh',
        'Oxford RobotCar, sunny',
    ]

    losses = [

        'I Cold Freiburg',
        'II Oxford (small)',
        'III Oxford (large)',
        'IV Pittsburgh',
        'V ImageNet (off-the-shelf)',

        '\\textit{III Oxford (large) + HP}',
    ]

    fill_styles = [
        'none',
        'none',
        'none',
        'full',
        'none',

        'full',

    ]

    markers = [
        '|',
        '.',
        'o',
        '*',
        '',

        'o',
    ]

    lines = [
        '--',

        '-',
        '-',
        '-.',
        ':',

        '-'
    ]

    colors = [
        '#1cad62',

        '#00BFFF',
        '#1E90FF',  # Triplet
        '#8c0054',
        '#000000',

        '#1934e6',  # Triplet HP
    ]

    rows = 2
    cols = 4

    f, axs = plt.subplots(rows, cols, constrained_layout=False)
    if rows == 1:
        axs = np.expand_dims(axs, 0)
    if cols == 1:
        axs = np.expand_dims(axs, 1)
    f.tight_layout()
    f.set_figheight(4.5)  # 8.875in textheight
    f.set_figwidth(8.5)  # 6.875in textwidth

    for i, query in enumerate(queries):
        print(query)

        print_gt = True

        if query.startswith('freiburg'):
            t = 1.5
        else:
            t = 15.0

        setting = 'l{}_dim{}'.format(l, d)

        min_y = 1000
        max_y = 0

        for j, (cp_name, m, line, color) in enumerate(
                zip(checkpoints, cycle(markers), cycle(lines), cycle(colors))):

            t_n_file = os.path.join(top_n_root, setting, '{}_{}.pickle'.format(query, cp_name))
            if not os.path.exists(t_n_file):
                print('Missing: {}'.format(t_n_file))
                continue
            print(t_n_file)

            [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx] = load_pickle(t_n_file)
            top_g_dists = np.array(top_g_dists)

            if print_gt:
                print_gt = False
                X = np.linspace(0, t, num=50)
                Y = [float(sum(gt_g_dist < x)) / float(len(gt_g_dist)) * 100 for x in X]
                ax = axs[i % rows, i // rows]
                width = 0.75

                ax.plot(X, Y, label='Upper bound', linewidth=width, c='#000000')
                ax.plot([0], [0], linewidth=0, label=' ')
                ax.plot([0], [0], linewidth=0, label='\\textbf{Training datasets:}')
                ax.title.set_text(titles[i])
                ax.set_xlim([0, t])
                ax.grid(True)

            if 'ha6_loevil_triplet_muTrue_vl64' in cp_name:
                ax = axs[i % rows, i // rows]
                ax.plot([0], [0], linewidth=0, label=' ')
                ax.plot([0], [0], linewidth=0, label='\\textbf{With our mining:}')

            t_1_d = np.array([td[0] for td in top_g_dists])
            X = np.linspace(0, t, num=50)

            Y = [float(sum(t_1_d < x)) / float(len(t_1_d)) * 100 for x in X]

            min_y = min(np.min(np.array(Y)), min_y)
            max_y = max(np.max(np.array(Y)), max_y)

            ax = axs[i % rows, i // rows]
            width = 0.75
            ax.plot(X, Y, label=losses[j], linestyle=line, marker=m, linewidth=width, markevery=j % rows + cols,
                    c=color, markersize=3, fillstyle=fill_styles[j])

        ax = axs[i % rows, i // rows]
        ax.set_xlim([0, t])
        ax.set_ylim([min_y, min(max_y + 5, 99)])

        # Major x-ticks at thirds of the range, minor ticks at quarters of those
        major_ticks_x = np.arange(0, t, t / 3)
        minor_ticks_x = np.arange(0, t, t / 3 / 4)

        y_step = 20
        if 'night' in query:
            y_step /= 2

        major_ticks_y = np.arange(min_y, min(max_y + 5, 99), y_step)
        minor_ticks_y = np.arange(min_y, min(max_y + 5, 99), 5)

        ax.set_xticks(major_ticks_x)
        ax.set_xticks(minor_ticks_x, minor=True)
        ax.set_yticks(major_ticks_y)
        ax.set_yticks(minor_ticks_y, minor=True)

        # Enable both grids, with the minor grid drawn fainter
        ax.grid(which='both')
        ax.grid(which='minor', alpha=0.2)
        ax.grid(which='major', alpha=0.5)

    out_name = os.path.join(OUT_ROOT, '{}_training_region_roc.pdf'.format(setting))

    axs[-1, -1].axis('off')

    for i in range(cols):
        axs[-1, i].set_xlabel('Distance threshold $d$ [m]')

    for i in range(rows):
        axs[i, 0].set_ylabel('Correctly localized [\\%]')

    left = 0.0  # the left side of the subplots of the figure
    right = 1.0  # the right side of the subplots of the figure
    bottom = 0.0  # the bottom of the subplots of the figure
    top = 1.0  # the top of the subplots of the figure
    wspace = 0.2  # the amount of width reserved for space between subplots,
    # expressed as a fraction of the average axis width
    hspace = 0.25  # the amount of height reserved for space between subplots,
    # expressed as a fraction of the average axis height

    plt.subplots_adjust(wspace=wspace, hspace=hspace, left=left, right=right, bottom=bottom, top=top)

    handles, labels = axs[0, 0].get_legend_handles_labels()

    axs[-1, -1].legend(handles, labels, loc='lower left', bbox_to_anchor=(-0.075, 0.0), ncol=1, frameon=True,
                       borderaxespad=0.0)

    plt.savefig(out_name, bbox_inches='tight', pad_inches=0)
    plt.savefig(out_name.replace('.pdf', '.png'), bbox_inches='tight', pad_inches=0)
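
# Distilled sketch (numpy only, not part of the original script) of the curve
# plotted above: for each distance threshold x, the percentage of queries whose
# top-1 retrieval lies within x meters of the ground-truth position.
import numpy as np

t_1_d = np.array([0.8, 3.0, 6.0, 40.0])      # top-1 geometric distances [m]
X = np.linspace(0, 15.0, num=50)             # thresholds, as in plot_roc
Y = [100.0 * np.mean(t_1_d < x) for x in X]  # 'correctly localized' [%]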
Example #2
def compile(l, d, code):
    mkdir(OUT_ROOT)
    top_n_root = os.path.join(srv_root(), 'neurips/top_n')

    queries = [
        'oxford_night',
        'oxford_overcast',
        'oxford_snow',
        'oxford_sunny',
        'pittsburgh_query',
    ]

    min_ys = [0, 40, 50, 50, 10]
    major_s = [10, 10, 10, 10, 10]
    minor_s = [2.5 if m == 10 else 1.0 for m in major_s]

    titles = [
        'Oxford RobotCar, night',
        'Oxford RobotCar, overcast',
        'Oxford RobotCar, snow',
        'Oxford RobotCar, sunny',
        'Pittsburgh',
    ]

    checkpoints = [
        '/scratch_net/tellur_third/user/efs/data/checkpoints/offtheshelf/offtheshelf/offtheshelf',
        '/scratch_net/tellur_third/user/efs/data/checkpoints/pittsburgh30/pittsnetvlad/vd16_pitts30k_conv5_3_vlad_preL2_intra_white',
        '/scratch_net/tellur_third/user/efs/cvpr_aws_logs/learnlarge/triplet_xy_000/epoch-checkpoint-2',
        '/scratch_net/tellur_third/user/efs/cvpr_aws_logs/learnlarge/quadruplet_xy_000/epoch-checkpoint-2',
        '/srv/beegfs02/scratch/toploc/data/mai_2020_logs/ha0_lolazy_triplet_muTrue_renone_vl64_pca_neurips_002/epoch-checkpoint-2',
        '/srv/beegfs02/scratch/toploc/data/mai_2020_logs/ha0_lolazy_quadruplet_muTrue_renone_vl64_pca_neurips_002/epoch-checkpoint-2',
        '/scratch_net/tellur_third/user/efs/home_logs/learnlarge_ral/huber_distance_triplet_xy_000/epoch-checkpoint-2',
        '/srv/beegfs02/scratch/toploc/data/mai_2020_logs/ha0_lologratio_ma15_mi15_muTrue_renone_tu1_vl64_pca_neurips_002/epoch-checkpoint-1',
        '/srv/beegfs02/scratch/toploc/data/mai_2020_logs/ha0_loms_loss_msTrue_muTrue_renone_tu1_vl64_pca_neurips_001/epoch-checkpoint-0',
        '/srv/beegfs02/scratch/toploc/data/mai_2020_logs/al0.8_be15_ha0_lowms_ma15_mi15_msTrue_muTrue_renone_tu1_vl64_pca_neurips_000/epoch-checkpoint-0',
    ]

    fill_styles = [
        'none',
        'none',
        'none',
        'none',
        'none',
        'none',
        'none',
        'none',
        'none',
        'full',
    ]

    markers = [
        '',
        "^",
        "^",
        "s",
        "^",
        "s",
        "^",
        'v',
        "o",
        "d",
    ]

    losses = [
        'Off-the-shelf \\cite{arandjelovic2016netvlad}',
        'Triplet trained on Pittsburgh \\cite{arandjelovic2016netvlad}',
        'Triplet \\cite{arandjelovic2016netvlad}',
        'Quadruplet \\cite{chen2017beyond}',
        'Lazy triplet \\cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \\cite{angelina2018pointnetvlad}',
        'Trip.~+ Huber dist. \\cite{thoma2020geometrically}',
        'Log-ratio \\cite{kim2019deep}',
        'Multi-similarity \\cite{wang2019multi}',
        'Ours',
    ]

    lines = [
        ':',
        ':',
        '--',
        '--',
        '-.',
        '-.',
        '--',
        '-.',
        '--',
        '-',
    ]

    colors = [
        '#000000',
        '#ff6b1c',
        '#f03577',
        '#5f396b',
        '#1934e6',
        '#0e6606',
        '#B0C4DE',
        '#990000',
        '#663300',
        '#11d194',
    ]

    setting = 'l{}_dim{}'.format(l, d)
    print(setting)

    rows = 2
    cols = 3

    f, axs = plt.subplots(rows, cols, constrained_layout=False)
    if rows == 1:
        axs = np.expand_dims(axs, 0)
    if cols == 1:
        axs = np.expand_dims(axs, 1)
    f.tight_layout()
    f.set_figheight(8)  # 8.875in textheight
    f.set_figwidth(10)  # 6.875in textwidth

    for i, query in enumerate(queries):
        print(query)

        print_gt = True

        t = 25.0
        l = 0.0  # NOTE: shadows the function argument; lookups always use the l=0.0 setting
        out_setting = 'l{}_dim{}'.format(l, d)
        setting = out_setting

        min_y = 1000
        max_y = 0

        for j, (checkpoint, loss, m, line, color) in enumerate(
                zip(checkpoints, losses, cycle(markers), cycle(lines),
                    cycle(colors))):

            cp_name = checkpoint.split('/')[-2]
            cp_name = ''.join(
                os.path.basename(cp_name).split('.'))  # Removing '.'
            cp_name += '_e{}'.format(checkpoint[-1])

            t_n_file = os.path.join(top_n_root, setting,
                                    '{}_{}.pickle'.format(query, cp_name))
            if not os.path.exists(t_n_file):
                print('Missing: {}'.format(t_n_file))
                continue
            print(t_n_file)

            [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist,
             ref_idx] = load_pickle(t_n_file)
            top_g_dists = np.array(top_g_dists)

            if print_gt:
                print_gt = False
                X = np.linspace(0, t, num=50)
                Y = [
                    float(sum(gt_g_dist < x)) / float(len(gt_g_dist)) * 100
                    for x in X
                ]
                ax = axs[i % rows, i // rows]
                width = 0.75

                ax.plot(X,
                        Y,
                        label='Upper bound',
                        linewidth=width,
                        c='#000000')
                ax.title.set_text(titles[i])
                ax.set_xlim([0, t])
                ax.grid(True)

                x_min = X[bisect.bisect(Y, min_ys[i])]

            t_1_d = np.array([td[0] for td in top_g_dists])
            X = np.linspace(0, t, num=50)

            Y = [float(sum(t_1_d < x)) / float(len(t_1_d)) * 100 for x in X]

            min_y = min(np.min(np.array(Y)), min_y)
            max_y = max(np.max(np.array(Y)), max_y)

            ax = axs[i % rows, i // rows]
            width = 0.75
            ax.plot(X,
                    Y,
                    label=loss,
                    linestyle=line,
                    marker=m,
                    linewidth=width,
                    markevery=j % rows + cols,
                    c=color,
                    markersize=3,
                    fillstyle=fill_styles[j % len(fill_styles)])


        ax = axs[i % rows, i // rows]
        ax.set_xlim([x_min, t])
        ax.set_ylim([min_ys[i], min(max_y + 5, 100)])

        # Major x-ticks at fifths of the range, minor ticks at quarters of those
        major_ticks_x = np.arange(x_min // (t / 5) * (t / 5), t, t / 5)[1:]
        minor_ticks_x = np.arange(x_min // (t / 5 / 4) * (t / 5 / 4), t,
                                  t / 5 / 4)[1:]

        major_ticks_y = np.arange(min_ys[i], min(max_y + 5, 100), major_s[i])
        minor_ticks_y = np.arange(min_ys[i], min(max_y + 5, 100), minor_s[i])

        ax.set_xticks(major_ticks_x)
        ax.set_xticks(minor_ticks_x, minor=True)
        ax.set_yticks(major_ticks_y)
        ax.set_yticks(minor_ticks_y, minor=True)

        # Enable both grids, with the minor grid drawn fainter
        ax.grid(which='both')
        ax.grid(which='minor', alpha=0.2)
        ax.grid(which='major', alpha=0.5)

    out_setting = out_setting.replace('.', '')
    out_name = os.path.join(OUT_ROOT, '{}_neurips_roc.pdf'.format(out_setting))

    axs[-1, -1].axis('off')

    for i in range(cols):
        axs[-1, i].set_xlabel('Distance threshold $d$ [m]')

    for i in range(rows):
        axs[i, 0].set_ylabel('Correctly localized [\\%]')

    handles, labels = axs[0, 0].get_legend_handles_labels()

    left = 0.0  # the left side of the subplots of the figure
    right = 1.0  # the right side of the subplots of the figure
    bottom = 0.23  # the bottom of the subplots of the figure
    top = 1.0  # the top of the subplots of the figure
    wspace = 0.2  # the amount of width reserved for space between subplots,
    # expressed as a fraction of the average axis width
    hspace = 0.2  # the amount of height reserved for space between subplots,
    # expressed as a fraction of the average axis height

    plt.subplots_adjust(wspace=wspace,
                        hspace=hspace,
                        left=left,
                        right=right,
                        bottom=bottom,
                        top=top)

    axs[-1, -1].legend(handles,
                       labels,
                       bbox_to_anchor=(0.0, 0.5),
                       loc='center left',
                       ncol=1,
                       borderaxespad=0.,
                       frameon=True,
                       fontsize='medium')  # mode="expand",

    plt.savefig(out_name, bbox_inches='tight', pad_inches=0)

    plt.savefig(out_name.replace('.pdf', '.pgf'),
                bbox_inches='tight',
                pad_inches=0)

    # Test
    plt.show()
    CHECKPOINT = FLAGS.checkpoint
    IMG_ROOT = FLAGS.img_root
    LARGE_SIDE = FLAGS.large_side
    LOG_DIR = FLAGS.log_dir
    OUT_ROOT = FLAGS.out_root
    QUERY_CSV = FLAGS.query_csv
    REF_CSV = FLAGS.ref_csv
    RESCALE = FLAGS.rescale
    SMALL_SIDE = FLAGS.small_side
    TOP_N_PICKLE = FLAGS.top_n_pickle
    VLAD_CORES = FLAGS.vlad_cores

    OUT_FOLDER = os.path.join(
        OUT_ROOT,
        os.path.splitext(os.path.basename(TOP_N_PICKLE))[0])
    mkdir(OUT_FOLDER)

    TUPLES_PER_BATCH = 1  # Don't change this, save thread does not handle larger sizes

    CPU_IN_QUEUE = Queue(maxsize=0)
    GPU_IN_QUEUE = Queue(maxsize=10)
    GPU_OUT_QUEUE = Queue(maxsize=0)

    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    if not os.path.exists(OUT_ROOT):
        os.makedirs(OUT_ROOT)

    LOG = open(os.path.join(LOG_DIR, 'train_log.txt'), 'a')
    log('Running {} at {}.'.format(
Example #4
import os

import numpy as np
from distloss.cold_helper import get_recursive_file_list, parse_file_list

from learnlarge.util.helper import mkdir, fs_root
from learnlarge.util.io import load_csv
from learnlarge.util.io import save_csv

out_root = os.path.join(fs_root(), 'lists')
N_SAMPLES = 5000

mkdir(out_root)

# Oxford
place = 'oxford'


def img_path(info):
    date = info[0]
    folder = info[1]
    t = info[2]
    return os.path.join('datasets/oxford_512', '{}_stereo_centre_{:02d}'.format(date, int(folder)), '{}.png'.format(t))
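
# Hypothetical example (timestamp made up) of the path produced by img_path():
# img_path(('2014-12-09-13-21-02', '1', '1418132725886256'))
# -> 'datasets/oxford_512/2014-12-09-13-21-02_stereo_centre_01/1418132725886256.png'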


# Preselected reference
preselected_ref = os.path.join(fs_root(), 'data/learnlarge/shuffled/train_ref_000.csv')
p_meta = load_csv(preselected_ref)
p_meta['path'] = [img_path((d, f, t)) for d, f, t in
                  zip(p_meta['date'], p_meta['folder'], p_meta['t'])]
idxs_to_keep = np.linspace(0, len(p_meta['path']), num=N_SAMPLES, endpoint=False, dtype=int)
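
# The line above picks N_SAMPLES evenly spaced indices, e.g.
# np.linspace(0, 10, num=4, endpoint=False, dtype=int) -> array([0, 2, 5, 7]).
# A plausible (assumed, not original) continuation would apply them per column:
# sampled = {k: [col[i] for i in idxs_to_keep] for k, col in p_meta.items()}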
def get_top_n():
    # check if complete:
    ld_checkpoints = get_checkpoints('obm')

    ld_cp_names = []
    for cp in ld_checkpoints:
        cp_name = cp.split('/')[-2]
        cp_name = ''.join(os.path.basename(cp_name).split('.'))  # Removing '.'
        cp_name += '_e{}'.format(cp[-1])
        ld_cp_names.append(cp_name)

    if any([x in QUERY_LV_PICKLE for x in ld_cp_names]):
        L = [0.0, 0.3, 1.0, 5.0]
        D = [64, 128, 256, 512, 1024, 2048, 4096]
    else:
        L = [0.0]
        D = [256]

    complete = True
    for l in L:
        for d in D:

            out_folder = os.path.join(OUT_ROOT, 'l{}_dim{}'.format(l, d))
            name = ''.join(os.path.basename(QUERY_LV_PICKLE).split('.')[:-1])
            out_pickle = os.path.join(out_folder, '{}.pickle'.format(name))

            if not os.path.exists(out_pickle):
                complete = False
                break
        if not complete:
            break

    if complete:
        print('Skipping complete {}'.format(QUERY_LV_PICKLE))
        return

    ref_meta = load_csv(REF_CSV)
    query_meta = load_csv(QUERY_CSV)
    full_ref_xy = get_xy(ref_meta)
    full_query_xy = get_xy(query_meta)
    num_q = full_query_xy.shape[0]

    pca_f = np.array(load_pickle(PCA_LV_PICKLE))
    full_ref_f = np.array(load_pickle(REF_LV_PICKLE))
    full_query_f = np.array(load_pickle(QUERY_LV_PICKLE))

    full_xy_dists = pairwise_distances(full_query_xy,
                                       full_ref_xy,
                                       metric='euclidean')

    for d in D:

        print(d)
        pca = PCA(whiten=True, n_components=d)
        pca = pca.fit(pca_f)
        pca_ref_f = pca.transform(full_ref_f)
        pca_query_f = pca.transform(full_query_f)

        for l in L:
            print(l)

            out_folder = os.path.join(OUT_ROOT, 'l{}_dim{}'.format(l, d))
            mkdir(out_folder)
            name = ''.join(os.path.basename(QUERY_LV_PICKLE).split('.')[:-1])
            out_pickle = os.path.join(out_folder, '{}.pickle'.format(name))

            if os.path.exists(out_pickle):
                print('{} already exists. Skipping.'.format(out_pickle))
                continue

            ref_idx = [0]
            for i in range(len(full_ref_xy)):
                if sum((full_ref_xy[i, :] - full_ref_xy[ref_idx[-1], :])**
                       2) >= l**2:
                    ref_idx.append(i)

            if len(ref_idx) < N:
                continue

            ref_f = np.array([pca_ref_f[i, :] for i in ref_idx])
            xy_dists = np.array([full_xy_dists[:, i]
                                 for i in ref_idx]).transpose()

            print('Building tree')
            ref_tree = KDTree(ref_f)

            print('Retrieving')
            top_f_dists, top_i = np.array(
                ref_tree.query(pca_query_f,
                               k=N,
                               return_distance=True,
                               sort_results=True))
            top_f_dists = np.array(top_f_dists)
            top_i = np.array(top_i, dtype=int)

            top_g_dists = [[xy_dists[q, r] for r in top_i[q, :]]
                           for q in range(num_q)]

            gt_i = np.argmin(xy_dists, axis=1)
            gt_g_dist = np.min(xy_dists, axis=1)

            # Translate to original indices
            top_i = [[ref_idx[r] for r in top_i[q, :]] for q in range(num_q)]
            gt_i = [ref_idx[r] for r in gt_i]

            save_pickle(
                [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx],
                out_pickle)
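
# Toy sketch (not from the original) of the greedy spatial thinning inside
# get_top_n(): a reference is kept only if it lies at least l meters from the
# previously kept one, enforcing a minimum spacing along the trajectory.
import numpy as np

xy = np.array([[0.0, 0.0], [0.3, 0.0], [1.2, 0.0], [1.3, 0.0], [2.5, 0.0]])
l = 1.0
keep = [0]
for i in range(1, len(xy)):
    if np.sum((xy[i] - xy[keep[-1]]) ** 2) >= l ** 2:
        keep.append(i)
print(keep)  # -> [0, 2, 4]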
Example #6
def get_top_n():
    ref_meta = load_csv(REF_CSV)
    query_meta = load_csv(QUERY_CSV)
    full_ref_xy = get_xy(ref_meta)
    full_query_xy = get_xy(query_meta)
    num_q = full_query_xy.shape[0]

    pca_f = np.array(load_pickle(PCA_LV_PICKLE))
    full_ref_f = np.array(load_pickle(REF_LV_PICKLE))
    full_query_f = np.array(load_pickle(QUERY_LV_PICKLE))

    full_xy_dists = pairwise_distances(full_query_xy,
                                       full_ref_xy,
                                       metric='euclidean')

    for d in DIMS:

        print(d)
        pca = PCA(whiten=True, n_components=d)
        pca = pca.fit(pca_f)
        pca_ref_f = pca.transform(full_ref_f)
        pca_query_f = pca.transform(full_query_f)

        for l in L:
            print(l)

            out_folder = os.path.join(OUT_ROOT, 'l{}_dim{}'.format(l, d))
            mkdir(out_folder)
            name = ''.join(os.path.basename(QUERY_LV_PICKLE).split('.')[:-1])
            out_pickle = os.path.join(out_folder, '{}.pickle'.format(name))

            if os.path.exists(out_pickle):
                print('{} already exists. Skipping.'.format(out_pickle))
                continue

            ref_idx = [0]
            for i in range(len(full_ref_xy)):
                if sum((full_ref_xy[i, :] - full_ref_xy[ref_idx[-1], :])**
                       2) >= l**2:
                    ref_idx.append(i)

            if len(ref_idx) < N:
                continue

            ref_f = np.array([pca_ref_f[i, :] for i in ref_idx])
            xy_dists = np.array([full_xy_dists[:, i]
                                 for i in ref_idx]).transpose()

            print('Building tree')
            ref_tree = KDTree(ref_f)

            print('Retrieving')
            top_f_dists, top_i = np.array(
                ref_tree.query(pca_query_f,
                               k=N,
                               return_distance=True,
                               sort_results=True))
            top_f_dists = np.array(top_f_dists)
            top_i = np.array(top_i, dtype=int)

            top_g_dists = [[xy_dists[q, r] for r in top_i[q, :]]
                           for q in range(num_q)]

            gt_i = np.argmin(xy_dists, axis=1)
            gt_g_dist = np.min(xy_dists, axis=1)

            # Translate to original indices
            top_i = [[ref_idx[r] for r in top_i[q, :]] for q in range(num_q)]
            gt_i = [ref_idx[r] for r in gt_i]

            save_pickle(
                [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx],
                out_pickle)
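
# Minimal retrieval sketch, assuming KDTree is sklearn.neighbors.KDTree as in
# get_top_n() above: build a tree over the reference descriptors and fetch the
# k nearest neighbours of each query descriptor in feature space.
import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
ref_f = rng.rand(100, 16)   # 100 reference descriptors, 16-D
query_f = rng.rand(5, 16)   # 5 query descriptors
tree = KDTree(ref_f)
f_dists, top_i = tree.query(query_f, k=3, return_distance=True,
                            sort_results=True)
print(top_i.shape)  # (5, 3): indices of the top-3 references per query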
import os

import cv2
import numpy as np

from learnlarge.util.helper import mkdir, fs_root
from learnlarge.util.io import load_txt

base_path = os.path.join(fs_root(), 'grad_cam')
out_path = os.path.join(fs_root(), 'video_frames')

mkdir(out_path)

checkpoints = os.listdir(os.path.join(fs_root(), 'checkpoints'))

cp_keys = list()

for cp in checkpoints:
    # NOTE: this assumes full checkpoint paths like '<run>/epoch-checkpoint-<e>';
    # os.listdir() above only yields base names, for which split('/')[-2] fails.
    c_name = cp.split('/')[-2]
    c_name = ''.join(os.path.basename(c_name).split('.'))  # Removing '.'
    c_name += '_e{}'.format(cp[-1])
    cp_keys.append(c_name)

print(cp_keys)

names = {
    'ha6_loresidual_det_muTrue_vl64': 'III',
    'offtheshelf': 'V',
    'pittsnetvlad': 'IV',
    'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD09-5_noPCA_lam05_me0_e1': 'I',
    'triplet_5e-6_full-10-25_cu_LRD09-5_noPCA_lam05_me0_e3': 'II',
}
Example #8
import os

import matplotlib

matplotlib.use("pgf")  # must be set before pyplot is imported
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})

import matplotlib.pyplot as plt  # noqa: E402
import numpy as np  # noqa: E402

from learnlarge.util.helper import fs_root, mkdir  # noqa: E402
from learnlarge.util.meta import get_xy  # noqa: E402

np.random.seed(42)  # seed numpy's global RNG

out_root = os.path.join(fs_root(), 'list_plots')
list_out_root = os.path.join(fs_root(), 'lists')

mkdir(out_root)
mkdir(list_out_root)

rows = 1
cols = 4

f, axs = plt.subplots(rows, cols, constrained_layout=False)
if rows == 1:
    axs = np.expand_dims(axs, 0)
if cols == 1:
    axs = np.expand_dims(axs, 1)
f.tight_layout()
f.set_figheight(2.5)  # 8.875in textheight
f.set_figwidth(13)  # 6.875in textwidth
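
# Sketch (assumption, not in the original): with the pgf backend selected
# above, a populated figure can be exported as LaTeX-ready .pgf plus a PDF:
# f.savefig(os.path.join(out_root, 'lists.pgf'), bbox_inches='tight')
# f.savefig(os.path.join(out_root, 'lists.pdf'), bbox_inches='tight')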

# ------------------------------------- Pittsburgh -------------------------------------
def compile_table(l, d):
    mkdir(OUT_ROOT)
    top_n_root = os.path.join(fs_root(), 'top_n')

    queries = [
        'oxford_night',
        'oxford_overcast',
        'oxford_snow',
        'oxford_sunny',
        'freiburg_cloudy',
        'freiburg_sunny',
        'pittsburgh_query'
    ]

    checkpoints = [
        # Trained on cold
        'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'lazy_triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'lazy_quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'h_sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        # Trained on small oxford
        'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'lazy_triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'lazy_quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'h_sum_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        # Trained on large oxford
        'ha0_lotriplet_vl64',
        'ha0_loquadruplet_vl64',
        'ha0_lolazy_triplet_vl64',
        'ha0_lolazy_quadruplet_vl64',
        'ha0_lodistance_triplet_vl64',
        'ha0_lohuber_distance_triplet_vl64',
        'ha6_loevil_triplet_muTrue_vl64',
        'ha6_loevil_quadruplet_muTrue_vl64',
        'ha6_loresidual_det_muTrue_vl64',
        'ha0_lotriplet_vl0',
        'ha0_loquadruplet_vl0',
        'ha6_loevil_quadruplet_muTrue_vl0',
        'ha6_loresidual_det_muTrue_vl0',
        'ha0_lotriplet_muTrue_vl64',
        'ha0_lotriplet_muFalse_vl64',
        'ha6_lotriplet_muTrue_vl64',
        'ha6_lotriplet_muFalse_vl64',

        # Trained on Pittsburgh
        'pittsnetvlad',

        # ImageNet
        'offtheshelf'
    ]

    losses = [
        'GT',

        'Triplet \\cite{arandjelovic2016netvlad}',
        'Quadruplet \\cite{chen2017beyond}',

        'Lazy triplet \\cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \\cite{angelina2018pointnetvlad}',

        'Triplet + distance \\cite{thoma2020geometrically}',
        'Triplet + Huber dist.~\\cite{thoma2020geometrically}',

        'Triplet \\cite{arandjelovic2016netvlad}',
        'Quadruplet \\cite{chen2017beyond}',

        'Lazy triplet \\cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \\cite{angelina2018pointnetvlad}',

        'Triplet + Huber dist.~\\cite{thoma2020geometrically}',

        'Triplet \\cite{arandjelovic2016netvlad}',
        'Quadruplet \\cite{chen2017beyond}',

        'Lazy triplet \\cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \\cite{angelina2018pointnetvlad}',

        'Triplet + distance \\cite{thoma2020geometrically}',
        'Triplet + Huber dist.~\\cite{thoma2020geometrically}',

        '\\textit{Triplet + HP}',
        '\\textit{Quadruplet + HP}',

        '\\textit{Volume}',
        '$\\mathit{Volume}^*$',

        'Triplet \\cite{arandjelovic2016netvlad}',

        'Off-the-shelf \\cite{deng2009imagenet}'
    ]

    setting = 'l{}_dim{}'.format(l, d)
    print(setting)

    table = defaultdict(list)

    table['Loss'] = losses

    for_mean = defaultdict(list)
    for i, query in enumerate(queries):
        print(query)

        print_gt = True

        if query.startswith('freiburg'):
            T = [0.5, 1.0, 1.5]
        else:
            T = [5.0, 10.0, 15.0]

        for j, checkpoint in enumerate(checkpoints):

            cp_name = checkpoint

            t_n_file = os.path.join(top_n_root, setting, '{}_{}.pickle'.format(query, cp_name))
            if not os.path.exists(t_n_file):
                print('Missing: {}'.format(t_n_file))
                table[query].append('-')
                for_mean[query].append([-1, -1, -1])
                continue
            print(t_n_file)

            [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx] = load_pickle(t_n_file)
            top_g_dists = np.array(top_g_dists)

            if print_gt:
                print_gt = False
                Y = [float(sum(gt_g_dist < x)) / float(len(gt_g_dist)) * 100 for x in T]
                table[query].append(['{:.1f}'.format(y) for y in Y])
                for_mean[query].append(Y)

            t_1_d = np.array([td[0] for td in top_g_dists])

            Y = [float(sum(t_1_d < x)) / float(len(t_1_d)) * 100 for x in T]
            table[query].append(['{:.1f}'.format(y) for y in Y])
            for_mean[query].append(Y)

        # Highlight best values:
        b = np.argmax(np.array(for_mean[query])[1:], axis=0)
        b = b + 1
        for ii, ib in enumerate(b):
            table[query][ib][ii] = '\\textbf{' + table[query][ib][ii] + '}'

        for ii in range(len(losses)):
            table[query][ii] = '/'.join(table[query][ii])

    for i in range(len(losses)):
        vals = np.array([for_mean[query][i] for query in queries if for_mean[query][i][0] > -1])
        Y = np.mean(vals, axis=0)
        table['mean'].append(['{:.1f}'.format(y) for y in Y])
        for_mean['mean'].append(Y)

    # Highlight best values:
    b = np.argmax(np.array(for_mean['mean'])[1:], axis=0)
    b = b + 1
    for ii, ib in enumerate(b):
        table['mean'][ib][ii] = '\\textbf{' + table['mean'][ib][ii] + '}'

    for ii in range(len(losses)):
        table['mean'][ii] = '/'.join(table['mean'][ii])

    out_name = os.path.join(OUT_ROOT, 'accuracy_table.csv')
    save_csv(table, out_name)
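
# Toy sketch (not from the original) of how one table cell group is formed in
# compile_table(): recall at thresholds T is the percentage of queries whose
# top-1 match lies within each threshold, joined as 'r5/r10/r15'.
import numpy as np

t_1_d = np.array([0.4, 2.0, 7.5, 12.0])  # top-1 geometric distances [m]
T = [5.0, 10.0, 15.0]
row = ['{:.1f}'.format(100.0 * np.mean(t_1_d < t)) for t in T]
print('/'.join(row))  # -> '50.0/75.0/100.0'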