Esempio n. 1
0
def main():
    """Dispatch image downsizing over the extracted Oxford folders.

    task_id == -1 submits a cluster array job, task_id == 0 processes all
    folders sequentially, and any other id processes that single folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_id', type=int, default=0)
    parser.add_argument('--max_side', type=int, default=240)
    parser.add_argument('--img_root', default='/path/to/server/files/data/datasets/oxford_ext_raw_stereo_centre')
    parser.add_argument('--ins_root', default=os.path.join(fs_root(), 'data/datasets/oxford_extracted'))
    parser.add_argument('--tar_root', default='/path/to/files/oxford_scraped')
    parser.add_argument('--out_img_root', default=os.path.join(fs_root(), 'datasets/oxford_240'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge'))
    parser.add_argument('--log_dir', default=os.path.join(fs_root(), 'cpu_logs/downsize'))
    parser.add_argument('--cams', default=os.path.join(fs_root(), 'code/robotcar-dataset-sdk-2.1/models'))
    args = parser.parse_args()

    if not os.path.exists(args.out_root):
        os.makedirs(args.out_root)

    if args.task_id == -1:
        # Submit one array job instead of processing locally.
        create_array_job(args.ins_root, args.log_dir)
    elif args.task_id == 0:
        # Task ids are 1-based: one per folder under ins_root.
        for tid in range(1, len(os.listdir(args.ins_root)) + 1):
            downsize_images(tid, args.max_side, args.img_root, args.ins_root,
                            args.tar_root, args.out_img_root, args.out_root,
                            args.cams)
    else:
        downsize_images(args.task_id, args.max_side, args.img_root,
                        args.ins_root, args.tar_root, args.out_img_root,
                        args.out_root, args.cams)
def main():
    """Merge the per-date split CSVs and clean the merged output."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/splits'))
    parser.add_argument('--ins_root', default=os.path.join(fs_root(), 'data/datasets/oxford_extracted'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/merged'))
    parser.add_argument('--folds', default=['train', 'val', 'test', 'full'])
    parser.add_argument('--cols_to_keep', default=['easting', 'northing', 'folder', 't', 'yaw', 'date'])
    args = parser.parse_args()

    if not os.path.exists(args.out_root):
        os.makedirs(args.out_root)

    # Merge first, then clean in place (input and output root coincide).
    merge_dates(args.in_root, args.ins_root, args.out_root)
    clean(args.out_root, args.out_root, args.folds, args.cols_to_keep)
Esempio n. 3
0
def main():
    """Interpolate (x, y) positions for the image timestamps of each folder.

    task_id == -1 submits an array job, task_id == 0 loops over every
    folder, any other id processes that single folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_id', type=int, default=0)
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/img_info'))
    parser.add_argument('--ins_root', default=os.path.join(fs_root(), 'data/datasets/oxford_extracted'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/xy'))
    parser.add_argument('--log_dir', default=os.path.join(fs_root(), 'cluster_log/xy'))
    args = parser.parse_args()

    if args.task_id == -1:
        create_array_job(args.ins_root, args.log_dir)
        # Also runs folder 98 once locally (hard-coded in the original;
        # presumably a smoke test -- TODO confirm).
        interpolate_xy(98, args.in_root, args.ins_root, args.out_root)
    elif args.task_id == 0:
        # Task ids are 1-based: one per folder under ins_root.
        for tid in range(1, len(os.listdir(args.ins_root)) + 1):
            interpolate_xy(tid, args.in_root, args.ins_root, args.out_root)
    else:
        interpolate_xy(args.task_id, args.in_root, args.ins_root, args.out_root)
def main():
    """Clean the merged parametrized CSVs, keeping only selected columns."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/merged_parametrized'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/clean_merged_parametrized'))
    parser.add_argument('--folds', default=['train', 'val', 'test'])
    parser.add_argument('--cols_to_keep', default=['easting', 'northing', 'folder', 't', 'yaw', 'date', 'l'])
    args = parser.parse_args()

    if not os.path.exists(args.out_root):
        os.makedirs(args.out_root)

    clean_parametrization(args.in_root, args.folds, args.cols_to_keep, args.out_root)
def main():
    """Plot dataset statistics for the selected folds."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/clean_merged_parametrized'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/statistics'))
    parser.add_argument('--tag_root', default='/path/to/server/files/data/datasets/oxford_extracted')
    # Other known fold names: 'train', 'val', 'test', 'full' and their
    # '_ref' / '_query' variants.
    parser.add_argument('--folds',
                        default=['train_ref'])
    args = parser.parse_args()

    if not os.path.exists(args.out_root):
        os.makedirs(args.out_root)

    plot_statistics(args.in_root, args.out_root, args.folds, args.tag_root)
def main():
    """Set aside the listed query traversals from each fold.

    Bug fix: ``--query_dates`` was declared with ``type=list``, which turns a
    command-line value into a list of single characters (``list('abc')`` ->
    ``['a', 'b', 'c']``); the default only worked because argparse skips type
    conversion for non-string defaults.  ``nargs='+'`` accepts one or more
    date strings and leaves the default unchanged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/merged_parametrized'))
    parser.add_argument('--folds', default=['train', 'val', 'test', 'full'])
    parser.add_argument('--query_dates', nargs='+', default=[
        '2015-08-14-14-54-57',  # roadworks, overcast
        '2014-11-18-13-20-12',  # sun, clouds
        '2014-12-17-18-18-43',  # night, rain
        '2015-02-03-08-45-10',  # snow
        '2014-06-26-09-24-58'  # overcast, alternate-route (validation area)
    ])
    args = parser.parse_args()
    print(flags_to_args(args))

    folds = args.folds
    in_root = args.in_root
    query_dates = args.query_dates

    set_aside_queries(in_root, folds, query_dates)
Esempio n. 7
0
def main():
    """Compute geographic splits from the map-grid images.

    task_id == -1 submits an array job (and additionally runs folder 20
    once locally), task_id == 0 loops over all folders, any other id
    processes that single folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_id', type=int, default=-1)
    # NOTE(review): type=dict cannot parse a command-line string
    # (dict('...') raises), so --grids is effectively default-only.
    parser.add_argument('--grids', type=dict,
                        default={'full': os.path.join(fs_root(), 'data/learnlarge/map_grids/full.png'),
                                 'test': os.path.join(fs_root(), 'data/learnlarge/map_grids/test.png'),
                                 'train': os.path.join(fs_root(), 'data/learnlarge/map_grids/train.png'),
                                 'val': os.path.join(fs_root(), 'data/learnlarge/map_grids/val.png')
                                 })
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/xy'))
    parser.add_argument('--ins_root', default=os.path.join(fs_root(), 'data/datasets/oxford_extracted'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/splits'))
    parser.add_argument('--log_dir', default=os.path.join(fs_root(), 'cpu_log/split'))
    args = parser.parse_args()

    # Make sure both output locations exist (out_root first, as before).
    for directory in (args.out_root, args.log_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    if args.task_id == -1:
        create_array_job(args.ins_root, args.log_dir)
        # Hard-coded extra run of folder 20 (kept from the original;
        # purpose unclear -- TODO confirm).
        get_splits(20, args.grids, args.in_root, args.ins_root, args.out_root)
    elif args.task_id == 0:
        for tid in range(1, len(os.listdir(args.ins_root)) + 1):
            get_splits(tid, args.grids, args.in_root, args.ins_root, args.out_root)
    else:
        get_splits(args.task_id, args.grids, args.in_root, args.ins_root, args.out_root)
Esempio n. 8
0
        for batch_indices, batch_image_info, batched_distance in zip(
                batched_indices, batched_image_info, batched_distances):
            CPU_IN_QUEUE.put(
                (batch_indices, batch_image_info, batched_distance))

        # Wait for completion & order output
        CPU_IN_QUEUE.join()
        GPU_IN_QUEUE.join()
        GPU_OUT_QUEUE.join()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Image folder
    parser.add_argument('--img_root', default=os.path.join(fs_root()))
    parser.add_argument('--query_csv')
    parser.add_argument('--ref_csv')
    parser.add_argument('--top_n_pickle')
    parser.add_argument('--checkpoint')

    # Output
    parser.add_argument('--out_root',
                        default=os.path.join(fs_root(), 'grad_cam'))
    parser.add_argument('--log_dir',
                        default=os.path.join(fs_root(), 'logs/grad_cam'))

    # Network
    parser.add_argument('--vlad_cores', default=64, type=int)

    # Image Size
Esempio n. 9
0
import argparse
import os

from learnlarge.util.helper import fs_root
from learnlarge.util.sge import run_one_job

parser = argparse.ArgumentParser()
parser.add_argument('--i', default=0, type=int)
args = parser.parse_args()

script = os.path.join(fs_root(), 'code/learnlarge/evaluate/012_any_tsne.py')

checkpoints = [

    # Trained on cold
    'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'h_sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

    # Trained on small oxford
    'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'h_sum_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

    # Trained on large oxford
    'ha0_lotriplet_vl64'
Esempio n. 10
0
    parser = argparse.ArgumentParser()

    # Image folder
    parser.add_argument('--pca_lv_pickle')
    parser.add_argument('--query_lv_pickle')
    parser.add_argument('--ref_lv_pickle')
    parser.add_argument('--query_csv')
    parser.add_argument('--ref_csv')
    parser.add_argument('--L', default=[0.0, 0.3, 1.0, 3.0, 5.0], type=list)
    parser.add_argument('--N', default=25, type=int)
    parser.add_argument('--dims',
                        default=[512, 16, 32, 64, 128, 256, 1024, 2048, 4096],
                        type=list)

    # Output
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'top_n'))
    parser.add_argument('--log_dir',
                        default=os.path.join(fs_root(), 'logs/top_n'))

    FLAGS = parser.parse_args()

    # Define each FLAG as a variable (generated automatically with util.flags_to_globals(FLAGS))
    flags_to_globals(FLAGS)

    N = FLAGS.N
    L = FLAGS.L
    DIMS = FLAGS.dims
    LOG_DIR = FLAGS.log_dir
    OUT_ROOT = FLAGS.out_root
    QUERY_CSV = FLAGS.query_csv
    QUERY_LV_PICKLE = FLAGS.query_lv_pickle
Esempio n. 11
0
import os

from learnlarge.util.helper import fs_root
from learnlarge.util.sge import run_one_job

script = os.path.join(fs_root(), 'code/learnlarge/evaluate/001_any_inference.py')

sets = [
    'oxford_ref',
    'oxford_night',
    'oxford_overcast',
    'oxford_pca',
    'oxford_snow',
    'oxford_sunny',
    'freiburg_pca',
    'freiburg_cloudy',
    'freiburg_ref',
    'freiburg_sunny',
    'pittsburgh_query',
    'pittsburgh_ref',
]

checkpoints = [

    # Trained on cold
    'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

    'lazy_triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
Esempio n. 12
0
                memory=50,
                script_parameters=[('out_folder', os.path.basename(out_dir))],
                out_dir=out_dir,
                name='train_{}'.format(loss),
                overwrite=True,
                hold_off=False,
                array=True,
                num_jobs=1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Image folder
    parser.add_argument('--img_root',
                        default=os.path.join(fs_root(), 'datasets/oxford_512'))

    # Location of meta data (file lists & checkpoint)
    parser.add_argument('--shuffled_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/shuffled'))
    parser.add_argument('--loc_ref_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/clusters'))
    parser.add_argument('--anchor_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/anchors'))
    parser.add_argument('--checkpoint',
                        default=os.path.join(
                            fs_root(),
                            'data/learnlarge/checkpoint/offtheshelf'))
Esempio n. 13
0
                out_dir=out_dir,
                name='infer_{}'.format(loss),
                overwrite=True,
                hold_off=False,
                array=True,
                num_jobs=1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Image folder
    parser.add_argument('--rescale', default=True)
    parser.add_argument('--small_side', default=180, type=int)
    parser.add_argument('--large_side', default=240, type=int)
    parser.add_argument('--img_root', default=os.path.join(fs_root()))
    parser.add_argument('--csv_root', default=os.path.join(fs_root(), 'lists'))
    parser.add_argument('--set', default='cmu_ref')
    parser.add_argument('--checkpoint')
    parser.add_argument('--out_name')

    # Network
    parser.add_argument('--vlad_cores', default=64, type=int)

    # Output
    parser.add_argument('--out_root',
                        default=os.path.join(fs_root(), 'data/meta_eval/lv'))
    parser.add_argument('--log_dir',
                        default=os.path.join(fs_root(),
                                             'cpu_logs/learnlarge/lv'))
Esempio n. 14
0
def plot_roc(l, d):
    """Plot localization rate vs. distance threshold for one result setting.

    One subplot per query set; each subplot shows the ground-truth upper
    bound plus one curve per training checkpoint.  The figure is written to
    OUT_ROOT as both PDF and PNG.

    Args:
        l: lambda value selecting the 'l{l}_dim{d}' result folder.
        d: descriptor dimensionality selecting the same folder.
    """
    mkdir(OUT_ROOT)
    top_n_root = os.path.join(fs_root(), 'top_n')

    # One checkpoint per training setup, aligned index-by-index with
    # `losses`, `fill_styles`, `markers`, `lines` and `colors` below.
    # Bug fix: the original list was missing the commas after
    # 'ha0_lotriplet_vl64' and 'offtheshelf'; implicit string concatenation
    # silently fused adjacent entries and dropped two checkpoints.
    checkpoints = \
        [
            # Trained on cold
            'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

            # Trained on small oxford
            'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

            # Trained on large oxford
            'ha0_lotriplet_vl64',

            # Trained on Pittsburgh
            'pittsnetvlad',

            # Image-net
            'offtheshelf',

            # III with hard positives
            'ha6_loevil_triplet_muTrue_vl64'
        ]

    queries = [
        'oxford_night',
        'freiburg_cloudy',
        'oxford_overcast',
        'freiburg_sunny',
        'oxford_snow',
        'pittsburgh_query',
        'oxford_sunny',

    ]

    titles = [
        'Oxford RobotCar, night',
        'Cold Freiburg, cloudy',
        'Oxford RobotCar, overcast',
        'Cold Freiburg, sunny',
        'Oxford RobotCar, snow',
        'Pittsburgh',
        'Oxford RobotCar, sunny',
    ]

    # Legend labels, one per checkpoint (roman numeral = training dataset).
    losses = [

        'I Cold Freiburg',
        'II Oxford (small)',
        'III Oxford (large)',
        'IV Pittsburgh',
        'V ImageNet (off-the-shelf)',

        '\\textit{III Oxford (large) + HP}',
    ]

    fill_styles = [
        'none',
        'none',
        'none',
        'full',
        'none',

        'full',

    ]

    markers = [
        '|',
        '.',
        'o',
        '*',
        '',

        'o',
    ]

    lines = [
        '--',

        '-',
        '-',
        '-.',
        ':',

        '-'
    ]

    colors = [
        '#1cad62',

        '#00BFFF',
        '#1E90FF',  # Triplet
        '#8c0054',
        '#000000',

        '#1934e6',  # Triplet HP
    ]

    rows = 2
    cols = 4

    f, axs = plt.subplots(rows, cols, constrained_layout=False)
    # Keep axs two-dimensional even for degenerate grids so axs[i, j] works.
    if rows == 1:
        axs = np.expand_dims(axs, 0)
    if cols == 1:
        axs = np.expand_dims(axs, 1)
    f.tight_layout()
    f.set_figheight(4.5)  # 8.875in textheight
    f.set_figwidth(8.5)  # 6.875in textwidth

    for i, query in enumerate(queries):
        print(query)

        print_gt = True

        # Cold Freiburg is indoors, so distance thresholds are much smaller.
        if query.startswith('freiburg'):
            t = 1.5
        else:
            t = 15.0

        setting = 'l{}_dim{}'.format(l, d)

        min_y = 1000
        max_y = 0

        for j, (cp_name, m, line, color) in enumerate(
                zip(checkpoints, cycle(markers), cycle(lines), cycle(colors))):

            t_n_file = os.path.join(top_n_root, setting, '{}_{}.pickle'.format(query, cp_name))
            if not os.path.exists(t_n_file):
                print('Missing: {}'.format(t_n_file))
                continue
            print(t_n_file)

            [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx] = load_pickle(t_n_file)
            top_g_dists = np.array(top_g_dists)

            # Plot the ground-truth upper bound once per subplot, together
            # with the title and the legend section header.
            if print_gt:
                print_gt = False
                X = np.linspace(0, t, num=50)
                Y = [float(sum(gt_g_dist < x)) / float(len(gt_g_dist)) * 100 for x in X]
                ax = axs[i % rows, i // rows]
                width = 0.75

                ax.plot(X, Y, label='Upper bound', linewidth=width, c='#000000')
                ax.plot([0], [0], linewidth=0, label=' ')  # legend spacer
                ax.plot([0], [0], linewidth=0, label='\\textbf{Training datasets:}')
                ax.title.set_text(titles[i])
                ax.set_xlim([0, t])
                ax.grid(True)

            # Extra legend section header before the hard-positive model.
            if 'ha6_loevil_triplet_muTrue_vl64' in cp_name:
                ax = axs[i % rows, i // rows]
                ax.plot([0], [0], linewidth=0, label=' ')
                ax.plot([0], [0], linewidth=0, label='\\textbf{With our mining:}')

            # Percentage of queries whose top-1 match lies within distance x.
            t_1_d = np.array([td[0] for td in top_g_dists])
            X = np.linspace(0, t, num=50)

            Y = [float(sum(t_1_d < x)) / float(len(t_1_d)) * 100 for x in X]

            min_y = min(np.min(np.array(Y)), min_y)
            max_y = max(np.max(np.array(Y)), max_y)

            ax = axs[i % rows, i // rows]
            width = 0.75
            ax.plot(X, Y, label=losses[j], linestyle=line, marker=m, linewidth=width, markevery=j % rows + cols,
                    c=color, markersize=3, fillstyle=fill_styles[j])

        ax = axs[i % rows, i // rows]
        ax.set_xlim([0, t])
        ax.set_ylim([min_y, min(max_y + 5, 99)])

        # Major ticks every 20, minor ticks every 5
        major_ticks_x = np.arange(0, t, t / 3)
        minor_ticks_x = np.arange(0, t, t / 3 / 4)

        y_step = 20
        if 'night' in query:
            y_step /= 2  # night recall is low; use a finer y scale

        major_ticks_y = np.arange(min_y, min(max_y + 5, 99), y_step)
        minor_ticks_y = np.arange(min_y, min(max_y + 5, 99), 5)

        ax.set_xticks(major_ticks_x)
        ax.set_xticks(minor_ticks_x, minor=True)
        ax.set_yticks(major_ticks_y)
        ax.set_yticks(minor_ticks_y, minor=True)

        # And a corresponding grid
        ax.grid(which='both')

        # Different emphasis for minor and major grid lines:
        ax.grid(which='minor', alpha=0.2)
        ax.grid(which='major', alpha=0.5)

    out_name = os.path.join(OUT_ROOT, '{}_training_region_roc.pdf'.format(setting))

    # The last grid cell hosts only the shared legend.
    axs[-1, -1].axis('off')

    for i in range(cols):
        axs[-1, i].set_xlabel('Distance threshold $d$ [m]')

    for i in range(rows):
        axs[i, 0].set_ylabel('Correctly localized [\%]')

    left = 0.0  # the left side of the subplots of the figure
    right = 1.0  # the right side of the subplots of the figure
    bottom = 0.0  # the bottom of the subplots of the figure
    top = 1.0  # the top of the subplots of the figure
    wspace = 0.2  # the amount of width reserved for space between subplots,
    # expressed as a fraction of the average axis width
    hspace = 0.25  # the amount of height reserved for space between subplots,
    # expressed as a fraction of the average axis height

    plt.subplots_adjust(wspace=wspace, hspace=hspace, left=left, right=right, bottom=bottom, top=top)

    handles, labels = axs[0, 0].get_legend_handles_labels()

    axs[-1, -1].legend(handles, labels, loc='lower left', bbox_to_anchor=(-0.075, 0.0), ncol=1, frameon=True,
                       borderaxespad=0.0)

    plt.savefig(out_name, bbox_inches='tight', pad_inches=0)
    plt.savefig(out_name.replace('.pdf', '.png'), bbox_inches='tight', pad_inches=0)
import os

import cv2
import numpy as np

from learnlarge.util.helper import mkdir, fs_root
from learnlarge.util.io import load_txt

base_path = os.path.join(fs_root(), 'grad_cam')
out_path = os.path.join(fs_root(), 'video_frames')

mkdir(out_path)

checkpoints = os.listdir(os.path.join(fs_root(), 'checkpoints'))

cp_keys = list()

for cp in checkpoints:
    c_name = cp.split('/')[-2]
    c_name = ''.join(os.path.basename(c_name).split('.'))  # Removing '.'
    c_name += '_e{}'.format(cp[-1])
    cp_keys.append(c_name)

print(cp_keys)

names = {
    'ha6_loresidual_det_muTrue_vl64': 'III',
    'offtheshelf': 'V',
    'pittsnetvlad': 'IV',
    'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD09-5_noPCA_lam05_me0_e1': 'I',
    'triplet_5e-6_full-10-25_cu_LRD09-5_noPCA_lam05_me0_e3': 'II'
Esempio n. 16
0
import argparse
import os

from learnlarge.util.helper import fs_root
from learnlarge.util.sge import run_one_job

# Launcher configuration for the top-n evaluation script.
parser = argparse.ArgumentParser()
parser.add_argument('--i', default=0, type=int)  # job/checkpoint index
args = parser.parse_args()

# Script to be executed (presumably via run_one_job; the call site lies
# outside this excerpt -- TODO confirm).
script = os.path.join(fs_root(), 'code/learnlarge/evaluate/003_any_top-n.py')

# Query sets to evaluate.
queries = [
    'freiburg_cloudy', 'freiburg_sunny', 'oxford_night', 'oxford_overcast',
    'oxford_snow', 'oxford_sunny', 'pittsburgh_query'
]

# Reference set per dataset, keyed by the query-name prefix.
references = {
    'oxford': 'oxford_ref',
    'freiburg': 'freiburg_ref',
    'pittsburgh': 'pittsburgh_ref'
}

# PCA-fitting set per dataset; Pittsburgh reuses its reference set.
pcas = {
    'oxford': 'oxford_pca',
    'freiburg': 'freiburg_pca',
    'pittsburgh': 'pittsburgh_ref'
}

csv_root = os.path.join(fs_root(), 'lists')  # image-list CSVs
log_root = os.path.join(fs_root(), 'logs/top_n')  # job log output
Esempio n. 17
0
    s1 = plt.scatter(x, y, c=query_color, s=2)
    s1.set_rasterized(True)
    plt.savefig(out_png_1, bbox_inches='tight', pad_inches=0)

    plt.clf()
    plt.figure(figsize=(3, 3))
    s2 = plt.scatter(Y[:, 0], Y[:, 1], c=query_color, s=2)
    s2.set_rasterized(True)
    plt.savefig(out_png_1c, bbox_inches='tight', pad_inches=0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Image folder
    parser.add_argument('--img_root', default=os.path.join(fs_root()))
    parser.add_argument('--query_lv_pickle')
    parser.add_argument('--query_csv')

    parser.add_argument('--pca_lv_pickle')
    parser.add_argument('--pca_csv')

    parser.add_argument('--T', default=25, type=float)
    parser.add_argument('--N', default=25, type=int)
    parser.add_argument('--p', default=100, type=int)

    # Output
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'tsne'))
    parser.add_argument('--log_dir',
                        default=os.path.join(fs_root(), 'logs/tsne'))
Esempio n. 18
0
    ax3.set_ylabel('Northing [m]')

    plt.savefig(out_img_scatter)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_ref_date',
                        default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument('--test_ref_date',
                        default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument(
        '--val_ref_date',
        default='2014-05-14-13-50-20')  # Sunny, alternate-route
    parser.add_argument('--in_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/merged'))
    parser.add_argument('--out_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/parametrized'))
    parser.add_argument('--log_root',
                        default=os.path.join(
                            fs_root(), 'cpu_logs/learnlarge/parametrized'))
    parser.add_argument('--date_list',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/ins_dates.txt'))
    parser.add_argument('--task_id', default=-1, type=int)
    args = parser.parse_args()

    flags_to_args(args)
def compile_table(l, d):
    """Compile top-1 localization accuracy into OUT_ROOT/accuracy_table.csv.

    For every query set, the percentage of queries whose top-1 retrieval is
    within each distance threshold is tabulated per checkpoint, with the
    ground-truth upper bound as first row.  Per-column best values are
    bolded (LaTeX) and a cross-query mean column is appended.

    Args:
        l: lambda value selecting the 'l{l}_dim{d}' result folder.
        d: descriptor dimensionality selecting the same folder.
    """
    mkdir(OUT_ROOT)
    top_n_root = os.path.join(fs_root(), 'top_n')

    queries = [
        'oxford_night',
        'oxford_overcast',
        'oxford_snow',
        'oxford_sunny',
        'freiburg_cloudy',
        'freiburg_sunny',
        'pittsburgh_query'
    ]

    # Bug fix: the 'Trained on large oxford' entries below originally had no
    # separating commas, so implicit string concatenation fused all 17
    # literals into one nonexistent checkpoint name; every one of those
    # result files was then reported missing, and the row/label mismatch
    # made the '/'.join loop below raise IndexError.
    checkpoints = [
        # Trained on cold
        'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'lazy_triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'lazy_quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'h_sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

        # Trained on small oxford
        'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'lazy_triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
        'lazy_quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        'h_sum_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

        # Trained on large oxford
        'ha0_lotriplet_vl64',
        'ha0_loquadruplet_vl64',
        'ha0_lolazy_triplet_vl64',
        'ha0_lolazy_quadruplet_vl64',
        'ha0_lodistance_triplet_vl64',
        'ha0_lohuber_distance_triplet_vl64',
        'ha6_loevil_triplet_muTrue_vl64',
        'ha6_loevil_quadruplet_muTrue_vl64',
        'ha6_loresidual_det_muTrue_vl64',
        'ha0_lotriplet_vl0',
        'ha0_loquadruplet_vl0',
        'ha6_loevil_quadruplet_muTrue_vl0',
        'ha6_loresidual_det_muTrue_vl0',
        'ha0_lotriplet_muTrue_vl64',
        'ha0_lotriplet_muFalse_vl64',
        'ha6_lotriplet_muTrue_vl64',
        'ha6_lotriplet_muFalse_vl64',

        # Trained on Pittsburgh
        'pittsnetvlad',

        # Image-net
        'offtheshelf'
    ]

    # Row labels: 'GT' first, then one label per checkpoint.
    # NOTE(review): this list has fewer entries than 1 + len(checkpoints);
    # rows beyond len(losses) are never joined or highlighted -- confirm
    # whether labels for the extra large-oxford checkpoints are missing.
    losses = [
        'GT',

        'Triplet \cite{arandjelovic2016netvlad}',
        'Quadruplet \cite{chen2017beyond}',

        'Lazy triplet \cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \cite{angelina2018pointnetvlad}',

        'Triplet + distance \cite{thoma2020geometrically}',
        'Triplet + Huber dist.~\cite{thoma2020geometrically}',

        'Triplet \cite{arandjelovic2016netvlad}',
        'Quadruplet \cite{chen2017beyond}',

        'Lazy triplet \cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \cite{angelina2018pointnetvlad}',

        'Triplet + Huber dist.~\cite{thoma2020geometrically}',

        'Triplet \cite{arandjelovic2016netvlad}',
        'Quadruplet \cite{chen2017beyond}',

        'Lazy triplet \cite{angelina2018pointnetvlad}',
        'Lazy quadruplet \cite{angelina2018pointnetvlad}',

        'Triplet + distance \cite{thoma2020geometrically}',
        'Triplet + Huber dist.~\cite{thoma2020geometrically}',

        '\\textit{Triplet + HP}',
        '\\textit{Quadruplet + HP}',

        '\\textit{Volume}',
        '$\\mathit{Volume}^*$',

        'Triplet \cite{arandjelovic2016netvlad}',

        'Off-the-shelf \cite{deng2009imagenet}'
    ]

    setting = 'l{}_dim{}'.format(l, d)
    print(setting)

    table = defaultdict(list)

    table['Loss'] = losses

    for_mean = defaultdict(list)
    for i, query in enumerate(queries):
        print(query)

        print_gt = True

        # Cold Freiburg is indoors, so distance thresholds are much smaller.
        if query.startswith('freiburg'):
            T = [0.5, 1.0, 1.5]
        else:
            T = [5.0, 10.0, 15.0]

        for j, checkpoint in enumerate(checkpoints):

            cp_name = checkpoint

            t_n_file = os.path.join(top_n_root, setting, '{}_{}.pickle'.format(query, cp_name))
            if not os.path.exists(t_n_file):
                print('Missing: {}'.format(t_n_file))
                table[query].append('-')  # placeholder cell
                for_mean[query].append([-1, -1, -1])  # excluded from the mean
                continue
            print(t_n_file)

            [top_i, top_g_dists, top_f_dists, gt_i, gt_g_dist, ref_idx] = load_pickle(t_n_file)
            top_g_dists = np.array(top_g_dists)

            # Ground-truth upper-bound row, added once per query before the
            # first available checkpoint's row.
            if print_gt:
                print_gt = False
                Y = [float(sum(gt_g_dist < x)) / float(len(gt_g_dist)) * 100 for x in T]
                table[query].append(['{:.1f}'.format(y) for y in Y])
                for_mean[query].append(Y)

            # Percentage of queries with top-1 match within each threshold.
            t_1_d = np.array([td[0] for td in top_g_dists])

            Y = [float(sum(t_1_d < x)) / float(len(t_1_d)) * 100 for x in T]
            table[query].append(['{:.1f}'.format(y) for y in Y])
            for_mean[query].append(Y)

        # Highlight best values (excluding the GT row at index 0):
        b = np.argmax(np.array(for_mean[query])[1:], axis=0)
        b = b + 1
        for ii, ib in enumerate(b):
            table[query][ib][ii] = '\\textbf{' + table[query][ib][ii] + '}'

        for ii in range(len(losses)):
            table[query][ii] = '/'.join(table[query][ii])

    # Cross-query mean per row; missing entries (marked -1) are skipped.
    for i in range(len(losses)):
        query_scores = np.array([for_mean[query][i] for query in queries if for_mean[query][i][0] > -1])
        Y = np.mean(query_scores, axis=0)
        table['mean'].append(['{:.1f}'.format(y) for y in Y])
        for_mean['mean'].append(Y)

    # Highlight best mean values (excluding the GT row):
    b = np.argmax(np.array(for_mean['mean'])[1:], axis=0)
    b = b + 1
    for ii, ib in enumerate(b):
        table['mean'][ib][ii] = '\\textbf{' + table['mean'][ib][ii] + '}'

    for ii in range(len(losses)):
        table['mean'][ii] = '/'.join(table['mean'][ii])

    out_name = os.path.join(OUT_ROOT, 'accuracy_table.csv')
    save_csv(table, out_name)
Esempio n. 20
0
import os

import numpy as np
from distloss.cold_helper import get_recursive_file_list, parse_file_list

from learnlarge.util.helper import mkdir, fs_root
from learnlarge.util.io import load_csv
from learnlarge.util.io import save_csv

out_root = os.path.join(fs_root(), 'lists')  # output folder for image-list CSVs
N_SAMPLES = 5000  # number of evenly spaced rows to keep per list

mkdir(out_root)

# Oxford
place = 'oxford'


def img_path(info):
    """Return the relative image path for a (date, folder, timestamp) triple.

    The folder index is zero-padded to two digits, matching the
    '<date>_stereo_centre_<NN>' directory layout under datasets/oxford_512.
    """
    sub_dir = '{}_stereo_centre_{:02d}'.format(info[0], int(info[1]))
    file_name = '{}.png'.format(info[2])
    return os.path.join('datasets/oxford_512', sub_dir, file_name)


# Preselected reference
preselected_ref = os.path.join(fs_root(), 'data/learnlarge/shuffled/train_ref_000.csv')
p_meta = load_csv(preselected_ref)
# Derive the relative image path for every (date, folder, timestamp) row.
p_meta['path'] = [img_path((d, f, t)) for d, f, t in
                  zip(p_meta['date'], p_meta['folder'], p_meta['t'])]
# N_SAMPLES evenly spaced row indices over the list (endpoint excluded).
idxs_to_keep = np.linspace(0, len(p_meta['path']), num=N_SAMPLES, endpoint=False, dtype=int)
Esempio n. 21
0
    plt.savefig(out_name, bbox_inches='tight', pad_inches=0)
    plt.savefig(out_name.replace('.pdf', '.png'), bbox_inches='tight', pad_inches=0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Image folder
    # Lambda value and descriptor dimensionality selecting which result
    # setting to plot.
    parser.add_argument('--l',
                        default=0.0, type=float)
    # NOTE: argparse applies type=int to string defaults, so '256' becomes 256.
    parser.add_argument('--d',
                        default='256', type=int)
    parser.add_argument('--checkpoints',
                        default='residual')
    parser.add_argument('--log_dir', default=os.path.join(fs_root(),  'logs', 'roc'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(),  'plots'))

    FLAGS = parser.parse_args()

    # Define each FLAG as a variable (generated automatically with util.flags_to_globals(FLAGS))
    flags_to_globals(FLAGS)

    LOG_DIR = FLAGS.log_dir
    OUT_ROOT = FLAGS.out_root

    # Ensure the log and output directories exist before plotting.
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    if not os.path.exists(OUT_ROOT):
        os.makedirs(OUT_ROOT)
import os

import matplotlib

# FIX: select the non-interactive Agg backend *before* pyplot is imported.
# Calling matplotlib.use() after `import matplotlib.pyplot` can be too late
# for the backend switch to take effect (notably on older matplotlib).
matplotlib.use('Agg')

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

from learnlarge.util.helper import fs_root
from learnlarge.util.io import load_pickle, save_pickle, save_csv

# Which shard of the train_ref data this run processes.
part_idx = 0

# Input: per-shard pickle under lv/offtheshelf ('lv' presumably latent
# vectors of the train_ref split -- verify against the producer script).
lv_file = os.path.join(
    fs_root(),
    'data/learnlarge/lv/offtheshelf/train_ref/{}.pickle'.format(part_idx))
# Input: pre-mined training tuples for the same shard.
tuple_file = os.path.join(
    fs_root(),
    'data/learnlarge/tuples/10000/train_ref_{}_10_25.pickle'.format(part_idx))
# Output: scale-factor pickle ...
out_file = os.path.join(
    fs_root(),
    'data/learnlarge/scale_factor/offtheshelf_train_ref_10000_{}_10_25.pickle'.
    format(part_idx))
# ... and its accompanying CSV metadata.
out_file_meta = os.path.join(
    fs_root(),
    'data/learnlarge/scale_factor/offtheshelf_train_ref_10000_{}_10_25.csv'.
    format(part_idx))
out_file_hist = os.path.join(
    fs_root(),
    'data/learnlarge/scale_factor/offtheshelf_train_ref_10000_{}_10_25.png'.
    anchor_xy = np.array([train_xy[int(i), :] for i in anchor_indices['idx']])

    out_img = os.path.join(out_root, '{}_{}_{}_{}.png'.format(s, mode, r, epoch))
    plt.clf()
    plt.clf()
    f, (ax1) = plt.subplots(1, 1, sharey=False)
    f.set_figheight(50)
    f.set_figwidth(50)
    ax1.scatter(anchor_xy[:, 0], anchor_xy[:, 1], c=np.arange(len(anchor_xy)))
    plt.savefig(out_img)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--shuffled_root', default=os.path.join(fs_root(), 'data/learnlarge/shuffled'))
    parser.add_argument('--cluster_root', default=os.path.join(fs_root(), 'data/learnlarge/clusters'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/anchors'))
    parser.add_argument('--r', type=int, default=1)
    parser.add_argument('--max_epoch', type=int, default=5)
    args = parser.parse_args()

    flags_to_args(args)

    cluster_root = args.cluster_root
    out_root = args.out_root
    r = args.r
    shuffled_root = args.shuffled_root
    max_epoch = args.max_epoch

    if not os.path.exists(out_root):
Esempio n. 24
0
import argparse
import os

from learnlarge.util.helper import fs_root
from learnlarge.util.sge import run_one_job

parser = argparse.ArgumentParser()
# Index of the job/setting to launch in this invocation.
parser.add_argument('--i', default=0, type=int)
args = parser.parse_args()

# Experiment-series tag used for naming.
SERIES = '1M'

# Training entry point and log destination.
train_script = os.path.join(fs_root(), 'code/learnlarge/train/train.py')
out_root = os.path.join(fs_root(), 'logs/learnlarge')

# Number of jobs per queue
middle = 4
long = 2

# One dict per training configuration to launch.
settings = list()

# SOTA Baselines
settings.append({
    'loss': 'triplet',
    'vlad_cores': 64,
    'hard_positives_per_tuple': 0
})  # Triplet
settings.append({
    'loss': 'quadruplet',
    'vlad_cores': 64,
    'hard_positives_per_tuple': 0
Esempio n. 25
0
import argparse
import os

from learnlarge.util.helper import fs_root
from learnlarge.util.sge import run_one_job

parser = argparse.ArgumentParser()
# Index selecting which checkpoint/job to run.
parser.add_argument('--i', default=0, type=int)
args = parser.parse_args()

# Grad-CAM evaluation entry point submitted as a job below.
script = os.path.join(fs_root(), 'code/learnlarge/evaluate/011_any_grad_cam.py')


checkpoints = [

    # Trained on cold
    'triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

    'lazy_triplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_quadruplet_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

    'sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'h_sum_5e-6_all_conditions_angle_1-4_cu_LRD0.9-5_noPCA_lam0.5_me0',

    # Trained on small oxford
    'triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',

    'lazy_triplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
    'lazy_quadruplet_5e-6_full-10-25_cu_LRD0.9-5_noPCA_lam0.5_me0',
Esempio n. 26
0
from learnlarge.util.helper import fs_root
from learnlarge.util.helper import mkdir
from learnlarge.util.io import load_csv, save_csv, load_pickle
from learnlarge.util.meta import get_xy

# LaTeX-backed pgf output for publication-quality figures.
matplotlib.use("pgf")
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})

# FIX: np.random.RandomState(seed=42) constructed a generator object and
# immediately discarded it, so nothing was actually seeded.  Seed the global
# NumPy RNG to get the reproducibility the original line intended.
np.random.seed(42)

out_root = os.path.join(fs_root(), 'list_plots')
list_out_root = os.path.join(fs_root(), 'lists')

mkdir(out_root)
mkdir(list_out_root)

# Subplot grid for the list visualisations.
rows = 1
cols = 4

f, axs = plt.subplots(rows, cols, constrained_layout=False)
# Normalise axs to 2-D so axs[r, c] indexing works for any grid shape.
if rows == 1:
    axs = np.expand_dims(axs, 0)
if cols == 1:
    axs = np.expand_dims(axs, 1)
f.tight_layout()
f.set_figheight(2.5)  # 8.875in textheight
Esempio n. 27
0
    out_file = os.path.join(out_root, '{}_{}_{}.csv'.format(s, mode, r))
    save_csv(out_meta, out_file)


if __name__ == '__main__':
    import ast  # stdlib; needed so --num_clusters can be parsed from the CLI

    parser = argparse.ArgumentParser()
    parser.add_argument('--train_ref_date',
                        default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument('--test_ref_date',
                        default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument(
        '--val_ref_date',
        default='2014-05-14-13-50-20')  # Sunny, alternate-route
    parser.add_argument('--in_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/shuffled'))
    parser.add_argument('--out_root',
                        default=os.path.join(fs_root(),
                                             'data/learnlarge/clusters'))
    # FIX: `type=dict` cannot parse a command-line string (dict('...') raises
    # TypeError), so --num_clusters could never be overridden from the CLI.
    # ast.literal_eval safely parses a dict literal such as
    # "{'train': 7000, 'test': 2000, 'val': 1000}"; the dict default is
    # returned unchanged when the flag is omitted.
    parser.add_argument('--num_clusters',
                        type=ast.literal_eval,
                        default={
                            'train': 7000,
                            'test': 2000,
                            'val': 1000
                        })
    parser.add_argument('--r', type=int, default=5)
    args = parser.parse_args()

    flags_to_args(args)
Esempio n. 28
0
        xy = np.array([(e, n) for e, n in zip(meta['northing'], meta['easting'])], dtype=float)

        ref_ids = greedy(xy, 1)
        print(len(ref_ids))

        save_txt('\n'.join(['{}'.format(i) for i in ref_ids]), os.path.join(out_root))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_ref_date', default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument('--test_ref_date', default='2014-12-02-15-30-08')  # Overcast
    parser.add_argument('--val_ref_date', default='2014-05-14-13-50-20')  # Sunny, alternate-route
    parser.add_argument('--r', default='5', type=float)  # Sunny, alternate-route
    parser.add_argument('--in_root', default=os.path.join(fs_root(), 'data/learnlarge/clean_merged_parametrized'))
    parser.add_argument('--out_root', default=os.path.join(fs_root(), 'data/learnlarge/localization_references'))
    parser.add_argument('--img_root',
                        default=os.path.join(fs_root(), 'datasets/oxford_512'))
    args = parser.parse_args()

    flags_to_args(args)

    img_root = args.img_root
    in_root = args.in_root
    out_root = args.out_root
    r = args.r
    test_ref_date = args.test_ref_date
    train_ref_date = args.train_ref_date
    val_ref_date = args.val_ref_date