예제 #1
0
def chunk_and_align_all(d):
    """Chunk one pair of matched runs and align every chunk in parallel.

    Reads the nearest-neighbour match table from d['match_file'], walks
    run 1 forward in steps of REALIGN_EVERY export-step units, looks up
    the closest matched frame in run 2 for each step, and dispatches
    chunk_and_align over all chunk pairs via joblib.

    d is expected to carry the keys 'match_file', 'gps_file1',
    'gps_file2', 'rss1' and 'rss2' -- inferred from the reads below;
    confirm against the caller.
    """
    print d['match_file']
    rss1 = d['rss1']
    rss2 = d['rss2']
    # Per-run directories of downsampled point clouds with normals.
    pcd_dir1 = pjoin(dset_dir_from_rss(rss1), 'pcd_downsampled_normals')
    pcd_dir2 = pjoin(dset_dir_from_rss(rss2), 'pcd_downsampled_normals')

    # Read and save initial transform files
    # Both ENU frames appear to be anchored at gps_file1's origin so the
    # two runs share a common reference frame -- the repeated gps_file1
    # argument looks deliberate, but confirm get_enu0's
    # (gps_file, origin_file) signature.
    enu1 = get_enu0(d['gps_file1'], d['gps_file1'])
    enu2 = get_enu0(d['gps_file2'], d['gps_file1'])

    # 'matches' is read as an Nx2 array; the dict below maps column 1
    # (presumably run-1 frame indices, given start1 is taken from it) to
    # column 0 (run-2 frame indices) -- verify against the writer of
    # match_file.
    h5f = h5py.File(d['match_file'], 'r')
    nn_matches = h5f['matches'][...]
    nn_dict = dict(zip(nn_matches[:, 1], nn_matches[:, 0]))
    h5f.close()

    assert EXPORT_START == 0
    # Frame index -> export-step index. NOTE(review): relies on Python 2 /
    # integer truncating division -- confirm EXPORT_STEP and the indices
    # are ints.
    start1 = nn_matches[0, 1] / EXPORT_STEP

    args_all = list()
    chunk_num = 0
    # Stop CHUNK_SIZE short of the last matched step so every chunk fits.
    for k in range(start1, nn_matches[-1, 1] / EXPORT_STEP - CHUNK_SIZE, REALIGN_EVERY):
        #def chunk_and_align(start1, start2, enu1, enu2, rss1, rss2, pcd_dir1, pcd_dir2, chunk_num):
        try:
            k2 = get_closest_key_value(k * EXPORT_STEP, nn_dict, max_shift=10)
        except:
            # TODO Think this sometimes occurs near end of alignments
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # consider catching the specific lookup failure instead.
            break
        args_all.append((k, k2 / EXPORT_STEP, enu1, enu2, rss1, rss2, pcd_dir1, pcd_dir2, chunk_num))
        chunk_num += 1

    # Fan the chunk alignments out across NUM_CPUS worker processes.
    Parallel(n_jobs=NUM_CPUS)(delayed(chunk_and_align)(*args) for args in args_all)
    # For debugging
    '''
예제 #2
0
from os.path import join as pjoin
from pipeline_utils import dset_dir_from_rss, print_and_call
from pipeline_config import CAMERA, NUM_CPUS
from graphslam_config import MATCH_JSON_DATA, GRAPHSLAM_VIDEOS_DIR, GRAPHSLAM_MAPS_DIR,\
        GRAPHSLAM_EVAL_DIR
from joblib import Parallel, delayed

def disp(s):
    """Print *s* to stdout.

    NOTE(review): defined but not referenced in this script -- possibly a
    debugging helper kept for interactive use.
    """
    # print(s) behaves identically under Python 2 (parenthesized single
    # argument) and Python 3 (function call), unlike the old `print s`.
    print(s)

if __name__ == '__main__':
    # Build one project_map_on_video.py invocation per recorded match,
    # then execute them all in parallel.
    commands = []
    for m in MATCH_JSON_DATA:
        stem_a = '--'.join(m['rss1'])
        stem_b = '--'.join(m['rss2'])
        pair_stem = '+'.join((stem_a, stem_b))
        # Source video for run 1, suffixed with the camera number.
        vid_in = pjoin(dset_dir_from_rss(m['rss1']),
                       '_'.join(m['rss1'][1:]) + '%d.avi' % CAMERA)
        vid_out = pjoin(GRAPHSLAM_VIDEOS_DIR, pair_stem + '.avi')
        npz_path = pjoin(GRAPHSLAM_MAPS_DIR, pair_stem + '.npz')
        h5_a = pjoin(GRAPHSLAM_EVAL_DIR, stem_a + '.h5')
        h5_b = pjoin(GRAPHSLAM_EVAL_DIR, stem_b + '.h5')
        commands.append(
            'python project_map_on_video.py {vid} {align_data} {labels1} {labels2} {out} --cam {cam}'.format(
                vid=vid_in, align_data=npz_path, labels1=h5_a,
                labels2=h5_b, out=vid_out, cam=CAMERA))
    # Leave one CPU free for the parent process.
    Parallel(n_jobs=NUM_CPUS-1)(delayed(print_and_call)(c) for c in commands)
예제 #3
0
from os.path import join as pjoin
from pipeline_utils import dset_dir_from_rss, print_and_call
from pipeline_config import CAMERA, NUM_CPUS
from graphslam_config import MATCH_JSON_DATA, GRAPHSLAM_VIDEOS_DIR, GRAPHSLAM_MAPS_DIR,\
        GRAPHSLAM_EVAL_DIR
from joblib import Parallel, delayed


def disp(s):
    """Print *s* to stdout.

    NOTE(review): defined but not referenced in this script -- possibly a
    debugging helper kept for interactive use.
    """
    # print(s) behaves identically under Python 2 (parenthesized single
    # argument) and Python 3 (function call), unlike the old `print s`.
    print(s)


if __name__ == '__main__':
    cmds = list()
    for match in MATCH_JSON_DATA:
        video_file = pjoin(dset_dir_from_rss(match['rss1']),
                           '_'.join(match['rss1'][1:]) + '%d.avi' % CAMERA)
        fstem1 = '--'.join(match['rss1'])
        fstem2 = '--'.join(match['rss2'])
        outvideo = pjoin(GRAPHSLAM_VIDEOS_DIR, '+'.join(
            (fstem1, fstem2)) + '.avi')
        align_data = pjoin(GRAPHSLAM_MAPS_DIR, '+'.join(
            (fstem1, fstem2)) + '.npz')
        labels1 = pjoin(GRAPHSLAM_EVAL_DIR, fstem1 + '.h5')
        labels2 = pjoin(GRAPHSLAM_EVAL_DIR, fstem2 + '.h5')
        cmd = 'python project_map_on_video.py {vid} {align_data} {labels1} {labels2} {out} --cam {cam}'.format(
            vid=video_file,
            align_data=align_data,
            out=outvideo,
            labels1=labels1,
            labels2=labels2,