def eval_packing(dep_list, cfg):
    """Queue the job that packs up evaluation results.

    The queued job waits on every job id in ``dep_list`` (if any).
    """
    # Scheduler expects a comma-separated id string; None = no dependency.
    dep_str = ','.join(dep_list) if dep_list else None

    print(' -- Packing results')
    create_and_queue_jobs([create_sh_cmd('pack_res.py', cfg)], cfg, dep_str)
# --- (removed stray extraction artifact: "示例#2" / "0") ---
def eval_viz_stereo(dep_list, cfg, debug=False):
    """Queue the stereo visualization job (first run only).

    ``debug`` selects the debug variant of the visualization script.
    """
    # Visualizations are generated once, by run 0 only.
    if cfg.run > 0:
        return

    # Scheduler expects a comma-separated id string; None = no dependency.
    dep_str = ','.join(dep_list) if dep_list else None

    # The checks on existing files run inside, as there are many of them
    script = 'viz_stereo_debug.py' if debug else 'viz_stereo.py'
    suffix = ' (debug)' if debug else ''
    print(' -- Generating stereo visualizations' + suffix)
    create_and_queue_jobs([create_sh_cmd(script, cfg)], cfg, dep_str)
def eval_viz_colmap(dep_list, cfg):
    """Queue the multi-view (COLMAP) visualization job (first run only)."""
    # Visualizations are generated once, by run 0 only.
    if cfg.run > 0:
        return

    # Scheduler expects a comma-separated id string; None = no dependency.
    dep_str = ','.join(dep_list) if dep_list else None

    # The checks on existing files run inside, as there are many of them
    print(' -- Generating multi-view visualizations')
    create_and_queue_jobs([create_sh_cmd('viz_colmap.py', cfg)], cfg, dep_str)
# --- (removed stray extraction artifact: "示例#4" / "0") ---
def eval_multiview(dep_list, cfg, bag_size_list, bag_size_num, job_dict):
    """Queue COLMAP multiview evaluation jobs, batched by estimated runtime.

    Iterates over every (bag size, bag id) combination, accumulates commands
    for bags whose results are missing, and flushes a batch to the scheduler
    whenever the estimated runtime approaches the wall-time limit. If another
    program already holds this job key, its job ids are returned instead.

    Returns the list of queued job ids (also stored in ``job_dict`` under the
    multiview job key so concurrent runs can detect the work in flight).
    """
    colmap_jobs = []
    job_key = create_job_key('multiview', cfg)
    # Scheduler expects a comma-separated id string; None = no dependency.
    dep_str = None
    if len(dep_list) > 0:
        dep_str = ','.join(dep_list)

    # Wall-time budget: parse cfg.cc_time (assumed 'HH:MM' — TODO confirm)
    # ONCE instead of on every inner-loop iteration, keeping a 30-minute
    # safety margin. Loop-invariant hoist; behavior is unchanged.
    t_split = [float(t) for t in cfg.cc_time.split(':')]
    time_budget = t_split[0] + t_split[1] / 60 - 0.5

    # COLMAP evaluation
    #
    # TODO: For colmap, should we queue twice?
    cfg_bag = deepcopy(cfg)
    cmd_list = []
    cfg_list = []
    print(' -- The multiview task  will work on these bags {}'.format([
        '{} (x{})'.format(b, n) for b, n in zip(bag_size_list, bag_size_num)
    ]))
    for _bag_size, _num_in_bag in zip(bag_size_list, bag_size_num):
        for _bag_id in range(_num_in_bag):
            cfg_bag.bag_size = _bag_size
            cfg_bag.bag_id = _bag_id

            # Check if colmap evaluation is complete -- queue
            if not is_colmap_complete(cfg_bag):
                # Another program already queued this key: defer to it and
                # hand back its job ids as our dependency list.
                if job_key in job_dict:
                    print(' -- {} is already running on {}'.format(
                        'multiview', job_dict[job_key]))
                    return job_dict[job_key].split('-')

                cmd_list += [create_sh_cmd('eval_colmap.py', cfg_bag)]
                cfg_list += [deepcopy(cfg_bag)]
            else:
                print(' -- Multiview: bag size {} bag id {} results'
                      ' already exists'.format(_bag_size, _bag_id))
            # Flush the accumulated commands (and reset both lists) once the
            # estimated runtime would leave less than 30 min of wall time.
            if estimate_runtime(cfg_list) >= time_budget:
                colmap_jobs += [create_and_queue_jobs(cmd_list, cfg, dep_str)]
                cmd_list = []
                cfg_list = []
    # Queue any leftover jobs for this bag
    if len(cmd_list) > 0:
        colmap_jobs += [create_and_queue_jobs(cmd_list, cfg, dep_str)]
    # Save colmap jobs list under its job key so other runs see it.
    if len(colmap_jobs) != 0:
        job_dict[job_key] = '-'.join(colmap_jobs)
    return colmap_jobs
def create_eval_jobs(dep_list, mode, cfg, job_dict):
    """Queue the ``compute_<mode>`` job unless done or already queued.

    Returns a (possibly empty) list of job ids the caller can depend on.
    """
    # Guard: results already on disk, nothing to queue.
    if is_job_complete(mode, cfg):
        print(' -- File {} already exists'.format(mode))
        return []

    # Guard: another program has already queued this exact job.
    job_key = create_job_key(mode, cfg)
    if job_key in job_dict:
        print(' -- {} is already running on {}'.format(mode,
                                                       job_dict[job_key]))
        return [job_dict[job_key]]

    # Scheduler expects a comma-separated id string; None = no dependency.
    dep_str = ','.join(dep_list) if dep_list else None

    # Check if matches are computed -- queue (dependent on previous
    # job)
    print(' -- Computing {}'.format(mode))
    job = create_and_queue_jobs(
        [create_sh_cmd('compute_{}.py'.format(mode), cfg)], cfg, dep_str)
    job_dict[job_key] = job
    return [job]