Code example #1
def run_seq(cfg, scene, seq):
    print("    Start {}".format(seq))

    # Collect the per-frame color/depth images of this sequence.
    seq_folder = osp.join(cfg.dataset_root, scene, seq)
    color_names = uio.list_files(seq_folder, '*.color.png')
    color_paths = [osp.join(seq_folder, cf) for cf in color_names]
    depth_names = uio.list_files(seq_folder, '*.depth.png')
    depth_paths = [osp.join(seq_folder, df) for df in depth_names]

    # The depth frames determine the fragment count; each fragment covers
    # cfg.frames_per_frag consecutive frames.
    n_frames = len(depth_paths)
    n_frags = int(math.ceil(float(n_frames) / cfg.frames_per_frag))

    out_folder = osp.join(cfg.out_root, scene, seq)
    uio.may_create_folder(out_folder)

    intrinsic_path = osp.join(cfg.dataset_root, scene, 'camera-intrinsics.txt')

    if cfg.threads > 1:
        from joblib import Parallel, delayed

        # Dispatch one job per fragment across worker processes.
        Parallel(n_jobs=cfg.threads)(
            delayed(process_single_fragment)(cfg, color_paths, depth_paths,
                                             frag_id, n_frags, intrinsic_path,
                                             out_folder)
            for frag_id in range(n_frags))
    else:
        for frag_id in range(n_frags):
            process_single_fragment(cfg, color_paths, depth_paths, frag_id,
                                    n_frags, intrinsic_path, out_folder)

    print("    Finished {}".format(seq))
Code example #2
def main2(args):
    print "Running..."

    # get extracted wikipedia file pathnames
    subdirs = io.list_directories(config.WIKIPEDIA_EXTRACTED_DIR)
    if args.letters:
        subdirs = [p for p in subdirs if p[-2] in config.WIKIPEDIA_SUB_DIR_PREFIXES]
    pathnames = []
    for sb in subdirs:
        pathnames.extend(io.list_files(sb))
    pathnames.sort()
    
    # create thread-safe queue
    queue = parallel.create_queue(pathnames)

    # create workers
    workers = []
    for i in range(args.threads):
        logger = log.create_logger('LOGGER %d' % i, 'log_%d.log' % i)
        sent_tokenizer = parser.get_sentence_tokenizer()
        if args.verbose:
            logger.setLevel(logging.DEBUG)
        worker = SentenceIndexWorker(queue, sent_tokenizer, logger)
        workers.append(worker)

    # begin
    for worker in workers:
        worker.start()

    # block until all files have been processed
    queue.join()

    print "Done!"
Code example #3
def run_seq(cfg, scene, seq):
    print('    Start {}'.format(seq))

    out_folder = osp.join(cfg.out_root, scene, seq)
    if osp.exists(out_folder):
        print('    Skip...')
        return
    uio.make_clean_folder(out_folder)

    temp_folder = osp.join(cfg.temp_root, scene, seq)
    uio.make_clean_folder(temp_folder)

    print('    Start downsampling and computing FPFH')
    pcd_names = uio.list_files(osp.join(cfg.dataset_root, scene, seq),
                               'cloud_bin_*.ply',
                               alphanum_sort=True)
    if cfg.threads > 1:
        from joblib import Parallel, delayed

        Parallel(n_jobs=cfg.threads)(
            delayed(downsample_and_compute_fpfh)(cfg, scene, seq, pcd_name)
            for pcd_name in pcd_names)
    else:
        for pcd_name in pcd_names:
            downsample_and_compute_fpfh(cfg, scene, seq, pcd_name)

    print('    Start matching FPFH')
    overlaps = uio.list_files(osp.join(cfg.overlap_root, scene, seq),
                              'cloud_bin_*.npy',
                              alphanum_sort=True)
    overlap_pcds = [npy_file[:-4].split('-') for npy_file in overlaps]
    if cfg.threads > 1:
        from joblib import Parallel, delayed

        Parallel(n_jobs=cfg.threads)(
            delayed(match_fpfh)(cfg, scene, seq, pcd_pair[0], pcd_pair[1])
            for pcd_pair in overlap_pcds)
    else:
        for pcd_pair in overlap_pcds:
            match_fpfh(cfg, scene, seq, pcd_pair[0], pcd_pair[1])

    print('    Start collating kpts')
    collate_kpts(cfg, scene, seq)

    print("    Finished {}".format(seq))
Code example #4
def run_seq(cfg, scene, seq):
    print("    Start {}".format(seq))

    pcd_names = uio.list_files(osp.join(cfg.dataset_root, scene, seq),
                               '*.ply',
                               alphanum_sort=True)
    if cfg.threads > 1:
        from joblib import Parallel, delayed

        Parallel(n_jobs=cfg.threads)(
            delayed(compute_radius)(cfg, scene, seq, pcd_name)
            for pcd_name in pcd_names)
    else:
        for pcd_name in pcd_names:
            compute_radius(cfg, scene, seq, pcd_name)

    print("    Finished {}".format(seq))
Code example #5
def list_pcd_pairs(root_dir, excluded_scenes=None):
    res = list()
    for scene in uio.list_folders(root_dir, alphanum_sort=False):
        if excluded_scenes is not None and scene in excluded_scenes:
            continue
        for seq in uio.list_folders(osp.join(root_dir, scene),
                                    alphanum_sort=True):
            seq_folder = osp.join(root_dir, scene, seq)
            for npy_file in uio.list_files(seq_folder,
                                           'cloud_bin_*.npy',
                                           alphanum_sort=True):
                cloud_name_i, cloud_name_j = npy_file[:-4].split('-')
                res.append(
                    OverlapMeta(scene=scene,
                                seq=seq,
                                cloud_name_i=cloud_name_i,
                                cloud_name_j=cloud_name_j,
                                full_path=osp.join(seq_folder, npy_file)))
    return res
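
OverlapMeta is not defined in these excerpts; judging by the keyword arguments above, a namedtuple along these lines would fit (an assumption):

from collections import namedtuple

OverlapMeta = namedtuple(
    'OverlapMeta', ['scene', 'seq', 'cloud_name_i', 'cloud_name_j', 'full_path'])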
Code example #6
def list_pcds(root_dir, excluded_scenes=None):
    res = list()
    for scene in uio.list_folders(root_dir, alphanum_sort=False):
        if excluded_scenes is not None and scene in excluded_scenes:
            continue
        for seq in uio.list_folders(osp.join(root_dir, scene),
                                    alphanum_sort=True):
            seq_folder = osp.join(root_dir, scene, seq)
            pcloud_names = uio.list_files(seq_folder,
                                          '*.ply',
                                          alphanum_sort=True)
            metas = [
                PCloudMeta(
                    scene=scene,
                    seq=seq,
                    name=pn[:-4],
                    full_path=osp.join(seq_folder, pn),
                ) for pn in pcloud_names
            ]
            res.extend(metas)
    return res
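
Likewise, PCloudMeta used above can be assumed to be a namedtuple matching the fields it is built with:

from collections import namedtuple

PCloudMeta = namedtuple('PCloudMeta', ['scene', 'seq', 'name', 'full_path'])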
Code example #7
def downsample_pcds(in_root, out_root, max_points):
    import open3d as o3d
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)

    uio.may_create_folder(out_root)

    pcd_names = uio.list_files(in_root, 'cloud_bin_*.ply', alphanum_sort=True)
    pcd_stems = list()
    for pname in pcd_names:
        pstem = pname[:-4]
        pcd_path = osp.join(in_root, pname)
        pose_path = osp.join(in_root, pstem + '.pose.npy')
        # Load the fragment and apply its pose so all clouds share one frame.
        pcd = o3d.io.read_point_cloud(pcd_path)
        pose = np.load(pose_path)
        pcd.transform(pose)

        down_pcd = Cloud.downsample_from(pcd, max_points)
        down_pcd.save(osp.join(out_root, pstem + '.npz'))

        pcd_stems.append(pstem)

    return pcd_stems
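
Cloud.downsample_from and Cloud.save are project helpers not shown in these excerpts. A rough NumPy stand-in for the assumed behavior, capping the cloud at max_points and persisting it as .npz (illustrative only):

import numpy as np

def downsample_to_npz(pcd, max_points, out_path):
    points = np.asarray(pcd.points)
    if len(points) > max_points:
        # Random subsampling; the real helper may use a smarter strategy.
        idx = np.random.choice(len(points), max_points, replace=False)
        points = points[idx]
    np.savez(out_path, points=points)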
Code example #8
def run_scene_matching(scene_name,
                       seq_name,
                       desc_type,
                       pcloud_root,
                       desc_root,
                       out_root,
                       inlier_thresh=0.1,
                       n_threads=1):
    out_folder = osp.join(out_root, desc_type)
    uio.may_create_folder(out_folder)

    out_filename = '{}-{}-{:.2f}'.format(scene_name, seq_name, inlier_thresh)
    if Path(osp.join(out_folder, out_filename + '.pkl')).is_file():
        print('[*] {} already exists. Skip computation.'.format(out_filename))
        return osp.join(out_folder, out_filename)

    fragment_names = uio.list_files(osp.join(pcloud_root, scene_name,
                                             seq_name),
                                    '*.ply',
                                    alphanum_sort=True)
    fragment_names = [fn[:-4] for fn in fragment_names]
    n_fragments = len(fragment_names)

    register_results = [
        RegisterResult(
            frag1_name=fragment_names[i],
            frag2_name=fragment_names[j],
            num_inliers=None,
            inlier_ratio=None,
            gt_flag=None,
        ) for i in range(n_fragments) for j in range(i + 1, n_fragments)
    ]
    poses = read_log(osp.join(pcloud_root, scene_name, seq_name, 'gt.log'))

    if n_threads > 1:
        from joblib import Parallel, delayed

        results = Parallel(n_jobs=n_threads)(
            delayed(register_fragment_pair)(scene_name, seq_name, k.frag1_name,
                                            k.frag2_name, desc_type, poses,
                                            pcloud_root, desc_root,
                                            inlier_thresh)
            for k in register_results)
        for k, res in enumerate(results):
            register_results[k].num_inliers = res[0]
            register_results[k].inlier_ratio = res[1]
            register_results[k].gt_flag = res[2]
    else:
        for k in range(len(register_results)):
            num_inliers, inlier_ratio, gt_flag = register_fragment_pair(
                scene_name, seq_name, register_results[k].frag1_name,
                register_results[k].frag2_name, desc_type, poses, pcloud_root,
                desc_root, inlier_thresh)
            register_results[k].num_inliers = num_inliers
            register_results[k].inlier_ratio = inlier_ratio
            register_results[k].gt_flag = gt_flag

    with open(osp.join(out_folder, out_filename + '.pkl'), 'wb') as fh:
        to_save = {
            'register_results': register_results,
            'scene_name': scene_name,
            'seq_name': seq_name,
            'desc_type': desc_type,
            'inlier_thresh': inlier_thresh,
            'n_threads': n_threads,
        }
        pickle.dump(to_save, fh, protocol=pickle.HIGHEST_PROTOCOL)
    with open(osp.join(out_folder, out_filename + '.txt'), 'w') as fh:
        for k in register_results:
            fh.write('{} {} {} {:.8f} {}\n'.format(k.frag1_name, k.frag2_name,
                                                   k.num_inliers,
                                                   k.inlier_ratio, k.gt_flag))

    return osp.join(out_folder, out_filename)
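
The per-pair text file written at the end lends itself to quick summaries. For instance, a feature-match recall over the ground-truth pairs could be computed as below; the 0.05 ratio threshold and the textual encoding of gt_flag are assumptions:

def feature_match_recall(txt_path, ratio_thresh=0.05):
    # Fraction of ground-truth pairs whose inlier ratio clears the threshold.
    hits, total = 0, 0
    with open(txt_path) as fh:
        for line in fh:
            _, _, _, inlier_ratio, gt_flag = line.split()
            if gt_flag in ('True', '1'):  # gt_flag encoding assumed
                total += 1
                hits += float(inlier_ratio) >= ratio_thresh
    return hits / total if total else 0.0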
Code example #9
def collate_kpts(cfg, scene, seq):
    import open3d as o3d
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)

    kpt_pair_folder = osp.join(cfg.out_root, scene, seq)

    # Gather, per fragment, every keypoint index that appears in a match pair.
    pcd_kpt_indices = defaultdict(list)
    for npy_file in uio.list_files(kpt_pair_folder, '*.npy', alphanum_sort=True):
        pcd_stem_i, pcd_stem_j = npy_file[:-4].split('-')
        kpt_pairs = np.load(osp.join(kpt_pair_folder, npy_file))
        pcd_kpt_indices[pcd_stem_i].extend(kpt_pairs[:, 0].tolist())
        pcd_kpt_indices[pcd_stem_j].extend(kpt_pairs[:, 1].tolist())
    if len(pcd_kpt_indices) < 1:
        return

    scene_points = list()
    scene_normals = list()
    labels = list()
    for pcd_stem, kpt_indices in pcd_kpt_indices.items():
        # Load the fragment, move it into the scene frame, and normalize its normals.
        pcd = o3d.io.read_point_cloud(osp.join(cfg.dataset_root, scene, seq, pcd_stem + '.ply'))
        pose = np.load(osp.join(cfg.dataset_root, scene, seq, pcd_stem + '.pose.npy'))
        pcd.transform(pose)
        pcd.normalize_normals()

        uni_kpt_indices = list(set(kpt_indices))
        scene_points.append(np.asarray(pcd.points)[uni_kpt_indices, :])
        scene_normals.append(np.asarray(pcd.normals)[uni_kpt_indices, :])
        labels.extend(list(zip([pcd_stem] * len(uni_kpt_indices), uni_kpt_indices)))
    scene_points = np.concatenate(scene_points, axis=0)
    scene_normals = np.concatenate(scene_normals, axis=0)

    print('    {} scene points/normals'.format(len(scene_points)))

    # Greedily cluster nearby keypoints: each unvisited point claims its
    # unvisited neighbors within cfg.dist_thresh.
    kdtree = o3d.geometry.KDTreeFlann(scene_points.T)
    num_points = len(scene_points)
    flags = [False] * num_points
    identities = list()
    for i in range(num_points):
        if flags[i]: continue

        [_, nn_indices,
         nn_dists2] = kdtree.search_radius_vector_3d(scene_points[i, :], cfg.dist_thresh)
        nn_indices = [j for j in nn_indices if not flags[j]] 

        nn_normal = [scene_normals[j] for j in nn_indices]
        if len(nn_normal) < 2: continue
        nn_normal = np.mean(np.asarray(nn_normal), axis=0)
        nn_normal /= np.linalg.norm(nn_normal)

        # Keep only neighbors whose normals agree with the local mean normal,
        # grouped by the fragment they came from.
        nn_pcd_indices = defaultdict(list)
        for j in nn_indices:
            if np.arccos(np.clip(np.dot(scene_normals[j], nn_normal), -1,
                                 1)) > cfg.angle_thresh:
                continue
            nn_pcd_indices[labels[j][0]].append(labels[j][1])
        if len(nn_pcd_indices) < 2: continue

        # One identity per cluster: a representative keypoint index per fragment.
        identities.append({k: random.choice(v) for k, v in nn_pcd_indices.items()})

        for j in nn_indices:
            flags[j] = True
        flags[i] = True

    with open(osp.join(cfg.out_root, scene, '{}.kpts.pkl'.format(seq)), 'wb') as fh:
        to_save = {'identities': identities}
        pickle.dump(to_save, fh, protocol=pickle.HIGHEST_PROTOCOL)

    print('    {} identities'.format(len(identities)))
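
Each saved identity maps a fragment stem to one representative keypoint index inside a spatial cluster. Reading the result back is straightforward (the path and values below are hypothetical):

import pickle

with open('kitchen/seq-01.kpts.pkl', 'rb') as fh:  # hypothetical scene/seq
    identities = pickle.load(fh)['identities']
print(identities[0])  # e.g. {'cloud_bin_3': 152, 'cloud_bin_7': 4087}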