def parse_input(args):
    path = utils.create_directory_if_needed()
    for arg in args:
        if os.path.isfile(arg):
            target_filename = path + arg.split("/")[-1].strip()
            shutil.copy(arg, target_filename)
            execute_all(target_filename)
            if MELD_AVAILABLE:
                utils.call_meld(MELD_PATH, arg, target_filename)
        elif os.path.isdir(arg):
            current_dir = r'./'
            if arg == current_dir:
                print('Please provide a directory that does not include the scripts.')
            else:
                target_dirname = path + 'src'
                if os.path.exists(target_dirname):
                    shutil.rmtree(target_dirname)
                shutil.copytree(arg, target_dirname)
                file_list = utils.get_file_list(target_dirname, (".c"))
                cpu_num = multiprocessing.cpu_count()
                pool = multiprocessing.Pool(processes=cpu_num, maxtasksperchild=2)
                result = []
                for file_name in file_list:
                    logging.info('-----------------------------')
                    logging.info('Current file is ' + file_name)
                    result.append(pool.apply_async(execute_all,(file_name,)))
                pool.close()
                pool.join()
                for res in result:
                    print(res.get())
                print("Sub-process(es) done!")
                if MELD_AVAILABLE:
                    utils.call_meld(MELD_PATH, arg, target_dirname)
        else:
            logging.info('Input ' + arg + ' is not valid.')
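For reference, here is the apply_async/close/join pattern used above in isolation; `_demo_worker` is a hypothetical stand-in for execute_all:

import multiprocessing

def _demo_worker(file_name):
    # Hypothetical stand-in for execute_all(); returns a short status string.
    return 'processed {}'.format(file_name)

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(), maxtasksperchild=2)
    # apply_async returns AsyncResult objects; collect them and call .get()
    # only after close()/join(), exactly as parse_input does above.
    results = [pool.apply_async(_demo_worker, (name,)) for name in ['a.c', 'b.c']]
    pool.close()
    pool.join()
    for res in results:
        print(res.get())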
Example #2
def main():
    """
    Note: The main function implementation
    :return:
    """

    current_work_dir = os.getcwd()
    print('current work dir:%s' % (current_work_dir))

    # print(os.path.realpath(__file__))
    root_dir = os.path.dirname(os.path.realpath(__file__))
    print('root dir:%s' % (root_dir))
    lib_dir = os.path.join(root_dir, 'lib')
    sys.path.append(lib_dir)
    # print(sys.path)

    lowest_required_version = (3, 4)
    utils.check_py_version(lowest_required_version)

    file_list = utils.get_file_list(root_dir)
    for file in file_list:
        file_path = os.path.join(root_dir, file)
        # print('file:%-20s size:%-5d byte' % (file, os.path.getsize(file_path)))       # file size is byte
        print('file:{:<20s} size:{:<5d} byte'.format(
            file, os.path.getsize(file_path)))
    print("[get_file_list] Test ok.")

    # ROOT_DIR = '/home/zhonglin/workspace/github/pythonTest'
    ROOT_DIR = '/data/workspace/github/pythonTest/'
    # ROOT_DIR = ''
    new_dir = utils.remove_the_slash_in_dir(ROOT_DIR)
    print('new_dir:{}'.format(new_dir))
    print("[remove_the_slash_in_dir] Test ok.")

    test_dir = "/data/python_test"
    utils.check_dir(test_dir)

    source_file_path = "/data/workspace/github/pythonTest/lib/utils.py"
    dest_file_path = "/data/python_test/"
    utils.shutil_copy_file(source_file_path, dest_file_path)
    if os.path.isdir(dest_file_path):
        print(os.listdir(dest_file_path))
    else:
        print(os.listdir(os.path.dirname(dest_file_path)))
    print("[shutil_copy_file] Test ok.")

    # Note: os.path.dirname() is usually combined with os.path.abspath(__file__) when building paths relative to this file
    xml_file_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'data')
    utils.check_dir(xml_file_dir)
    xml_file_path = os.path.join(xml_file_dir, 'result.xml')
    utils.contruct_xml_file(xml_file_path)
    print("[contruct_xml_file] Test ok.")

    anno_result = utils.parse_xml_file(xml_file_path)
    print(anno_result)
    print("[parse_xml_file] Test ok.")
Example #3
    def __init__(self, args):

        self.files = []
        self.root = args.source_path
        self.use_mutuals = args.mutuals
        save_path = os.path.join(self.root, 'results', args.method)
        save_path += '/mutuals/' if args.mutuals else '/all/'

        logging.info("Loading the eval data from {}!".format(self.root))

        scene_names = get_folder_list(
            os.path.join(self.root, 'correspondences'))

        for folder in scene_names:
            curr_scene_name = folder.split('/')[-1]
            if os.path.exists(
                    os.path.join(save_path, curr_scene_name,
                                 'traj.txt')) and not args.overwrite:
                logging.info(
                    'Trajectory for scene {} already exists and will not be recomputed.'
                    .format(curr_scene_name))
            else:
                if args.only_gt_overlaping:
                    gt_pairs, gt_traj = read_trajectory(
                        os.path.join(self.root, 'raw_data', curr_scene_name,
                                     "gt.log"))
                    for idx_1, idx_2, _ in gt_pairs:
                        self.files.append(
                            os.path.join(
                                folder, curr_scene_name + '_{}_{}.npz'.format(
                                    str(idx_1).zfill(3),
                                    str(idx_2).zfill(3))))

                else:
                    corr_files = get_file_list(folder)
                    for corr in corr_files:
                        self.files.append(corr)
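The constructor expects an argparse-style namespace; a minimal sketch of matching arguments, with attribute names taken from what __init__ accesses above (defaults and the class name are illustrative assumptions):

import argparse

parser = argparse.ArgumentParser()
# Attribute names mirror those read in __init__; defaults are illustrative.
parser.add_argument('--source_path', type=str, required=True)
parser.add_argument('--method', type=str, default='fcgf')
parser.add_argument('--mutuals', action='store_true')
parser.add_argument('--overwrite', action='store_true')
parser.add_argument('--only_gt_overlaping', action='store_true')
args = parser.parse_args()
# dataset = EvalDataset(args)  # hypothetical name of the class defining __init__ above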
Example #4
def extract_features_batch(model, source_path, target_path, dataset,
                           voxel_size, device):
    """
    Extracts the per-point features in the FCGF feature space and saves them to the predefined path

    Args:
        model (FCGF model instance): model used to infer the descriptors
        source_path (str): path to the raw files
        target_path (str): where to save the extracted data
        dataset (str): name of the dataset
        voxel_size (float): voxel size used to create the sparse tensor
        device (pytorch device): cuda or cpu
    """

    source_path = os.path.join(source_path, dataset, 'raw_data')
    target_path = os.path.join(target_path, dataset, 'features')

    ensure_dir(target_path)

    folders = get_folder_list(source_path)

    assert len(folders) > 0, 'Could not find {} folders under {}'.format(
        dataset, source_path)

    logging.info(folders)
    list_file = os.path.join(target_path, 'list.txt')
    f = open(list_file, 'w')
    model.eval()

    for fo in folders:
        scene_name = fo.split()
        files = get_file_list(fo, '.ply')
        fo_base = os.path.basename(fo)
        ensure_dir(os.path.join(target_path, fo_base))

        f.write('%s %d\n' % (fo_base, len(files)))
        for i, fi in enumerate(files):
            save_fn = '%s_%03d' % (fo_base, i)
            if os.path.exists(
                    os.path.join(target_path, fo_base, save_fn + '.npz')):
                print(
                    'Correspondence file already exists, moving to the next example.'
                )
            else:
                # Extract features from a file
                pcd = o3d.io.read_point_cloud(fi)

                if i % 100 == 0:
                    logging.info(f'{i} / {len(files)}: {save_fn}')

                xyz_down, feature = extract_features(model,
                                                     xyz=np.array(pcd.points),
                                                     rgb=None,
                                                     normal=None,
                                                     voxel_size=voxel_size,
                                                     device=device,
                                                     skip_check=True)

                np.savez_compressed(os.path.join(target_path, fo_base,
                                                 save_fn),
                                    points=np.array(pcd.points),
                                    xyz=xyz_down,
                                    feature=feature.detach().cpu().numpy())

    f.close()
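For downstream use, the saved archives can be read back with np.load; the keys match the np.savez_compressed call above, while the concrete path and scene name here are only illustrative:

import numpy as np

sample = np.load('dataset/features/scene_name/scene_name_000.npz')
points = sample['points']     # raw point coordinates
xyz_down = sample['xyz']      # voxel-downsampled coordinates
feature = sample['feature']   # FCGF descriptor per downsampled point
print(points.shape, xyz_down.shape, feature.shape)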
Example #5
def extract_precomputed_training_data(dataset, source_path, target_path,
                                      voxel_size, inlier_threshold):
    """
    Prepares the data for training the filtering networks with precomputed correspondences (without FCGF descriptor) 

    Args:
        dataset (str): name of the dataset
        source_path (str): path to the raw data
        target_path (str): path to where the extracted data will be saved
        voxel_size (float): voxel size that was used to compute the features
        inlier_threshold (float): threshold to determine if a correspondence is an inlier or outlier
    """
    source_path = os.path.join(source_path, dataset, 'raw_data')
    features_path = os.path.join(target_path, dataset, 'features')
    correspondence_path = os.path.join(target_path, dataset, 'correspondences')
    target_path = os.path.join(target_path, dataset, 'training_data')

    ensure_dir(target_path)

    # Check that the GT global transformation matrices are available and that the FCGF features are computed
    folders = get_folder_list(source_path)

    assert len(folders) > 0, 'Could not find {} folders under {}'.format(
        dataset, source_path)

    logging.info('Found {} scenes from the {} dataset!'.format(
        len(folders), dataset))

    for fo in folders:

        scene_name = fo.split()
        fo_base = os.path.basename(fo)
        ensure_dir(os.path.join(target_path, fo_base))

        pc_files = get_file_list(fo, '.ply')
        trans_files = get_file_list(fo, '.txt')
        assert len(pc_files) <= len(
            trans_files
        ), 'The number of point cloud files exceeds the number of GT trans parameter files!'

        feat_files = get_file_list(os.path.join(features_path, fo_base),
                                   '.npz')
        assert len(pc_files) == len(
            feat_files
        ), 'Features for scene {} are either not computed or some are missing!'.format(
            fo_base)

        coor_files = get_file_list(os.path.join(correspondence_path, fo_base),
                                   '.npz')

        assert len(coor_files) == int(
            (len(feat_files) * (len(feat_files) - 1)) / 2
        ), 'Correspondence files for the scene {} are missing. First run the correspondence extraction!'.format(
            fo_base)

        # Loop over all fragment pairs and compute the training data
        for idx_1 in range(len(pc_files)):
            for idx_2 in range(idx_1 + 1, len(pc_files)):
                if os.path.exists(
                        os.path.join(
                            target_path, fo_base,
                            '{}_{}_{}.npz'.format(fo_base,
                                                  str(idx_1).zfill(3),
                                                  str(idx_2).zfill(3)))):
                    logging.info(
                        'Training file already exists, moving to the next example.'
                    )
                    continue

                data = np.load(
                    os.path.join(
                        correspondence_path, fo_base,
                        '{}_{}_{}.npz'.format(fo_base,
                                              str(idx_1).zfill(3),
                                              str(idx_2).zfill(3))))
                xs = data['xs']
                mutuals = data['mutuals']
                ratios = data['ratios']

                # Get the GT transformation parameters
                t_1 = np.genfromtxt(os.path.join(
                    source_path, fo_base,
                    'cloud_bin_{}.info.txt'.format(idx_1)),
                                    skip_header=1)
                t_2 = np.genfromtxt(os.path.join(
                    source_path, fo_base,
                    'cloud_bin_{}.info.txt'.format(idx_2)),
                                    skip_header=1)

                # Load the raw point clouds
                pc_1 = load_point_cloud(os.path.join(
                    source_path, fo_base, 'cloud_bin_{}.ply'.format(idx_1)),
                                        data_type='numpy')
                pc_2 = load_point_cloud(os.path.join(
                    source_path, fo_base, 'cloud_bin_{}.ply'.format(idx_2)),
                                        data_type='numpy')

                pc_1_tr = transform_point_cloud(pc_1, t_1[0:3, 0:3],
                                                t_1[0:3, 3].reshape(-1, 1))
                pc_2_tr = transform_point_cloud(pc_2, t_2[0:3, 0:3],
                                                t_2[0:3, 3].reshape(-1, 1))

                overlap_ratio = compute_overlap_ratio(pc_1_tr,
                                                      pc_2_tr,
                                                      np.eye(4),
                                                      method='FCGF',
                                                      voxel_size=voxel_size)

                # Estimate pairwise transformation parameters
                t_3 = np.matmul(np.linalg.inv(t_2), t_1)

                r_matrix = t_3[0:3, 0:3]
                t_vector = t_3[0:3, 3]

                # Transform the keypoints of the first point cloud
                pc_1_key_tr = transform_point_cloud(xs[:, 0:3], r_matrix,
                                                    t_vector.reshape(-1, 1))

                # Compute the residuals after the transformation
                y_s = np.sqrt(
                    np.sum(np.square(pc_1_key_tr - xs[:, 3:6]), axis=1))

                # Inlier percentage
                inlier_ratio = np.where(
                    y_s < inlier_threshold)[0].shape[0] / y_s.shape[0]
                inlier_ratio_mutuals = np.where(
                    y_s[mutuals.astype(bool).reshape(-1)] < inlier_threshold
                )[0].shape[0] / np.sum(mutuals)

                np.savez_compressed(os.path.join(
                    target_path, fo_base, 'cloud_{}_{}.npz'.format(
                        str(idx_1).zfill(3),
                        str(idx_2).zfill(3))),
                                    R=r_matrix,
                                    t=t_vector,
                                    x=xs,
                                    y=y_s,
                                    mutuals=mutuals,
                                    inlier_ratio=inlier_ratio,
                                    inlier_ratio_mutuals=inlier_ratio_mutuals,
                                    ratios=ratios,
                                    overlap=overlap_ratio)
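For intuition, a toy version of the residual and inlier-ratio computation above, using the identity pose as the (assumed) ground-truth transformation and random keypoints:

import numpy as np

rng = np.random.default_rng(0)
src_key = rng.random((100, 3))
tgt_key = src_key + rng.normal(scale=0.05, size=(100, 3))  # noisy matches

# Residual per correspondence, as in y_s above (no rotation/translation needed
# here because the ground-truth pose is the identity in this toy setup).
residuals = np.sqrt(np.sum(np.square(src_key - tgt_key), axis=1))

inlier_threshold = 0.1
inlier_ratio = np.mean(residuals < inlier_threshold)
print('inlier ratio: {:.2f}'.format(inlier_ratio))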
Example #6
def run_correspondence_extraction(dataset, source_path, target_path,
                                  n_correspondences, idx):
    """
    Computes the correspondences in the FCGF space together with the mutuals and ratios side information

    Args:
        dataset (str): name of the dataset
        source_path (str): path to the raw data
        target_path (str): path to where the extracted data will be saved
        n_correspondences (int): number of points to sample
        idx (int): index of the scene, used for parallel processing

    """

    # Initialize all the paths
    features_path = os.path.join(target_path, dataset, 'features')
    target_path = os.path.join(target_path, dataset, 'correspondences')

    fo = get_folder_list(source_path)[idx]
    fo_base = os.path.basename(fo)
    files = get_file_list(os.path.join(features_path, fo_base), '.npz')

    ensure_dir(os.path.join(target_path, fo_base))

    # Loop over all fragment pairs and compute the training data
    for idx_1 in range(len(files)):
        for idx_2 in range(idx_1 + 1, len(files)):
            if os.path.exists(
                    os.path.join(
                        target_path, fo_base,
                        '{}_{}_{}.npz'.format(fo_base,
                                              str(idx_1).zfill(3),
                                              str(idx_2).zfill(3)))):
                logging.info(
                    'Correspondence file already exists, moving to the next example.'
                )

            else:
                pc_1_data = np.load(
                    os.path.join(
                        features_path, fo_base,
                        fo_base + '_{}.npz'.format(str(idx_1).zfill(3))))
                pc_1_features = pc_1_data['feature']
                pc_1_keypoints = pc_1_data['xyz']

                pc_2_data = np.load(
                    os.path.join(
                        features_path, fo_base,
                        fo_base + '_{}.npz'.format(str(idx_2).zfill(3))))
                pc_2_features = pc_2_data['feature']
                pc_2_keypoints = pc_2_data['xyz']

                # Sample with replacement if fewer than n_correspondences points are in the point cloud
                if pc_1_features.shape[0] >= n_correspondences:
                    inds_1 = np.random.choice(pc_1_features.shape[0],
                                              n_correspondences,
                                              replace=False)
                else:
                    inds_1 = np.random.choice(pc_1_features.shape[0],
                                              n_correspondences,
                                              replace=True)

                if pc_2_features.shape[0] >= n_correspondences:
                    inds_2 = np.random.choice(pc_2_features.shape[0],
                                              n_correspondences,
                                              replace=False)
                else:
                    inds_2 = np.random.choice(pc_2_features.shape[0],
                                              n_correspondences,
                                              replace=True)

                pc_1_features = pc_1_features[inds_1, :]
                pc_2_features = pc_2_features[inds_2, :]
                pc_1_key = pc_1_keypoints[inds_1, :]
                pc_2_key = pc_2_keypoints[inds_2, :]

                # Find the correspondences using nearest neighbor search in the feature space (two-way)
                nn_search = NearestNeighbors(n_neighbors=1,
                                             metric='minkowski',
                                             p=2)
                nn_search.fit(pc_2_features)
                nn_dists, nn_indices = nn_search.kneighbors(
                    X=pc_1_features, n_neighbors=2, return_distance=True)

                nn_search.fit(pc_1_features)
                nn_dists_1, nn_indices_1 = nn_search.kneighbors(
                    X=pc_2_features, n_neighbors=2, return_distance=True)

                ol_nn_ids = np.where(
                    (nn_indices[nn_indices_1[:, 0], 0] -
                     np.arange(pc_1_features.shape[0])) == 0)[0]

                # Initialize mutuals and ratios
                mutuals = np.zeros((n_correspondences, 1))
                mutuals[ol_nn_ids] = 1
                ratios = nn_dists[:, 0] / nn_dists[:, 1]

                # Concatenate the correspondence coordinates
                xs = np.concatenate(
                    (pc_1_key[nn_indices_1[:, 0], :], pc_2_key), axis=1)

                np.savez_compressed(os.path.join(
                    target_path, fo_base,
                    '{}_{}_{}.npz'.format(fo_base,
                                          str(idx_1).zfill(3),
                                          str(idx_2).zfill(3))),
                                    x=xs,
                                    mutuals=mutuals,
                                    ratios=ratios)
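A compact, self-contained illustration of the two-way (mutual) nearest-neighbor test and the ratio computation implemented above, run on random toy features with scikit-learn:

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(0)
feat_1 = rng.random((50, 32))
feat_2 = rng.random((50, 32))

nn = NearestNeighbors(n_neighbors=2)
nn.fit(feat_2)
dists_12, idx_12 = nn.kneighbors(feat_1)                # forward matches 1 -> 2
nn.fit(feat_1)
idx_21 = nn.kneighbors(feat_2, return_distance=False)   # back matches 2 -> 1

# Point i in feat_1 is a mutual match when its forward match maps back to i.
mutuals = idx_21[idx_12[:, 0], 0] == np.arange(feat_1.shape[0])
# Lowe-style ratio of the closest to the second-closest feature distance.
ratios = dists_12[:, 0] / dists_12[:, 1]
print(mutuals.mean(), ratios[:5])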