# Imports required by the functions below. Project-local helpers such as
# RegistrationPairDatabase, create_registration_dataset and
# parallel_starmap_progressbar are assumed to come from the surrounding package.
import argparse
import datetime
import json
import pathlib
import random
import re

import numpy as np


def import_kitti_pointclouds_cli():
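    """Import consecutive point cloud pairs from a KITTI-style dataset into the registration pair database."""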
    parser = argparse.ArgumentParser()
    parser.add_argument('--root',
                        help='Location of the registration result database',
                        type=str)
    parser.add_argument(
        '--pointcloud_root',
        help='Location of the point clouds designated by the pairs',
        type=str)
    parser.add_argument(
        '--pointcloud_dataset_type',
        help='The type of pointcloud dataset we import pointclouds from',
        type=str,
        default='ethz')
    parser.add_argument('-j', '--n-cores', default=8, type=int)
    parser.add_argument('--sequence-name', type=str)
    args = parser.parse_args()

    db = RegistrationPairDatabase(args.root)
    pointcloud_dataset = create_registration_dataset(
        args.pointcloud_dataset_type, args.pointcloud_root)

    # Each work item pairs cloud i with cloud i + 1, hence the - 1.
    data = [(db, pointcloud_dataset, args.sequence_name, i)
            for i in range(pointcloud_dataset.n_clouds() - 1)]
    parallel_starmap_progressbar(import_one_kitti_pointcloud_pair,
                                 data,
                                 n_cores=args.n_cores)
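# Example invocation (hypothetical paths, assuming this function is wired up as
# a console entry point named import_kitti_pointclouds):
#
#   import_kitti_pointclouds --root ./registration_db \
#       --pointcloud_root ./kitti/sequences/00/velodyne \
#       --pointcloud_dataset_type kitti --sequence-name 00 -j 8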
def predict_covariances(pairs, descriptor_algo, model):
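    """Compute a descriptor for each pair in parallel, then predict covariances with the model."""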
    descriptors = parallel_starmap_progressbar(predict_covariance,
                                               [(pair, descriptor_algo)
                                                for pair in pairs])

    # Stack the per-pair descriptors into a (n_pairs, n_features) matrix.
    descriptors_np = np.empty((len(pairs), len(descriptor_algo.labels())))
    for i, descriptor in enumerate(descriptors):
        descriptors_np[i] = descriptor

    predictions = model.predict(descriptors_np)

    return predictions
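# A minimal usage sketch, assuming `db` is an open RegistrationPairDatabase,
# `descriptor_algo` is a descriptor built by descriptor_factory, and `model`
# was loaded with model_from_file; all three names are placeholders here:
#
#   pairs = db.registration_pairs()
#   covariances = predict_covariances(pairs, descriptor_algo, model)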
def prediction_cli():
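    """Run a trained model on a saved dataset and write predictions plus per-pair vtk output."""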
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', help='Path to the dataset used to train the model', type=str)
    parser.add_argument('model', help='Path to the trained model', type=str)
    parser.add_argument('output', help='Where to output the vtk files', type=str)
    parser.add_argument('--registration-database', help='Path to the registration pair database, used to fetch the point clouds that give context to the generated covariances.', type=str)
    parser.add_argument('--filter', help='Locations to filter during the query', type=str, default='')
    args = parser.parse_args()

    print('Loading dataset...')
    with open(args.dataset) as f:
        dataset = json.load(f)
    print('Done')

    filtering_re = re.compile(args.filter)

    model = model_from_file(args.model, 'cello')

    eprint(model)

    xs = np.array(dataset['data']['xs'])

    pairs = dataset['data']['pairs']
    selection = np.ones(len(pairs), dtype=bool)
    for i, pair in enumerate(pairs):
        # Drop pairs whose dataset name matches the --filter regex.
        if args.filter and filtering_re.match(pair['dataset']):
            selection[i] = False

    eprint('Total pairs: {}'.format(len(selection)))
    eprint('Pairs kept after filtering: {}'.format(selection.sum()))

    xs = xs[selection]
    # Keep the pair metadata aligned with the filtered xs.
    pairs = [pair for pair, keep in zip(pairs, selection) if keep]

    ys_predicted = model.predict(xs)
    np.save(args.output + '/predictions.npy', ys_predicted)

    db = RegistrationPairDatabase(args.registration_database)

    parallel_starmap_progressbar(generate_one_prediction,
                                 [(i, ys_predicted[i], pairs[i], db, args.output)
                                  for i in range(len(ys_predicted))])
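# Example invocation (hypothetical paths; 'cello' is the model type hard-coded
# above in the model_from_file call):
#
#   prediction_cli dataset.json model.cello ./out \
#       --registration-database ./registration_db --filter '^apartment'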
def make_sampled_trajectory(pairs, clustering_algo, n_cores=8):
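    """Sample one registration result per pair and chain the transforms into a trajectory."""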
    trajectory = np.empty((len(pairs) + 1, 4, 4))
    trajectory[0] = np.identity(4)

    clusterings = parallel_starmap_progressbar(compute_clustering,
                                               [(pair, clustering_algo)
                                                for pair in pairs],
                                               n_cores=n_cores)

    for i in range(len(pairs)):
        pair = pairs[i]
        results = pair.registration_results()

        # Draw one registration result from the cluster found for this pair
        # and chain it onto the trajectory.
        t = random.choice(results[clusterings[i]])
        trajectory[i + 1] = trajectory[i] @ t

    return trajectory
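# A minimal sketch of how make_sampled_trajectory could be used to visualize
# trajectory uncertainty by drawing several samples; make_n_sampled_trajectories
# is a hypothetical helper, not part of the original code:
def make_n_sampled_trajectories(pairs, clustering_algo, n_samples=10, n_cores=8):
    # Each call re-samples one registration result per pair, so the spread of
    # the returned trajectories reflects the spread of the clustered results.
    return np.stack([
        make_sampled_trajectory(pairs, clustering_algo, n_cores=n_cores)
        for _ in range(n_samples)
    ])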
def filter_failing_registrations(registration_pairs,
                                 clustering,
                                 filter_threshold=0.3,
                                 n_cores=8):
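    """Drop pairs whose sampled-distribution mean strays too far from the ground truth."""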
    distribution_algo = SamplingDistributionComputationAlgorithm(clustering)

    distances = parallel_starmap_progressbar(distance_mean_ground_truth,
                                             [(x, distribution_algo)
                                              for x in registration_pairs],
                                             n_cores=n_cores)

    filtered_registration_pairs = []
    for pair, distance in zip(registration_pairs, distances):
        if distance < filter_threshold:
            filtered_registration_pairs.append(pair)
        else:
            print('Rejecting {} because it has a distance of {}'.format(
                pair, distance))

    return filtered_registration_pairs
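# Usage sketch, assuming `clustering` is a RegistrationPairClusteringAdapter
# like the one built in generate_examples_cli below:
#
#   good_pairs = filter_failing_registrations(db.registration_pairs(),
#                                             clustering,
#                                             filter_threshold=0.3)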
def generate_examples_cli():
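    """Compute descriptors (and optionally sampled covariances) for every pair and write a JSON dataset."""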
    parser = argparse.ArgumentParser()
    parser.add_argument('--output',
                        type=str,
                        help='Where to store the examples',
                        default='dataset.json')
    parser.add_argument('--input',
                        type=str,
                        help='Where the registration results are stored',
                        required=True)
    parser.add_argument('--exclude',
                        type=str,
                        help='Regex of names of datasets to exclude',
                        default='')
    parser.add_argument('-j',
                        '--n_cores',
                        type=int,
                        help='N of cores to use for the computation',
                        default=8)
    parser.add_argument('-c',
                        '--config',
                        type=str,
                        required=True,
                        help='Path to a json config for the descriptor.')
    parser.add_argument('--descriptor-only',
                        action='store_true',
                        help='Generate only the descriptor.')
    parser.add_argument('--rotations',
                        '-r',
                        nargs='+',
                        type=float,
                        default=[0.0])
    parser.add_argument('--max-mean-gt-distance', type=float, default=0.3)
    args = parser.parse_args()

    np.set_printoptions(linewidth=120)

    db = RegistrationPairDatabase(args.input, args.exclude)
    registration_pairs = db.registration_pairs()

    output_path = pathlib.Path(args.output)

    clustering_algorithm = CenteredClusteringAlgorithm(radius=1.0,
                                                       k=16,
                                                       n_seed_init=32)
    clustering_algorithm.seed_selector = 'localized'
    clustering_algorithm.rescale = True

    clustering_algorithm = RegistrationPairClusteringAdapter(
        clustering_algorithm)

    distribution_algorithm = FixedCenterSamplingDistributionAlgorithm(
        clustering_algorithm)
    covariance_algo = DistributionAlgorithmToCovarianceAlgorithm(
        distribution_algorithm)

    with open(args.config) as f:
        descriptor_config = json.load(f)

    descriptor = descriptor_factory(descriptor_config)

    eprint('Using descriptor: {}'.format(repr(descriptor)))
    eprint('Generating with rotations: {}'.format(args.rotations))

    examples = []
    pairs = []
    for x in registration_pairs:
        examples.extend([(x, descriptor, covariance_algo, args.descriptor_only,
                          r) for r in args.rotations])
        pairs.extend([{
            'dataset': x.dataset,
            'reading': x.reading,
            'reference': x.reference,
            'rotation': r
        } for r in args.rotations])

    # Shuffle examples and the pair metadata together so they stay aligned.
    order = list(range(len(examples)))
    random.shuffle(order)
    examples = [examples[i] for i in order]
    pairs = [pairs[i] for i in order]

    results = parallel_starmap_progressbar(generate_one_example,
                                           examples,
                                           n_cores=args.n_cores)

    # Sequential fallback, handy when debugging generate_one_example:
    # results = [generate_one_example(*x) for x in examples]

    xs = []
    ys = []
    for x, y in results:
        xs.append(x.tolist())
        if not args.descriptor_only:
            ys.append(y.tolist())

    output_dict = {
        'metadata': {
            'what': 'learning_dataset',
            'date': str(datetime.datetime.today()),
            'descriptor': str(descriptor),
            'covariance_algo': str(covariance_algo),
            'descriptor_labels': descriptor.labels(),
            'descriptor_config': descriptor_config,
            'filter': args.exclude
        },
        'statistics': {
            'n_examples': len(xs)
        },
        'data': {
            'pairs': pairs,
            'xs': xs,
        }
    }

    if not args.descriptor_only:
        output_dict['data']['ys'] = ys

    with open(args.output, 'w') as dataset_file:
        json.dump(output_dict, dataset_file)
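# Example invocation (hypothetical paths; descriptor_config.json must match the
# schema expected by descriptor_factory):
#
#   generate_examples --input ./registration_db --output dataset.json \
#       -c descriptor_config.json -r 0.0 1.57 3.14 -j 8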