def cli():
    """
    Read registration results as JSON from stdin and save selected lie-vector
    dimensions of the results as a vtk pointcloud.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('output', type=str, help='Where to save the vtk file.')
    parser.add_argument('--dim1', type=int, default=0)
    parser.add_argument('--dim2', type=int, default=1)
    parser.add_argument('--dim3', type=int, default=2)
    parser.add_argument('--rotation', action='store_true', help='Print rotation dimensions instead of translation.')
    parser.add_argument('--initial-estimate', action='store_true', help='Print initial estimates instead of results.')
    parser.add_argument('--center_around_gt', action='store_true', help='Center the results around the ground truth')
    parser.add_argument('-rz', '--rotation_around_z', type=float, default=0.0)
    parser.add_argument('-k', help='N of neighbors for density computation', type=int, default=12)
    args = parser.parse_args()

    json_data = json.load(sys.stdin)

    # Rotation components occupy dims 3-5 of the lie vectors; otherwise use
    # the user-selected dimensions.
    if args.rotation:
        dims = (3, 4, 5)
    else:
        dims = (args.dim1, args.dim2, args.dim3)

    left_multiply = rotation_around_z_matrix(args.rotation_around_z)
    right_multiply = np.linalg.inv(rotation_around_z_matrix(args.rotation_around_z))

    if args.center_around_gt:
        left_multiply = np.dot(left_multiply, np.linalg.inv(np.array(json_data['metadata']['ground_truth'])))

    # BUG FIX: args.initial_estimate is already a bool (action='store_true').
    # Testing `bool in POSITIVE_STRINGS` (a container of strings) was always
    # False, so the --initial-estimate flag was silently ignored.
    points = positions_of_registration_data(json_data, args.initial_estimate, left_multiply=left_multiply, right_multiply=right_multiply)

    print(points)

    data_dict = empty_to_none(data_dict_of_registration_data(json_data, k=args.k))
    pointcloud_to_vtk(points[:, dims], args.output, data_dict)
# Beispiel #2
# 0
def to_vtk(dataset, clustering, output, center_around_gt=False):
    """
    Export a clustering over a registration dataset as vtk files.

    Writes the translation and rotation point clouds (with cluster labels
    attached) and covariance ellipsoids for the central cluster, all using
    `output` as the filename prefix.

    :arg dataset: The full dataset to which the clustering is applied.
    :arg clustering: The clustering data row.
    :arg output: Filename prefix for the generated vtk files.
    :arg center_around_gt: If True, center the points around the dataset's
        ground-truth transform.
    """
    lie_points = positions_of_registration_data(dataset)

    if center_around_gt:
        ground_truth = np.array(dataset['metadata']['ground_truth'])
        lie_points = center_lie_around_t(lie_points, ground_truth)

    cluster_labels = clusters_of_points(clustering['clustering'], len(lie_points))

    per_point_data = data_dict_of_registration_data(dataset)
    per_point_data['clustering'] = np.ascontiguousarray(cluster_labels)

    pointcloud_to_vtk(lie_points[:, 0:3],
                      '{}_translation'.format(output),
                      data=per_point_data)
    pointcloud_to_vtk(lie_points[:, 3:6],
                      '{}_rotation'.format(output),
                      data=per_point_data)

    central_mean = se3log(np.array(clustering['mean_of_central']))
    central_covariance = np.array(clustering['covariance_of_central'])

    distribution_to_vtk_ellipsoid(central_mean[0:3], central_covariance[0:3, 0:3],
                                  '{}_translation_ellipsoid'.format(output))
    distribution_to_vtk_ellipsoid(central_mean[3:6], central_covariance[3:6, 3:6],
                                  '{}_rotation_ellipsoid'.format(output))
def generate_one_prediction(i, y_predicted, pair_id, registration_pair_database, output):
    """
    Write vtk visualizations for a single covariance prediction.

    Saves ellipsoids (centered at the origin) for the predicted translation
    and rotation covariance blocks, plus the reading point cloud of the
    corresponding registration pair, all numbered with index `i`.
    """
    index_suffix = str(i).zfill(4)

    distribution_to_vtk_ellipsoid(np.zeros(3), y_predicted[0:3, 0:3],
                                  output + '/translation_predicted_' + index_suffix)
    distribution_to_vtk_ellipsoid(np.zeros(3), y_predicted[3:6, 3:6],
                                  output + '/rotation_predicted_' + index_suffix)

    registration_pair = registration_pair_database.get_registration_pair(
        pair_id['dataset'], pair_id['reading'], pair_id['reference'])

    reading_cloud = registration_pair.points_of_reading()
    pointcloud_to_vtk(reading_cloud, output + '/reading_{}'.format(index_suffix))
def cli():
    """
    Inspect the density structure of a registration result file.

    Loads registration results, computes the density of each result's lie
    vector, then for increasing density thresholds fits a gaussian to the
    surviving results and writes point clouds and covariance ellipsoids.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str, help='Registration results')
    parser.add_argument('output', type=str, help='Output folder')
    args = parser.parse_args()

    with open(args.input) as f:
        data_dict = json.load(f)

    lie = lie_vectors_of_registrations(data_dict)
    density = density_of_points(lie)

    # SE(3) group element corresponding to each lie vector.
    group = np.zeros((len(lie), 4, 4))
    for i, l in enumerate(lie):
        group[i] = se3.exp(l)

    print(lie)
    print(group)
    print(density)

    sorted_densities = sorted(density)
    sample_densities = np.linspace((np.min(density)), (np.max(density)), 50)
    print(sample_densities)

    print(np.mean(lie, axis=0))

    # BUG FIX: iterate over the actual number of results instead of the
    # hard-coded range(0, 4000, 100), which raised IndexError whenever the
    # input held fewer than 4000 registrations.
    for i in range(0, len(sorted_densities), 100):
        threshold = sorted_densities[i]
        results_subset = group[density > threshold]

        # Only fit a gaussian when enough samples survive the threshold.
        if len(results_subset) > 10:
            mean, covariance = se3.gaussian_from_sample(results_subset)

            pointcloud_to_vtk(
                lie[density > threshold][:, 0:3],
                'points_{}'.format(i),
                data={'density': np.ascontiguousarray(density[density > threshold])})
            distribution_to_vtk_ellipsoid(
                se3.log(mean)[0:3], covariance[0:3, 0:3],
                'ellipsoid_{}'.format(i))
            print(covariance[0:3, 0:3])
def apply_mask_cli():
    """
    Visualize a point-selection mask applied to a registered pointcloud pair.

    Writes the reference cloud and the (transformed) reading cloud, plus one
    vtk file per non-empty mask on each side, into the output directory.
    """
    parser = argparse.ArgumentParser(description='Apply as point selection mask on a pair of pointclouds.')
    parser.add_argument('database', type=str, help='Location of the registration result database to use.')
    parser.add_argument('dataset', type=str)
    parser.add_argument('reading', type=int)
    parser.add_argument('reference', type=int)
    parser.add_argument('--output', type=str, default='.', help='Output directory of the visualization.')
    parser.add_argument('--radius', type=float, default=0.1, help='For the overlap mask generator, the max distance between points for them to be neighbors.')
    parser.add_argument('--range', type=float, help='For the angle mask generator, the range of angles accepted.', default=0.0)
    parser.add_argument('--offset', type=float, help='For the angle mask generator, the offset of angles accepted.', default=0.0)
    parser.add_argument('-c', '--config', type=str, help='Path to a json config for the mask')
    parser.add_argument('-r', '--rotation', type=float, help='Rotation around the z axis to apply to the cloud pair before computing the descriptor, in radians.', default=0.0)
    args = parser.parse_args()

    db = RegistrationPairDatabase(args.database)
    pair = db.get_registration_pair(args.dataset, args.reading, args.reference)

    pair.rotation_around_z = args.rotation

    reading = pair.points_of_reading()
    reference = pair.points_of_reference()

    with open(args.config) as f:
        config = json.load(f)
        print(config)
        mask_generator = mask_factory(config)

    reading_masks, reference_masks = mask_generator.compute(pair)

    # Hoist: the pair's transform is used four times below; compute it once.
    # NOTE(review): assumes pair.transform() is deterministic — confirm.
    transform = pair.transform()

    eprint('Transform of pair: ')
    eprint(transform)

    pointcloud_to_vtk(reference, args.output + '/reference')
    pointcloud_to_vtk(transform_points(reading, transform), args.output + '/reading')

    # Idiom: repr() over __repr__(), enumerate(zip(...)) over double indexing.
    mask_name = repr(mask_generator)
    for i, (reading_mask, reference_mask) in enumerate(zip(reading_masks, reference_masks)):
        if reference_mask.any():
            pointcloud_to_vtk(reference[reference_mask],
                              args.output + '/' + '{}_reference_{}'.format(mask_name, i))

        if reading_mask.any():
            transformed_masked_reading = transform_points(reading[reading_mask], transform)

            pointcloud_to_vtk(transformed_masked_reading,
                              args.output + '/' + '{}_reading_{}'.format(mask_name, i))
# Beispiel #6
# 0
def dataset_to_vtk(dataset, filename, dims=(0, 1, 2)):
    """
    Save the selected lie-vector dimensions of a registration dataset as a
    vtk pointcloud, with the dataset's per-point data attached.
    """
    lie_points = positions_of_registration_data(dataset)
    point_data = empty_to_none(data_dict_of_registration_data(dataset))
    pointcloud_to_vtk(lie_points[:, dims], filename, data=point_data)
def save_one_frame(points, output, data_dict, i, pre_transform):
    """
    Write frame `i` of a point sequence to a zero-padded, numbered vtk file.

    NOTE(review): pre_transform is accepted but never used here — confirm
    whether the caller expects it to be applied.
    """
    frame_filename = '{}_{:03d}'.format(output, i)
    print(frame_filename)

    frame_data = empty_to_none(data_dict)
    pointcloud_to_vtk(points[i], frame_filename, frame_data)