Example No. 1
def shot_from_json(key, obj, cameras):
    """
    Read shot from a json object
    """
    pose = types.Pose()
    pose.rotation = obj["rotation"]
    if "translation" in obj:
        pose.translation = obj["translation"]

    metadata = types.ShotMetadata()
    metadata.orientation = obj.get("orientation")
    metadata.capture_time = obj.get("capture_time")
    metadata.gps_dop = obj.get("gps_dop")
    metadata.gps_position = obj.get("gps_position")

    shot = types.Shot()
    shot.id = key
    shot.metadata = metadata
    shot.pose = pose
    shot.camera = cameras.get(obj["camera"])

    if 'scale' in obj:
        shot.scale = obj['scale']
    if 'covariance' in obj:
        shot.covariance = np.array(obj['covariance'])
    if 'merge_cc' in obj:
        shot.merge_cc = obj['merge_cc']
    if 'vertices' in obj and 'faces' in obj:
        shot.mesh = types.ShotMesh()
        shot.mesh.vertices = obj['vertices']
        shot.mesh.faces = obj['faces']

    return shot
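
A minimal usage sketch for shot_from_json, assuming a hand-written entry in the reconstruction.json format; the camera key, image name, and field values below are hypothetical placeholders.

from opensfm import types

# Hypothetical camera table and JSON entry; real values would come from an
# OpenSfM reconstruction.json file.
cameras = {'camera1': types.PerspectiveCamera()}
obj = {
    'rotation': [0.0, 0.0, 0.0],
    'translation': [0.0, 0.0, 0.0],
    'camera': 'camera1',
    'orientation': 1,
    'capture_time': 0.0,
}
shot = shot_from_json('01.jpg', obj, cameras)
print(shot.id, shot.pose.rotation, shot.metadata.orientation)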
Example No. 2
def test_bundle_alignment_prior():
    """Test that cameras are aligned to have the Y axis pointing down."""
    camera = pygeometry.Camera.create_perspective(1.0, 0.0, 0.0)
    camera.id = 'camera1'

    shot = types.Shot()
    shot.id = '1'
    shot.camera = camera
    shot.pose = types.Pose(np.random.rand(3), np.random.rand(3))
    shot.metadata = types.ShotMetadata()
    shot.metadata.gps_position = [0, 0, 0]
    shot.metadata.gps_dop = 1

    r = types.Reconstruction()
    r.add_camera(camera)
    r.add_shot(shot)
    graph = nx.Graph()
    camera_priors = {camera.id: camera}
    gcp = []
    myconfig = config.default_config()

    reconstruction.bundle(graph, r, camera_priors, gcp, myconfig)

    assert np.allclose(shot.pose.translation, np.zeros(3))
    # up vector in camera coordinates is (0, -1, 0)
    assert np.allclose(shot.pose.transform([0, 0, 1]), [0, -1, 0])
Example No. 3
def get_image_metadata(data, image):
    """Get image metadata as a ShotMetadata object."""
    metadata = types.ShotMetadata()
    exif = data.load_exif(image)
    reference = data.load_reference()
    if ('gps' in exif and 'latitude' in exif['gps']
            and 'longitude' in exif['gps']):
        lat = exif['gps']['latitude']
        lon = exif['gps']['longitude']
        if data.config['use_altitude_tag']:
            alt = exif['gps'].get('altitude', 2.0)
        else:
            alt = 2.0  # Arbitrary value used to align the reconstruction
        x, y, z = reference.to_topocentric(lat, lon, alt)
        metadata.gps_position = [x, y, z]
        metadata.gps_dop = exif['gps'].get('dop', 15.0)
    else:
        metadata.gps_position = [0.0, 0.0, 0.0]
        metadata.gps_dop = 999999.0

    metadata.orientation = exif.get('orientation', 1)

    if 'accelerometer' in exif:
        metadata.accelerometer = exif['accelerometer']

    if 'compass' in exif:
        metadata.compass = exif['compass']

    if 'capture_time' in exif:
        metadata.capture_time = exif['capture_time']

    if 'skey' in exif:
        metadata.skey = exif['skey']

    return metadata
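
A brief usage sketch, assuming an OpenSfM dataset directory whose EXIF has already been extracted; the dataset path and image name are placeholders.

from opensfm import dataset

# Hypothetical dataset path and image name.
data = dataset.DataSet('path/to/dataset')
metadata = get_image_metadata(data, '01.jpg')
print(metadata.gps_position, metadata.gps_dop, metadata.orientation)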
Example No. 4
def get_image_metadata(data, image):
    metadata = types.ShotMetadata()
    exif = data.load_exif(image)
    reflla = data.load_reference_lla()
    if 'gps' in exif and 'latitude' in exif['gps'] and 'longitude' in exif['gps']:
        lat = exif['gps']['latitude']
        lon = exif['gps']['longitude']
        if data.config.get('use_altitude_tag', False):
            alt = exif['gps'].get('altitude', 2.0)
        else:
            alt = 2.0 # Arbitrary constant value that will be used to align the reconstruction
        x, y, z = geo.topocentric_from_lla(lat, lon, alt,
            reflla['latitude'], reflla['longitude'], reflla['altitude'])
        metadata.gps_position = [x, y, z]
        metadata.gps_dop = exif['gps'].get('dop', 15.0)
    else:
        metadata.gps_position = [0.0, 0.0, 0.0]
        metadata.gps_dop = 999999.0

    metadata.orientation = exif.get('orientation', 1)

    if 'accelerometer' in exif:
        metadata.accelerometer = exif['accelerometer']

    if 'compass' in exif:
        metadata.compass = exif['compass']

    if 'capture_time' in exif:
        metadata.capture_time = exif['capture_time']

    if 'skey' in exif:
        metadata.skey = exif['skey']

    return metadata
Example No. 5
def test_absolute_pose_single_shot():
    """Single-camera resection on a toy reconstruction with
    1/1000 pixel noise and zero outliers."""
    parameters = config.default_config()
    synthetic_data, synthetic_tracks = synthetic_reconstruction()

    shot_id = 'shot1'
    camera_id = 'camera1'
    metadata = types.ShotMetadata()
    camera = synthetic_data.cameras[camera_id]

    graph_inliers = nx.Graph()
    shot_before = synthetic_data.shots[shot_id]
    status, report = reconstruction.resect(synthetic_tracks, graph_inliers,
                                           synthetic_data, shot_id, camera,
                                           metadata,
                                           parameters['resection_threshold'],
                                           parameters['resection_min_inliers'])
    shot_after = synthetic_data.shots[shot_id]

    assert status is True
    assert report['num_inliers'] == len(graph_inliers.edges())
    assert report['num_inliers'] == len(synthetic_data.points)
    np.testing.assert_almost_equal(shot_before.pose.rotation,
                                   shot_after.pose.rotation, 1)
    np.testing.assert_almost_equal(shot_before.pose.translation,
                                   shot_after.pose.translation, 1)
Example No. 6
def test_reconstruction_class_initialization():

    # Instantiate Reconstruction
    reconstruction = types.Reconstruction()

    # Instantiate camera intrinsics
    camera = types.PerspectiveCamera()
    camera.id = 'apple iphone 4s back camera 4.28mm f/2.4'
    camera.focal = 0.9722222222222222
    camera.k1 = 0.006094395128698237
    camera.k2 = -0.0004952058188617129
    camera.height = 2448
    camera.width = 3264

    # Instantiate GPS data
    metadata = types.ShotMetadata()
    metadata.orientation = 1
    metadata.capture_time = 0.0
    metadata.gps_dop = 5.0
    metadata.gps_position = [
        1.0815875281451939, -0.96510451436708888, 1.2042133903991235
    ]

    # Instantiate shots
    pose0 = types.Pose()
    pose0.rotation = [0.0, 0.0, 0.0]
    pose0.translation = [0.0, 0.0, 0.0]

    shot0 = types.Shot()
    shot0.id = 0
    shot0.camera = camera
    shot0.pose = pose0
    shot0.metadata = metadata

    pose1 = types.Pose()
    pose1.rotation = [0.0, 0.0, 0.0]
    pose1.translation = [-1.0, 0.0, 0.0]

    shot1 = types.Shot()
    shot1.id = 1
    shot1.camera = camera
    shot1.pose = pose1
    shot1.metadata = metadata

    # Add info to current reconstruction
    reconstruction.add_camera(camera)
    reconstruction.add_shot(shot0)
    reconstruction.add_shot(shot1)

    # TEST
    assert len(reconstruction.cameras) == 1
    assert len(reconstruction.shots) == 2
    assert len(reconstruction.points) == 0
    assert reconstruction.get_camera(camera.id) == camera
    assert reconstruction.get_camera(1) is None
    assert reconstruction.get_shot(shot0.id) == shot0
    assert reconstruction.get_shot(shot1.id) == shot1
    assert reconstruction.get_shot(2) is None
Example No. 7
def build_reconstruction(opensfm_path, log_file, dataset_path):
    if opensfm_path not in sys.path:
        sys.path.insert(1, opensfm_path)

    # 'global' must be declared before 'types' is bound by the import below.
    global types
    from opensfm import dataset, matching, reconstruction, types, io
    from opensfm.reconstruction import TrackTriangulator
    from opensfm import log

    Rs, ts, subsampled_images, timestamps = parse_log_file(log_file,
                                                           sample_rate=10)
    generate_dataset(dataset_path, subsampled_images)

    recon = types.Reconstruction()
    camera = build_camera()

    recon.add_camera(camera)
    for i, rotation in enumerate(Rs):
        pose = types.Pose()
        # Set the rotation matrix from the log, invert it by transposing,
        # and place the camera origin at ts[i].
        pose.set_rotation_matrix(Rs[i])
        pose.set_rotation_matrix(pose.get_rotation_matrix().T)
        pose.set_origin(np.array(ts[i]))

        shot = types.Shot()
        shot.camera = camera
        shot.pose = pose
        shot.id = '{}.png'.format(timestamps[i])

        sm = types.ShotMetadata()
        sm.orientation = 1
        sm.gps_position = [0.0, 0.0, 0.0]
        sm.gps_dop = 999999.0
        shot.metadata = sm

        # add shot to reconstruction
        recon.add_shot(shot)

    data = dataset.DataSet(dataset_path)
    data.save_reconstruction([recon], 'reconstruction_gt.json')
Example No. 8
def build_reconstruction(opensfm_path, log_file, trans_file, dataset_path):
    if opensfm_path not in sys.path:
        sys.path.insert(1, opensfm_path)

    # 'global' must be declared before 'types' is bound by the import below.
    global types
    from opensfm import dataset, matching, reconstruction, types, io
    from opensfm.reconstruction import TrackTriangulator
    from opensfm import log

    T = parse_log_file(log_file)
    T_g = parse_trans_file(trans_file)
    pose_g = types.Pose()
    # Use the transposed (inverted) rotation and the translation column of T_g.
    pose_g.set_rotation_matrix(np.matrix(T_g[0:3, 0:3]).T)
    pose_g.set_origin(T_g[0:3, 3])

    recon = types.Reconstruction()
    camera = build_camera()

    recon.add_camera(camera)
    for i, transformation in enumerate(T):
        pose = types.Pose()

        # Use the transposed (inverted) rotation and the translation column.
        pose.set_rotation_matrix(np.matrix(transformation[0:3, 0:3]).T)
        pose.set_origin(transformation[0:3, 3])
        pose = pose.compose(pose_g)

        shot = types.Shot()
        shot.camera = camera
        shot.pose = pose
        shot.id = '{}.jpg'.format(str(i + 1).zfill(6))

        sm = types.ShotMetadata()
        sm.orientation = 1
        sm.gps_position = [0.0, 0.0, 0.0]
        sm.gps_dop = 999999.0
        shot.metadata = sm

        # add shot to reconstruction
        recon.add_shot(shot)

    data = dataset.DataSet(dataset_path)
    data.save_reconstruction([recon], 'reconstruction_gt.json')
Example No. 9
def build_reconstruction(opensfm_path, log_file, dataset_path):
    if opensfm_path not in sys.path:
        sys.path.insert(1, opensfm_path)

    # 'global' must be declared before 'types' is bound by the import below.
    global types
    from opensfm import dataset, matching, reconstruction, types, io
    from opensfm.reconstruction import TrackTriangulator

    Rs, ts, subsampled_images = parse_log_file(log_file)

    recon = types.Reconstruction()
    camera = build_camera()

    recon.add_camera(camera)

    for i, _ in enumerate(Rs):
        pose = types.Pose()
        # Set the angle-axis rotation from the log, invert it by transposing
        # the rotation matrix, and place the camera origin at ts[i].
        pose.rotation = Rs[i]
        pose.set_rotation_matrix(pose.get_rotation_matrix().T)
        pose.set_origin(np.array(ts[i]))

        # Scale the translation by a constant factor.
        pose.translation = 20.0 * pose.translation

        shot = types.Shot()
        shot.camera = camera
        shot.pose = pose
        shot.id = subsampled_images[i]

        sm = types.ShotMetadata()
        sm.orientation = 1
        sm.gps_position = [0.0, 0.0, 0.0]
        sm.gps_dop = 999999.0
        # sm.capture_time = 0.0
        shot.metadata = sm

        # add shot to reconstruction
        recon.add_shot(shot)

    data = dataset.DataSet(dataset_path)
    data.save_reconstruction([recon], 'reconstruction_gt.json')
Example No. 10
def get_empty_metadata():
    empty_metadata = types.ShotMetadata()
    empty_metadata.gps_position = [0.0, 0.0, 0.0]
    empty_metadata.gps_dop = 999999.0
    empty_metadata.orientation = 1
    return empty_metadata
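
A short sketch of how the placeholder metadata might be attached to a shot when no EXIF is available; the image name below is hypothetical.

shot = types.Shot()
shot.id = 'frame_0001.png'  # hypothetical image name
shot.metadata = get_empty_metadata()
assert shot.metadata.gps_dop == 999999.0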