Example #1
def align_reconstruction_naive_similarity(config, reconstruction, gcp):
    """Align with GPS and GCP data using direct 3D-3D matches."""
    X, Xp = alignment_constraints(config, reconstruction, gcp)

    if len(X) == 0:
        return 1.0, np.identity(3), np.zeros((3))

    # Translation-only case, either:
    #  - a single value
    #  - identical values
    same_values = (np.linalg.norm(np.std(Xp, axis=0)) < 1e-10)
    single_value = (len(X) == 1)
    if single_value:
        logger.warning('Only 1 constraint. Using translation-only alignment.')
    if same_values:
        logger.warning('GPS/GCP data seems to have identical values. Using translation-only alignment.')
    if same_values or single_value:
        t = np.array(Xp[0]) - np.array(X[0])
        return 1.0, np.identity(3), t

    # Will be up to some unknown rotation
    if len(X) == 2:
        logger.warning('Only 2 constraints. Will be up to some unknown rotation.')
        X.append(X[1])
        Xp.append(Xp[1])

    # Compute similarity Xp = s A X + b
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s
    return s, A, b
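The triplet (s, A, b) returned above parameterizes the similarity Xp = s A X + b noted in the comments. As a usage sketch, here is a minimal numpy-only way to apply that transform to an (N, 3) array of points; the helper name apply_similarity_to_points is hypothetical and not part of OpenSfM's API:

import numpy as np

def apply_similarity_to_points(s, A, b, points):
    """Apply the estimated similarity Xp = s * A @ X + b to an (N, 3) point array.

    Hypothetical helper for illustration only; not an OpenSfM function.
    """
    points = np.asarray(points, dtype=float)
    # Row-wise: each point x maps to s * A @ x + b.
    return s * points.dot(A.T) + b

# Example usage with the values returned by align_reconstruction_naive_similarity:
# s, A, b = align_reconstruction_naive_similarity(config, reconstruction, gcp)
# aligned = apply_similarity_to_points(s, A, b, points_in_reconstruction_frame)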
Example #2
def align_reconstruction_naive_similarity(reconstruction, gcp):
    """Align with GPS and GCP data using direct 3D-3D matches."""
    X, Xp = [], []

    # Get Ground Control Point correspondences
    if gcp:
        triangulated, measured = triangulate_all_gcp(reconstruction, gcp)
        X.extend(triangulated)
        Xp.extend(measured)

    # Get camera center correspondences
    for shot in reconstruction.shots.values():
        X.append(shot.pose.get_origin())
        Xp.append(shot.metadata.gps_position)

    if len(X) < 3:
        return

    # Compute similarity Xp = s A X + b
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s
    return s, A, b
Example #3
File: align.py  Project: rdyadav/OpenSfM
def align_reconstruction_naive_similarity(config, reconstruction, gcp):
    """Align with GPS and GCP data using direct 3D-3D matches."""
    X, Xp = alignment_constraints(config, reconstruction, gcp)

    if len(X) == 0:
        return 1.0, np.identity(3), np.zeros((3))

    # Translation-only case
    if len(X) == 1:
        logger.warning('Only 1 constraint. Using translation-only alignment.')
        t = np.array(Xp[0]) - np.array(X[0])
        return 1.0, np.identity(3), t

    # Will be up to some unknown rotation
    if len(X) == 2:
        logger.warning(
            'Only 2 constraints. Will be up to some unknown rotation.')
        X.append(X[1])
        Xp.append(Xp[1])

    # Compute similarity Xp = s A X + b
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s
    return s, A, b
Example #4
File: align.py  Project: onsiteiq/OpenSfM
def get_sab_3d(reconstruction, filtered_shots, gcp, config):
    X, Xp = [], []

    # Get Ground Control Point correspondences
    if gcp and config['align_use_gcp']:
        triangulated, measured = triangulate_all_gcp(reconstruction, gcp)
        X.extend(triangulated)
        Xp.extend(measured)

    # Get camera center correspondences
    if config['align_use_gps']:
        for shot in filtered_shots:
            X.append(shot.pose.get_origin())
            Xp.append(shot.metadata.gps_position)

    # Compute similarity Xp = s A X + b
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s

    return s, A, b
Example #5
def align_reconstruction_naive_similarity(reconstruction):
    if len(reconstruction['shots']) < 3:
        return
    # Compute similarity Xp = s A X + b
    X, Xp = [], []
    for shot in reconstruction['shots'].values():
        X.append(optical_center(shot))
        Xp.append(shot['gps_position'])
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s
    return s, A, b
Example #6
def align_reconstruction_naive_similarity(reconstruction):
    if len(reconstruction.shots) < 3:
        return
    # Compute similarity Xp = s A X + b
    X, Xp = [], []
    for shot in reconstruction.shots.values():
        X.append(shot.pose.get_origin())
        Xp.append(shot.metadata.gps_position)
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=True)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1. / 3)
    A /= s
    return s, A, b
Example #7
File: align.py  Project: mfkiwl/OpenSfM
def compute_naive_similarity(
    config: Dict[str, Any],
    reconstruction: types.Reconstruction,
    gcp: List[pymap.GroundControlPoint],
    use_gps: bool,
    use_scale: bool,
) -> Optional[Tuple[float, np.ndarray, np.ndarray]]:
    """Compute similarity with GPS and GCP data using direct 3D-3D matches."""
    X, Xp = alignment_constraints(config, reconstruction, gcp, use_gps)

    if len(X) == 0:
        return None

    # Translation-only case, either:
    #  - a single value
    #  - identical values
    same_values = np.linalg.norm(np.std(Xp, axis=0)) < 1e-10
    single_value = len(X) == 1
    if single_value:
        logger.warning("Only 1 constraints. Using translation-only alignment.")
    if same_values:
        logger.warning(
            "GPS/GCP data seems to have identical values. Using translation-only alignment."
        )
    if same_values or single_value:
        t = np.array(Xp[0]) - np.array(X[0])
        return 1.0, np.identity(3), t

    # Will be up to some unknown rotation
    if len(X) == 2:
        logger.warning(
            "Only 2 constraints. Will be up to some unknown rotation.")
        X.append(X[1])
        Xp.append(Xp[1])

    # Compute similarity Xp = s A X + b
    X = np.array(X)
    Xp = np.array(Xp)
    T = tf.superimposition_matrix(X.T, Xp.T, scale=use_scale)

    A, b = T[:3, :3], T[:3, 3]
    s = np.linalg.det(A)**(1.0 / 3)
    A /= s
    return s, A, b
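A quick way to sanity-check any of the variants above is a synthetic round trip: build correspondences from a known similarity and confirm that the same recipe (superimposition_matrix, then the cube root of the determinant for scale) recovers it. The values below are made up for illustration, and the import path for the vendored transformations module is an assumption based on how the snippets alias it as tf:

import numpy as np
from opensfm import transformations as tf  # assumed import path for the tf alias used above

rng = np.random.default_rng(42)
X = rng.normal(size=(10, 3))                      # synthetic points in the reconstruction frame
s_true = 2.5
angle = np.deg2rad(30.0)
A_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                   [np.sin(angle),  np.cos(angle), 0.0],
                   [0.0,            0.0,           1.0]])
b_true = np.array([10.0, -4.0, 1.0])
Xp = s_true * X.dot(A_true.T) + b_true            # Xp = s A X + b

# Same estimation recipe as in the examples above.
T = tf.superimposition_matrix(X.T, Xp.T, scale=True)
A, b = T[:3, :3], T[:3, 3]
s = np.linalg.det(A) ** (1.0 / 3)
A /= s

assert np.allclose(s, s_true) and np.allclose(A, A_true) and np.allclose(b, b_true)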