def transformed_points(points, transformation):
    """
    Apply "transformation" to each 3D point in "points" and return the result
    as a numpy array, where "transformation" equals
    ("delta_quaternion", "delta_scale", "delta_location") (apply from left to right).
    """
    quat, scale, translation = transformation
    quat = quat.reshape(4, 1)

    result = []
    for point in points:
        rotated = trfm.apply_quat_on_point(quat, point).reshape(3)
        result.append(translation + scale * rotated)
    return np.array(result)
# Example #2
# 0
def transformed_points(points, transformation):
    """
    Apply "transformation" to every 3D point of "points", where "transformation"
    is the tuple ("delta_quaternion", "delta_scale", "delta_location")
    (apply from left to right).
    """
    dq, ds, dl = transformation
    dq = dq.reshape(4, 1)

    def _apply(point):
        # rotate, then scale, then translate
        return dl + ds * trfm.apply_quat_on_point(dq, point).reshape(3)

    return np.array([_apply(p) for p in points])
def transform_between_cam_trajectories(cam_trajectory_from, cam_trajectory_to,
                                       at_frame=1, at_time=None,
                                       infer_scale=True, offset_frames=None, offset_time=float("inf")):
    """
    Returns the transformation ("delta_quaternion", "delta_scale", "delta_location") (apply from left to right)
    between two camera trajectories "cam_trajectory_from" and "cam_trajectory_to",
    of which the format is given by the output of "load_cam_trajectory_TUM()".
    
    "at_frame" or "at_time" : moment at which the translation and rotation are calculated
    "at_frame" : frame number, starting from 1; set to None if "at_time" is used instead
    "at_time" : timestamp in seconds; set to None if "at_frame" is used instead
    
    "infer_scale" : set to True if the scale should also be calculated;
                    requires one of the following offsets to be set:
    "offset_frames" or "offset_time" : offset between the first and second moment
    "offset_frames" : frames between both moments; set to None if "offset_time" is used instead
    "offset_time" : seconds between both moments; set to None if "offset_frames" is used instead
    
    Raises ValueError if neither "at_frame" nor "at_time" is given,
    or if "infer_scale" is True and neither "offset_frames" nor "offset_time" is given.
    
    Note: in case "at_frame" or "offset_frames" is used,
    the corresponding timestamps of the "to" trajectory are used.
    In case a timestamp at the first or second moment of one of the trajectories
    is out of range of the other trajectory,
    the moments are adjusted such that the timestamps between trajectories match as well as possible.
    """
    ts_from, locs_from, quats_from = cam_trajectory_from
    ts_to, locs_to, quats_to = cam_trajectory_to
    
    # Return unit transformation, if one of the trajectories is empty
    if not len(ts_from) or not len(ts_to):
        return trfm.unit_quat().reshape(4), 1., np.zeros(3)
    
    def closest_element_index(array, element):
        # Index of the entry of "array" closest to "element";
        # +inf / -inf clamp to the last / first index respectively.
        if abs(element) != float("inf"):
            return (np.abs(array - element)).argmin()
        elif element == float("inf"):
            return len(array) - 1
        else:    # element == float("-inf")
            return 0
    
    # Get time and frame indices at first moment
    if at_frame is not None:
        at_frame_to = max(0, min(at_frame - 1, len(ts_to) - 1))    # to Python indexing
    elif at_time is not None:
        at_frame_to = closest_element_index(ts_to, at_time)
    else:
        raise ValueError('either "at_frame" or "at_time" should be given')
    at_frame_from = closest_element_index(ts_from, ts_to[at_frame_to])
    at_frame_to = closest_element_index(ts_to, ts_from[at_frame_from])    # make sure both are close to each other
    at_time = ts_to[at_frame_to]    # trajectory "to" is considered as the groundtruth one, hence the preference
    
    # Calculate rotation and fetch location, at first moment
    delta_quaternion = trfm.delta_quat(
            quats_to[at_frame_to].reshape(4, 1), quats_from[at_frame_from].reshape(4, 1) )
    location_from = locs_from[at_frame_from]
    location_to = locs_to[at_frame_to]
    
    delta_scale = 1.
    if infer_scale:
        # Get frame indices at second moment; implementation analogous to first moment
        if offset_frames is not None:
            scnd_frame_to = max(0, min(at_frame_to + offset_frames, len(ts_to) - 1))
        elif offset_time is not None:
            scnd_frame_to = closest_element_index(ts_to, at_time + offset_time)
        else:
            raise ValueError(
                    'either "offset_frames" or "offset_time" should be given when "infer_scale" is True')
        scnd_frame_from = closest_element_index(ts_from, ts_to[scnd_frame_to])
        scnd_frame_to = closest_element_index(ts_to, ts_from[scnd_frame_from])
        
        # Calculate scale:
        # first the "from" trajectory is transformed such that
        # the cam poses of both trajectories are equal at the first moment,
        # then the translation-vectors between first and second moment are calculated for both trajectories, ...
        inbetween_location_from = trfm.apply_quat_on_point(
                delta_quaternion, locs_from[scnd_frame_from] - locs_from[at_frame_from] ).reshape(3)
        inbetween_location_to   = locs_to[scnd_frame_to] - locs_to[at_frame_to]
        # ... then the translation-vector of the "to" trajectory is projected on
        # the translation-vector of the "from" trajectory,
        # and "delta_scale" is the ratio of the length of this projection-vector
        # to the length of the translation-vector of the "from" trajectory:
        # delta_scale = (from . to) / |from|^2
        numerator   = inbetween_location_from.dot(inbetween_location_to)
        denominator = inbetween_location_from.dot(inbetween_location_from)
        if denominator != 0:    # guard against a zero displacement of the "from" trajectory
            delta_scale = numerator / denominator
    
    # Return transformation: rotation, scale and translation
    delta_location = location_to - delta_scale * trfm.apply_quat_on_point(delta_quaternion, location_from).reshape(3)
    return delta_quaternion.reshape(4), delta_scale, delta_location
# Example #4
# 0
def transform_between_cam_trajectories(cam_trajectory_from,
                                       cam_trajectory_to,
                                       at_frame=1,
                                       at_time=None,
                                       infer_scale=True,
                                       offset_frames=None,
                                       offset_time=float("inf")):
    """
    Returns the transformation ("delta_quaternion", "delta_scale", "delta_location") (apply from left to right)
    between two camera trajectories "cam_trajectory_from" and "cam_trajectory_to",
    of which the format is given by the output of "load_cam_trajectory_TUM()".
    
    "at_frame" or "at_time" : moment at which the translation and rotation are calculated
    "at_frame" : frame number, starting from 1; set to None if "at_time" is used instead
    "at_time" : timestamp in seconds; set to None if "at_frame" is used instead
    
    "infer_scale" : set to True if the scale should also be calculated;
                    requires one of the following offsets to be set:
    "offset_frames" or "offset_time" : offset between the first and second moment
    "offset_frames" : frames between both moments; set to None if "offset_time" is used instead
    "offset_time" : seconds between both moments; set to None if "offset_frames" is used instead
    
    Raises ValueError if neither "at_frame" nor "at_time" is given,
    or if "infer_scale" is True and neither "offset_frames" nor "offset_time" is given.
    
    Note: in case "at_frame" or "offset_frames" is used,
    the corresponding timestamps of the "to" trajectory are used.
    In case a timestamp at the first or second moment of one of the trajectories
    is out of range of the other trajectory,
    the moments are adjusted such that the timestamps between trajectories match as well as possible.
    """
    ts_from, locs_from, quats_from = cam_trajectory_from
    ts_to, locs_to, quats_to = cam_trajectory_to

    # Return unit transformation, if one of the trajectories is empty
    if not len(ts_from) or not len(ts_to):
        return trfm.unit_quat().reshape(4), 1., np.zeros(3)

    def closest_element_index(array, element):
        # Index of the entry of "array" closest to "element";
        # +inf / -inf clamp to the last / first index respectively.
        if abs(element) != float("inf"):
            return (np.abs(array - element)).argmin()
        elif element == float("inf"):
            return len(array) - 1
        else:  # element == float("-inf")
            return 0

    # Get time and frame indices at first moment
    if at_frame is not None:
        at_frame_to = max(0, min(at_frame - 1,
                                 len(ts_to) - 1))  # to Python indexing
    elif at_time is not None:
        at_frame_to = closest_element_index(ts_to, at_time)
    else:
        raise ValueError('either "at_frame" or "at_time" should be given')
    at_frame_from = closest_element_index(ts_from, ts_to[at_frame_to])
    at_frame_to = closest_element_index(
        ts_to, ts_from[at_frame_from])  # make sure both are close to each other
    at_time = ts_to[
        at_frame_to]  # trajectory "to" is considered as the groundtruth one, hence the preference

    # Calculate rotation and fetch location, at first moment
    delta_quaternion = trfm.delta_quat(quats_to[at_frame_to].reshape(4, 1),
                                       quats_from[at_frame_from].reshape(4, 1))
    location_from = locs_from[at_frame_from]
    location_to = locs_to[at_frame_to]

    delta_scale = 1.
    if infer_scale:
        # Get frame indices at second moment; implementation analogous to first moment
        if offset_frames is not None:
            scnd_frame_to = max(
                0, min(at_frame_to + offset_frames,
                       len(ts_to) - 1))
        elif offset_time is not None:
            scnd_frame_to = closest_element_index(ts_to, at_time + offset_time)
        else:
            raise ValueError(
                'either "offset_frames" or "offset_time" should be given when "infer_scale" is True'
            )
        scnd_frame_from = closest_element_index(ts_from, ts_to[scnd_frame_to])
        scnd_frame_to = closest_element_index(ts_to, ts_from[scnd_frame_from])

        # Calculate scale:
        # first the "from" trajectory is transformed such that
        # the cam poses of both trajectories are equal at the first moment,
        # then the translation-vectors between first and second moment are calculated for both trajectories, ...
        inbetween_location_from = trfm.apply_quat_on_point(
            delta_quaternion,
            locs_from[scnd_frame_from] - locs_from[at_frame_from]).reshape(3)
        inbetween_location_to = locs_to[scnd_frame_to] - locs_to[at_frame_to]
        # ... then the translation-vector of the "to" trajectory is projected on
        # the translation-vector of the "from" trajectory,
        # and "delta_scale" is the ratio of the length of this projection-vector
        # to the length of the translation-vector of the "from" trajectory:
        # delta_scale = (from . to) / |from|^2
        numerator = inbetween_location_from.dot(inbetween_location_to)
        denominator = inbetween_location_from.dot(inbetween_location_from)
        if denominator != 0:  # guard against a zero displacement of the "from" trajectory
            delta_scale = numerator / denominator

    # Return transformation: rotation, scale and translation
    delta_location = location_to - delta_scale * trfm.apply_quat_on_point(
        delta_quaternion, location_from).reshape(3)
    return delta_quaternion.reshape(4), delta_scale, delta_location