Example 1
def _get_p_map_w_orig(pPoseAB, gPose2o, numTuple):
    """
    get the predicted truth path map
    poses are w.r.t. previous frame
    number of tuples in the batch. numTuple-1 transformation matrices. numTuple-2 will be added from gPose2o
    """
    ## origin = np.array([[0], [0], [0]], dtype=np.float32)
    ## pathMap = np.ndarray(shape=[3,0], dtype=np.float32)
    ## pathMap = np.append(pathMap, origin, axis=1)
    ## # Sequential transformations that take points from frame i to i+1
    ## for i in range(len(pPoseAB)-1,-1,-1):
    ##     poseA2B = kitti._get_3x4_tmat(np.array(pPoseAB[i]))
    ##     pathMap = kitti.transform_pcl(pathMap, poseA2B)
    ##     pathMap = np.append(pathMap, origin, axis=1)
    ## #### PathMap consists of all points transformed to the last frame coordinates
    ## # transform points at last frame coordinates to origin frame
    ## #pathMap = kitti.transform_pcl(pathMap, gPose2o[len(gPose2o)-1])
    ## pathMap = kitti.transform_pcl(pathMap, gPose2o[0])

    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    poseA2o = kitti._get_3x4_tmat(gPose2o[0])
    # because inv(OrigTo1)*OrigTo1*(0,0,0)=(0,0,0) so simply append (0,0,0)
    #gtLoc = kitti.transform_pcl(origin, gPose2o[0])
    #pathMap = np.append(pathMap, gtLoc, axis=1)
    pathMap = np.append(pathMap, origin, axis=1)
    for i in range(len(pPoseAB)):
        poseA2B = kitti._get_3x4_tmat(np.array(pPoseAB[i]))
        poseB2O = _get_tMat_B_2_O(poseA2o, poseA2B)
        oAtNextFrame = kitti.transform_pcl(origin, poseA2B)
        oAtOrigFrame = kitti.transform_pcl(origin, poseB2O)
        pathMap = np.append(pathMap, oAtOrigFrame, axis=1)
        poseA2o = poseB2O
    #### PathMap consists of all points in the origin frame coordinates
    return pathMap
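The helper _get_tMat_B_2_O used above is not shown in this snippet. Below is a minimal standalone sketch of the chaining it presumably performs, using plain NumPy homogeneous matrices; the sketch's name, the [R|t] layout of the 3x4 matrices, and the convention that poseA2B maps points from frame A into frame B are assumptions, not the project's actual implementation.

import numpy as np

def _to_hom(tmat):
    # append the homogeneous row [0, 0, 0, 1] to a 3x4 transformation matrix (assumed [R|t] layout)
    return np.vstack([tmat, [[0.0, 0.0, 0.0, 1.0]]])

def get_tmat_b_2_o_sketch(poseA2o, poseA2B):
    # points in frame B go back to frame A via inv(poseA2B), then to the origin frame via poseA2o
    b2o = _to_hom(poseA2o) @ np.linalg.inv(_to_hom(poseA2B))
    return b2o[:3, :]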
Example 2
def _apply_prediction_pcl_depImg(pclSource, targetT, targetP, params6=True, **kwargs):
    '''
    Transform pclSource, Calculate new targetT based on targetP, Create new depth image, and pcl file
    Return:
        - New pclSource
        - New targetT
        - New depthImageB
    '''
    # remove trailing zeros
    pclSource = kitti.remove_trailing_zeros(pclSource)
    if params6:
        tMatP = kitti._get_tmat_from_params(targetP)
        tMatT = kitti._get_tmat_from_params(targetT)
    else:
        tMatP = targetP.reshape([3,4])
        tMatT = targetT.reshape([3,4])
    # get transformed pclSource based on targetP
    pclSourceTransformed = kitti.transform_pcl(pclSource, tMatP)    
    # get new depth image of transformed pclSource
    depthImageB, _ = kitti.get_depth_image_pano_pclView(pclSourceTransformed)
    pclSourceTransformed = kitti._zero_pad(pclSourceTransformed, kwargs.get('pclCols')-pclSourceTransformed.shape[1])
    # get residual Target
    tMatResP2T = kitti.get_residual_tMat_p2t(tMatT, tMatP) # first is source2target, second is source2predicted
    if params6:
        targetResP2T = kitti._get_tmat_from_params(tMatResP2T)
    else:
        targetResP2T = tMatResP2T.reshape([12])
    return pclSourceTransformed, targetResP2T, depthImageB
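get_residual_tMat_p2t is also not shown here. Going by the comment above ("first is source2target, second is source2predicted"), the residual is presumably the transform that, applied after the prediction, recovers the source-to-target transform. A hedged standalone sketch in plain NumPy; the function name and the [R|t] convention are assumptions.

import numpy as np

def residual_tmat_p2t_sketch(tmat_s2t, tmat_s2p):
    # residual @ s2p == s2t in homogeneous 4x4 form, i.e. residual = s2t * inv(s2p)
    def to_hom(tmat):
        return np.vstack([tmat, [[0.0, 0.0, 0.0, 1.0]]])
    residual = to_hom(tmat_s2t) @ np.linalg.inv(to_hom(tmat_s2p))
    return residual[:3, :]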
Example 3
def _apply_prediction_periodic(pclA, targetT, targetP, **kwargs):
    '''
    Transform pclA, Calculate new targetT based on targetP, Create new depth image
    Return:
        - New PCLA
        - New targetT
        - New depthImage
    '''
    # remove trailing zeros
    pclA = kitti.remove_trailing_zeros(pclA)
    # get transformed pclA based on targetP
    tMatP = kitti._get_tmat_from_params(targetP)
    pclATransformed = kitti.transform_pcl(pclA, tMatP)
    # get new depth image of transformed pclA
    depthImageA, _ = kitti.get_depth_image_pano_pclView(pclATransformed)
    pclATransformed = kitti._zero_pad(
        pclATransformed,
        kwargs.get('pclCols') - pclATransformed.shape[1])
    # get residual Target
    #tMatResA2B = kitti.get_residual_tMat_A2B(targetT, targetP)
    # wrap the predicted rotation parameters into [0, pi) before taking the residual
    targetP[0] = targetP[0] % np.pi
    targetP[1] = targetP[1] % np.pi
    targetP[2] = targetP[2] % np.pi
    targetResP2T = targetT - targetP
    return pclATransformed, targetResP2T, depthImageA
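A quick, purely illustrative check of the wrapping step above. The assumption that the first three parameters are rotation angles (and the remaining three translations) is inferred from the wrapping by pi; the toy values are made up.

import numpy as np

targetT = np.array([0.10, 0.05, -0.02, 1.0, 0.0, 0.2], dtype=np.float32)
targetP = np.array([0.08 + np.pi, 0.05, -0.01, 0.9, 0.1, 0.2], dtype=np.float32)

# wrap the predicted rotation parameters into [0, pi), as in the function above
targetP[:3] = targetP[:3] % np.pi
residual = targetT - targetP
print(residual)  # rotation residuals are computed against the wrapped angles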
Example 4
def _get_gt_map_backwards(gtPose):
    """
    iterate backwards to transform step by step backwards
    """
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.array([[0], [0], [0]], dtype=np.float32)
    for i in range(gtPose.shape[0] - 2, -1, -1):
        poseA = kitti._get_3x4_tmat(gtPose[i])
        poseB = kitti._get_3x4_tmat(gtPose[i + 1])
        poseB2A = _get_tMat_A_2_B(poseB, poseA)
        pathMap = kitti.transform_pcl(pathMap, poseB2A)
        pathMap = np.append(pathMap, origin, axis=1)
    #### PathMap consists of all points transformed to the frame 0 coordinates
    # transform them to the origin frame
    pathMap = kitti.transform_pcl(pathMap, gtPose[0])
    # add final origin
    pathMap = np.append(pathMap, origin, axis=1)
    return pathMap
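_get_tMat_A_2_B is used above but not defined in this snippet. From the call site (passing poseB, then poseA, and getting poseB2A back), it presumably builds the relative transform between two frame-to-origin poses; a standalone sketch under that assumption, in plain NumPy.

import numpy as np

def get_tmat_a_2_b_sketch(tmat_a2o, tmat_b2o):
    # points go from frame A to the origin via tmat_a2o, then into frame B via inv(tmat_b2o)
    def to_hom(tmat):
        return np.vstack([tmat, [[0.0, 0.0, 0.0, 1.0]]])
    a2b = np.linalg.inv(to_hom(tmat_b2o)) @ to_hom(tmat_a2o)
    return a2b[:3, :]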
Example 5
def _get_p_map_w_orig_points(pPoseAB, gPose2o):
    """
    Original Coordinates are used and only transformation for each frame is plotted
    len(pPoseAB) == len(gPose2o)+1
    get the predicted truth path map
    poses are w.r.t. previous frame
    """
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    gtLoc = kitti.transform_pcl(origin, gPose2o[0])
    pathMap = np.append(pathMap, gtLoc, axis=1)
    # Sequential transformations that take points from frame i to i+1
    for i in range(len(pPoseAB)):
        poseA2B = kitti._get_3x4_tmat(np.array(pPoseAB[i]))
        oAtNextFrame = kitti.transform_pcl(origin, poseA2B)
        oAtOrigFrame = kitti.transform_pcl(oAtNextFrame, gPose2o[i + 1])
        pathMap = np.append(pathMap, oAtOrigFrame, axis=1)
    #### PathMap consists of all points in the origin frame coordinates
    return pathMap
Example 6
def _get_p_map(pPose):
    """
    get the predicted truth path map
    poses are w.r.t. previous frame
    """
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    pathMap = np.append(pathMap, origin, axis=1)
    for i in range(len(pPose)):
        pose = kitti._get_3x4_tmat(np.array(pPose[i]['tmat']))
        origin = kitti.transform_pcl(origin, pose)
        pathMap = np.append(pathMap, origin, axis=1)
    return pathMap
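A standalone illustration of the same dead-reckoning accumulation in plain NumPy. It assumes kitti.transform_pcl applies a 3x4 [R|t] matrix to 3xN points; the toy relative poses are made up.

import numpy as np

# two toy relative poses: translate 1 m along x, then 2 m along x
rel_poses = [
    np.hstack([np.eye(3), [[1.0], [0.0], [0.0]]]),
    np.hstack([np.eye(3), [[2.0], [0.0], [0.0]]]),
]

point = np.zeros((3, 1), dtype=np.float32)
path = [point]
for tmat in rel_poses:
    point = tmat[:, :3] @ point + tmat[:, 3:4]  # apply [R|t] to the running point
    path.append(point)

pathMap = np.hstack(path)
print(pathMap[0])  # x coordinates accumulate: 0, 1, 3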
Example 7
def _get_gt_map(gtPose):
    """
    get the ground truth path map
    pose are w.r.t. the origin
    """
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    pathMap = np.append(pathMap, origin, axis=1)
    for i in range(len(gtPose)):
        pose = kitti._get_3x4_tmat(gtPose[i])
        pointT = kitti.transform_pcl(origin, pose)
        pathMap = np.append(pathMap, pointT, axis=1)
    return pathMap
Example 8
def GetMapViaPose(poseX2Olist):
    """
    Input:
        pose list X2O
    Output:
        Map
    """
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    pathMap = np.append(pathMap, origin, axis=1)
    for i in range(len(poseX2Olist)):
        pointT = kitti.transform_pcl(origin, poseX2Olist[i])
        pathMap = np.append(pathMap, pointT, axis=1)
    return pathMap
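Applying a 3x4 [R|t] pose to the origin yields just the translation column t, so the same map can be read directly off the poses. A minimal equivalent sketch, assuming kitti.transform_pcl applies [R|t] to 3xN points.

import numpy as np

def get_map_via_pose_sketch(poseX2Olist):
    # stack the translation column of each X-to-origin pose into a 3xN path map
    cols = [np.zeros((3, 1), dtype=np.float32)]                 # the origin itself
    cols += [np.asarray(pose)[:, 3:4] for pose in poseX2Olist]  # R @ 0 + t == t
    return np.hstack(cols).astype(np.float32)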
Example 9
def GetMapViaTmat(tmatlist):
    """
    Input:
        tmatList B2A
    Output:
        Map
    """
    '''
    Sequencing : P(n) -> P(n-1) -> ... -> P(O)
    '''
    origin = np.array([[0], [0], [0]], dtype=np.float32)
    pathMap = np.ndarray(shape=[3, 0], dtype=np.float32)
    pathMap = np.append(pathMap, origin, axis=1)
    for i in range(len(tmatlist) - 1, -1, -1):  #last index to 0
        pathMap = kitti.transform_pcl(pathMap, tmatlist[i])
        pathMap = np.append(pathMap, origin, axis=1)
    #pathMap = np.flip(pathMap)
    return pathMap
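A standalone check of the backward chaining in plain NumPy. It assumes tmatlist[i] is the 3x4 [R|t] taking points from frame i+1 into frame i; the toy transforms are made up.

import numpy as np

def apply_tmat(tmat, points):
    # apply a 3x4 [R|t] transform to 3xN points
    return tmat[:, :3] @ points + tmat[:, 3:4]

# toy B2A transforms: each next frame sits 1 m ahead along x,
# so mapping its points back into the previous frame adds +1 to x
b2a = np.hstack([np.eye(3), [[1.0], [0.0], [0.0]]])
tmatlist = [b2a, b2a, b2a]

origin = np.zeros((3, 1))
pathMap = origin.copy()
for i in range(len(tmatlist) - 1, -1, -1):  # last index to 0
    pathMap = apply_tmat(tmatlist[i], pathMap)
    pathMap = np.append(pathMap, origin, axis=1)

print(pathMap[0])  # 3, 2, 1, 0 -- waypoints from the last frame back to frame 0, in frame-0 coordinates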
Example 10
def _apply_prediction(pclA, tMatT, tMatP, **kwargs):
    '''
    Transform pclA, Calculate new tMatT based on tMatP, Create new depth image
    Return:
        - New PCLA
        - New tMatT
        - New depthImage
    '''
    # remove trailing zeros
    pclA = kitti.remove_trailing_zeros(pclA)
    # get transformed pclA based on tMatP
    pclATransformed = kitti.transform_pcl(pclA, tMatP)
    # get new depth image of transformed pclA
    _, depthImageA = kitti.get_depth_image_pano_pclView(pclATransformed)
    pclATransformed = kitti._zero_pad(
        pclATransformed,
        kwargs.get('pclCols') - pclATransformed.shape[1])
    # get residual tMat
    tMatResA2B = kitti.get_residual_tMat_A2B(tMatT, tMatP)
    return pclATransformed, tMatResA2B, depthImageA
Example 11
def _apply_prediction_depImg(pclSource, targetT, targetP, prevP, params6=True, **kwargs):
    '''
    Transform pclSource, Calculate new targetT based on targetP, Create new depth image
    DOESN"T TOUCH THE PCL, BUT USES prevP TO DO TRANSFORMATION FOR CORRECT DEPTH IMAGE
    
    targetP
        is the transformation from space pi to space t.
    prevP
        is the transformation from space s to space pi. s to p0 is the identity.
    targetT
        is the transformation from space s to space t.
    Return:
        - New pclSource
        - New targetT
        - New depthImageB
    '''
    # remove trailing zeros
    pclSource = kitti.remove_trailing_zeros(pclSource)
    if params6:
        tMatP = kitti._get_tmat_from_params(targetP)
        tMatT = kitti._get_tmat_from_params(targetT)
        tMatPrevP = kitti._get_tmat_from_params(prevP)
    else:
        tMatP = targetP.reshape([3,4])
        tMatT = targetT.reshape([3,4])
        tMatPrevP = prevP.reshape([3,4])
    # compose prevP and targetP to get the cumulative predicted transformation from space s
    tMatS2P = np.matmul(kitti._add_row4_tmat(tMatPrevP), kitti._add_row4_tmat(tMatP))
    tMatS2P = kitti._remove_row4_tmat(tMatS2P)
    # get transformed pclSource based on targetP
    pclSourceTransformed = kitti.transform_pcl(pclSource, tMatS2P)
    # get new depth image of transformed pclSource
    depthImageB, _ = kitti.get_depth_image_pano_pclView(pclSourceTransformed)
    # get residual Target
    tMatResP2T = kitti.get_residual_tMat_p2t(tMatT, tMatS2P) # first is source2target, second is source2predicted
    if params6:
        targetResP2T = kitti._get_tmat_from_params(tMatResP2T)
    else:
        targetResP2T = tMatResP2T.reshape([12])
    return tMatS2P, targetResP2T, depthImageB
Example 12
def _apply_prediction(pclB, targetT, targetP, **kwargs):
    '''
    Transform pclB, Calculate new targetT based on targetP, Create new depth image
    Return:
        - New PCLB
        - New targetT
        - New depthImageB
    '''
    # remove trailing zeros
    pclB = kitti.remove_trailing_zeros(pclB)
    # get transformed pclB based on targetP
    tMatP = kitti._get_tmat_from_params(targetP)
    pclBTransformed = kitti.transform_pcl(pclB, tMatP)
    # get new depth image of transformed pclB
    depthImageB, _ = kitti.get_depth_image_pano_pclView(pclBTransformed)
    pclBTransformed = kitti._zero_pad(
        pclBTransformed,
        kwargs.get('pclCols') - pclBTransformed.shape[1])
    # get residual Target
    #tMatResB2A = kitti.get_residual_tMat_Bp2B2A(targetP, targetT) # first is A, second is B
    targetResP2T = targetT - targetP
    return pclBTransformed, targetResP2T, depthImageB