def process_dataset_colorOnly(startTime, durationSum, 
                    pclFolderList, imgFolderList, 
                    seqIDs, 
                    pclFilenamesList, imgFilenamesList, poseFileList, 
                    tfRecFolder,  
                    numTuples, i):
    '''
    imgFilenames: list of image file addresses
    poseFile: includes a list of pose files to read
    the target transformation moves frame i into frame i+1:
        tMatAo (i): A->0
        tMatBo (i+1): B->0
        tMatAB (target): A->B  (i -> i+1)

    Calculate the Yaw, Pitch, Roll from the rotation matrix,
    extract dX, dY, dZ,
    and use them to train the network.
    '''
    seqID = seqIDs[i]
    print("SeqID started : ", seqID)

    imgFolder = imgFolderList[i]
    imgFilenames = imgFilenamesList[i]
    poseFile = poseFileList[i]

    imgColorList = list()
    poseB2AList = list()
    bitB2AList = list()
    poseX20List = list()
    # pop the first entry of the tuple window and append the latest frame as the numTuples'th
    # j is the beginning of the window
    # k is the end of the window
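    # Example with numTuples=3: j=0 fills the window with frames 0,1,2 (k ends at 2)
    # and writes one record; frame 0 is then popped, j=1 appends frame 3, and so on.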
    k = -1
    for j in range(0, len(imgFilenames)-(numTuples-1)):
        if j%100==0:
            print("Sequence ",i,"  Progress ",j,"/",len(imgFilenames)-(numTuples-1))
        # if the window holds fewer than numTuples frames, fill it
        # numTuples is at least 2
        while (len(imgColorList)<numTuples): # or could be said (k-j < numTuples-1)
            k+=1 # k starts at -1
            imgColor = cv2.imread(imgFolder+imgFilenames[k])
            # cv2.resize expects (width, height): normalize to the 1226x370 KITTI
            # color resolution, then downsample by a factor of 4 in each dimension
            imgColor = cv2.resize(imgColor, (1226, 370))
            imgColor = cv2.resize(imgColor, (imgColor.shape[1]//4, imgColor.shape[0]//4))
            poseX20List.append(_get_3x4_tmat(poseFile[k])) # pose k is read together with image k
            imgColorList.append(imgColor)

            if k == 0:
                # only one image and pose have been read so far; a relative
                # transformation needs two consecutive poses
                continue
            # get target pose  B->A also changes to abgxyz : get abgxyzb-abgxyza
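            # (Conceptually, with poses stored as X->0 transforms, B->A should be
            # inv(T_A->0) composed with T_B->0; the exact form is in _get_tMat_B_2_A.)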
            pose_B2A = _get_tMat_B_2_A(poseX20List[(k-j)-1], poseX20List[(k-j)]) # Use last two
            if WRITE_PARAMS:
                # 6-D parameters
                abgxyzB2A = kitti._get_params_from_tmat(pose_B2A)
            else:
                # Transformation matrix 12 values
                abgxyzB2A = pose_B2A
            if WRITE_CLSF:
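                # Classification target: each of the 6 motion parameters is
                # discretized over BIN_rng into BIN_SIZE bins (assumed from how
                # get_multi_bit_target is used here)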
                bit = kitti.get_multi_bit_target(abgxyzB2A, BIN_rng, BIN_SIZE)
                bitB2AList.append(bit)
                
            poseB2AList.append(abgxyzB2A.reshape(-1))
            
        else:
            # while-else: runs once the window holds numTuples frames (the loop
            # never breaks); the tuple is ready to be written to permanent storage
            fileID = [100+int(seqID), 100000+j, 100000+(k)] # k=j+(numTuples-1)
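            # e.g. seqID '05' with numTuples=2 and j=12 gives k=13 and fileID [105, 100012, 100013]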
            if WRITE_CLSF:
                odometery_writer(fileID,# 3 ints
                                 imgColorList,# ntuplexRRRxCCC
                                 poseB2AList,# (ntuple-1)x(6 or 12)
                                 bitB2AList,# (ntuple-1)x6xBIN_SIZE
                                 tfRecFolder,
                                 numTuples)
                # Oldest sample is dropped from the window
                bitB2AList.pop(0)
            else:
                odometery_writer(fileID,# 3 ints
                                 imgColorList,# ntuplexRRRxCCC
                                 poseB2AList,# (ntuple-1)x(6 or 12)
                                 tfRecFolder,
                                 numTuples) 
            # Oldest sample is dropped from the window
            imgColorList.pop(0)
            poseB2AList.pop(0)
            poseX20List.pop(0)
        
    print("SeqID completed : ", seqID)
    return
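# A minimal, hypothetical driver sketch (the real caller lives elsewhere in the
# repo): each sequence index i is independent of the others, so the per-sequence
# workers are straightforward to parallelize, for example:
#
#     from functools import partial
#     from multiprocessing import Pool
#     worker = partial(process_dataset_colorOnly, startTime, durationSum,
#                      pclFolderList, imgFolderList, seqIDs, pclFilenamesList,
#                      imgFilenamesList, poseFileList, tfRecFolder, numTuples)
#     with Pool() as pool:
#         pool.map(worker, range(len(seqIDs)))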
# NOTE: this first definition of output_loop_clsf is shadowed by the second
# definition with the same name further below; it appears to be an earlier
# warping variant and is kept for reference.
def output_loop_clsf(batchImages, batchPcl, bTargetVals, bTargetT, bTargetP,
                     bRngs, batchTFrecFileIDs, i, **kwargs):
    """
    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Everything evaluated
    Warp second image based on predicted HAB and write to the new address
    Args:
        batchImages:        img
        batchPcl:           3 x n 
        bTargetVals:        b * 6 * nt
        bTargetT:           b * 6 * 32 * nT targets
        bTargetP:           b * 6 * 32 * nT predictions
        bRngs:              b * 33 * nT ranges
        batchTFrecFileIDs:  fileID 
        i:                  ID of the batch
        **kwargs:           model parameters
    Returns:
        N/A
    Raises:
        ValueError: If no dataDir
    """
    numTuples = kwargs.get('numTuple')
    # Two tasks:
    #   1 - Use the predicted bTargetP[b, 6, 32, nT] and bRngs[b, 6, 33, nT] to get the parameters
    #   2 - Update the ranges based on the predicted values for each image and save them
    # find the argmax of bTargetP and use it to look up the corresponding params
    if kwargs.get('lastTuple'):
        # for training on last tuple
        predParam = kitti.get_params_from_binarylogits(
            bTargetP[i], bRngs[i, :, :, numTuples - 2:numTuples - 1])
        # get updated ranges
        newRanges = kitti.get_updated_ranges(
            bTargetP[i], bRngs[i, :, :, numTuples - 2:numTuples - 1],
            'squared')  # softmax | squared
        # get updated bit target
        newBitTarget = kitti.get_multi_bit_target(
            bTargetVals[i, :, numTuples - 2:numTuples - 1], newRanges,
            bTargetP.shape[2])
    else:
        # for training on all tuples
        predParam = kitti.get_params_from_binarylogits(bTargetP[i], bRngs[i])
        # get updated ranges
        newRanges = kitti.get_updated_ranges(bTargetP[i], bRngs[i],
                                             'squared')  # softmax | squared
        # get updated bit target
        newBitTarget = kitti.get_multi_bit_target(
            bTargetVals[i], newRanges, bTargetP.shape[2]
        )  # make sure this function is compatible with nT > 2

    # Apply the prediction from extracted parameters

    print("difRNG === ", newRanges[0, :, 0] - bRngs[i, 0, :, 3])
    print('tartParams ========= ', bTargetT.shape)
    print('tarPParams ========= ', bTargetP.shape)
    print('predParams ========= ', predParam.shape)
    print('old Ranges ========= ', bRngs.shape)
    print('new Ranges ========= ', newRanges.shape)
    print('target val ========= ', bTargetVals[i, 0, numTuples - 2])
    print('target val =5=28= ', newRanges[0, 5], newRanges[0, 28])
    print('target rng =-1=0=+1= ', newRanges[0, np.argmax(newBitTarget) - 1],
          newRanges[0, np.argmax(newBitTarget)],
          newRanges[0, np.argmax(newBitTarget) + 1])
    #print('pred   val ========= ', predParam[1])

    #### Shift the ranges so morphed image will have correct target
    #print('new Ranges ========= ', newRanges[1])
    for p in range(newRanges.shape[0]):
        newRanges[p] -= predParam[p]
    #print('new-pred   ========= ', newRanges[1])

    #print(bTargetP[i,0,:,0])
    import matplotlib.pyplot as plt
    plt.subplot(311)
    plt.plot(bTargetT[i, 0, :, 3])
    normP = bTargetP[i, 0, :, 0] / np.linalg.norm(bTargetP[i, 0, :, 0])
    plt.plot(normP)
    plt.title('Target Prediction')
    #plt.show()

    plt.subplot(312)
    plt.plot(bRngs[i, 0, :, 3])
    #plt.plot(newRanges[0,2:31,0]-newRanges[0,1:30,0])
    plt.plot(newRanges[0, :, 0])
    plt.title('Ranges')
    #plt.show()

    plt.subplot(313)
    plt.plot(newBitTarget[0])
    #plt.plot(newRanges[0,1:31,0]-newRanges[0,0:30,0])
    plt.title('Ranges diff')
    plt.show()

    if kwargs.get('lastTuple'):
        pclBTransformed, targetRes, depthBTransformed = _apply_prediction(
            batchPcl[i, :, :, numTuples - 1], bTargetVals[i, :, numTuples - 2],
            predParam[:, 0, 0], **kwargs)
        outBatchPcl = batchPcl.copy()
        outBatchImages = batchImages.copy()
        outTargetVals = bTargetVals.copy()
        outBatchPcl[i, :, :, numTuples - 1] = pclBTransformed
        outBatchImages[i, :, :, numTuples - 1] = depthBTransformed
        print('target val shape === ', bTargetVals.shape)
        print('targetRes  shape === ', targetRes.shape)
        outTargetVals[i, :, numTuples - 2] = targetRes
    else:
        # TODO: decide whether all tuples should be updated when lastTuple is False
        print('do we update all???')

    ################## TO BE FIXED APPLYING THE PREDICTION BASED ON PREDPARAM
    # split for depth dimension
    #pclBTransformed, targetRes, depthBTransformed = _apply_prediction(batchPcl[i,:,:,numTuples-1], bTargetVals[i,:,numTuples-2], predParam, **kwargs)
    #outBatchPcl = batchPcl.copy()
    #outBatchImages = batchImages.copy()
    #bTargetVals = bTargetT.copy()
    #outBatchPcl[i,:,:,numTuples-1] = pclBTransformed
    #outBatchImages[i,:,:,numTuples-1] = depthBTransformed
    #outTargetT[i,:,numTuples-2] = targetRes
    # Write each Tensorflow record
    filename = (str(batchTFrecFileIDs[i][0] + 100) + "_" +
                str(batchTFrecFileIDs[i][1] + 100000) + "_" +
                str(batchTFrecFileIDs[i][2] + 100000))
    tfrecord_io.tfrecord_writer_ntuple(batchTFrecFileIDs[i], outBatchPcl[i],
                                       outBatchImages[i], outTargetVals[i],
                                       kwargs.get('warpedOutputFolder') + '/',
                                       numTuples, filename)

    # Write the predicted transformation to a folder
    if kwargs.get('phase') == 'train':
        folderTmat = kwargs.get('tMatTrainDir')
    else:
        folderTmat = kwargs.get('tMatTestDir')
    write_predictions(batchTFrecFileIDs[i], bTargetP[i], folderTmat)
    return
def output_loop_clsf(batchImages, batchPcl, bTargetVals, bTargetT, bTargetP,
                     bRngs, batchTFrecFileIDs, i, **kwargs):
    """
    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Everything evaluated
    Warp second image based on predicted HAB and write to the new address
    Args:
        batchImages:        img
        batchPcl:           3 x n 
        bTargetVals:        b * 6 * nt
        bTargetT:           b * 6 * 32 * nT targets
        bTargetP:           b * 6 * 32 * nT predictions
        bRngs:              b * 33 * nT ranges
        batchTFrecFileIDs:  fileID 
        i:                  ID of the batch
        **kwargs:           model parameters
    Returns:
        N/A
    Raises:
        ValueError: If no dataDir
    """
    numTuples = kwargs.get('numTuple')
    # Two tasks:
    #   1 - Use the predicted bTargetP[b, 6, 32, nT] and bRngs[b, 6, 33, nT] to get the parameters
    #   2 - Update the ranges based on the predicted values for each image and save them
    # find the argmax of bTargetP and use it to look up the corresponding params
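    # (Assumed from the call pattern rather than from kitti's source: for each of
    # the 6 parameters, the 32 logits select one of the bins bounded by the 33
    # range edges, and get_params_from_binarylogits maps that bin back to a value.)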
    if kwargs.get('lastTuple'):
        # for training on last tuple
        predParam = kitti.get_params_from_binarylogits(
            bTargetP[i], bRngs[i, :, :, numTuples - 2:numTuples - 1])
        # get updated ranges
        newRanges = kitti.get_updated_ranges(
            bTargetP[i], bRngs[i, :, :, numTuples - 2:numTuples - 1],
            'squared')  # softmax | squared
        # get updated bit target
        newBitTarget = kitti.get_multi_bit_target(
            bTargetVals[i, :, numTuples - 2:numTuples - 1], newRanges,
            bTargetP.shape[2])
    else:
        # for training on all tuples
        predParam = kitti.get_params_from_binarylogits(bTargetP[i], bRngs[i])
        # get updated ranges
        newRanges = kitti.get_updated_ranges(bTargetP[i], bRngs[i],
                                             'squared')  # softmax | squared
        # get updated bit target
        newBitTarget = kitti.get_multi_bit_target(
            bTargetVals[i], newRanges, bTargetP.shape[2]
        )  # make sure this function is compatible with nT > 2

    # Debug block: set to True to print shapes/values and plot the targets and ranges
    if False:
        print("oldRNG === ", bRngs[i, 0, :, 3])
        print("newRNG === ", newRanges[0, :, 0])
        print("difRNG === ", newRanges[0, :, 0] - bRngs[i, 0, :, 3])
        print('tartParams ========= ', bTargetT.shape)
        print('tarPParams ========= ', bTargetP.shape)
        print('predParams ========= ', predParam.shape)
        print('old Ranges ========= ', bRngs.shape)
        print('new Ranges ========= ', newRanges.shape)
        print('target val ========= ', bTargetVals[i, 0, numTuples - 2])
        print('target val =5=28= ', newRanges[0, 5], newRanges[0, 28])
        print('target rng =-1=0=+1= ', newRanges[0,
                                                 np.argmax(newBitTarget) - 1],
              newRanges[0, np.argmax(newBitTarget)],
              newRanges[0, np.argmax(newBitTarget) + 1])
        print('pred   val ========= ', predParam[1])

        #print(bTargetP[i,0,:,0])
        import matplotlib.pyplot as plt
        plt.subplot(311)
        plt.plot(bTargetT[i, 0, :, 3])
        normP = bTargetP[i, 0, :, 0] / np.linalg.norm(bTargetP[i, 0, :, 0])
        plt.plot(normP)
        plt.title('Target Prediction')
        #plt.show()

        plt.subplot(312)
        plt.plot(bRngs[i, 0, :, 3])
        #plt.plot(newRanges[0,2:31,0]-newRanges[0,1:30,0])
        plt.plot(newRanges[0, :, 0])
        plt.title('Ranges')
        #plt.show()

        plt.subplot(313)
        plt.plot(newBitTarget[0])
        #plt.plot(newRanges[0,1:31,0]-newRanges[0,0:30,0])
        plt.title('Ranges diff')
        plt.show()
        print('TargetVals ====', bTargetVals.shape)
        print('BTarget    ====', bTargetT.shape)
        print('Ranges     ====', bRngs.shape)
        print('newBTarget    ====', newBitTarget.shape)
        print('newRanges     ====', newRanges.shape)

    # Update the target values and labels
    if kwargs.get('lastTuple'):
        outRanges = bRngs[i].copy()
        outRanges[:, :, numTuples - 2] = newRanges.reshape(
            [newRanges.shape[0], newRanges.shape[1]])
        outBitTarget = bTargetT[i].copy()
        outBitTarget[:, :, numTuples - 2] = newBitTarget
    else:
        # TODO: decide whether all tuples should be updated when lastTuple is False
        print('do we update all???')

    #print('outBTarget    ====', outBitTarget.shape)
    #print('outRanges     ====', outRanges.shape)

    # Write each Tensorflow record
    filename = (str(batchTFrecFileIDs[i][0] + 100) + "_" +
                str(batchTFrecFileIDs[i][1] + 100000) + "_" +
                str(batchTFrecFileIDs[i][2] + 100000))
    tfrecord_io.tfrecord_writer_ntuple_classification(
        batchTFrecFileIDs[i], batchPcl[i], batchImages[i], bTargetVals[i],
        outBitTarget, outRanges,
        kwargs.get('warpedOutputFolder') + '/', numTuples, filename)

    # Write the predicted transformation to a folder
    if kwargs.get('phase') == 'train':
        folderTmat = kwargs.get('tMatTrainDir')
    else:
        folderTmat = kwargs.get('tMatTestDir')

    if kwargs.get('lastTuple'):
        predParam = predParam.reshape(predParam.shape[0])

    write_predictions(batchTFrecFileIDs[i], predParam, folderTmat)
    return
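# A hypothetical usage sketch (the real evaluation loop lives elsewhere in the
# repo): after a batch has been evaluated, the classification writer above is
# typically called once per sample in the batch, for example:
#
#     for i in range(batchImages.shape[0]):
#         output_loop_clsf(batchImages, batchPcl, bTargetVals, bTargetT, bTargetP,
#                          bRngs, batchTFrecFileIDs, i, **modelParams)
#
# where modelParams is the kwargs dict carrying 'numTuple', 'lastTuple',
# 'warpedOutputFolder', 'phase', and the tMat output directories.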
def process_dataset(startTime, durationSum, pclFolderList, seqIDs,
                    pclFilenamesList, poseFileList, tfRecFolder, numTuples, i):
    '''
    pclFilenames: list of pcl file addresses
    poseFile: includes a list of pose files to read
    the point cloud is moved to the (i+1)'th frame:
        tMatAo (i): A->0
        tMatBo (i+1): B->0
        tMatAB (target): A->B  (i -> i+1)

    Calculate the Yaw, Pitch, Roll from the rotation matrix,
    extract dX, dY, dZ,
    and use them to train the network.
    '''
    seqID = seqIDs[i]
    print("SeqID started : ", seqID)

    pclFolder = pclFolderList[i]
    pclFilenames = pclFilenamesList[i]
    poseFile = poseFileList[i]

    xyziList = list()
    imgDepthList = list()
    poseB2AList = list()
    bitB2AList = list()
    poseX20List = list()
    # pop the first entry of the tuple window and append the latest frame as the numTuples'th
    # j is the beginning of the window
    # k is the end of the window
    k = -1
    for j in range(0, len(pclFilenames) - (numTuples - 1)):
        if j % 100 == 0:
            print("Sequence ", i, "  Progress ", j, "/",
                  len(pclFilenames) - (numTuples - 1))
        # if the window holds fewer than numTuples entries, fill it
        # numTuples is at least 2
        while (len(xyziList) <
               numTuples):  # or could be said (k-j < numTuples-1)
            k += 1  # k starts at -1
            xyzi = _get_pcl_XYZ(pclFolder + pclFilenames[k])
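            # get_depth_image_pano_pclView projects the point cloud to a panoramic
            # depth image (128x512 per the writer comment below) and also returns
            # the xyzi points as adjusted by the projection (assumption from usage)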
            imgDepth, xyzi = get_depth_image_pano_pclView(xyzi)
            poseX20List.append(
                _get_3x4_tmat(poseFile[k])
            )  # pose k is read together with point cloud k
            xyziList.append(xyzi)
            imgDepthList.append(imgDepth)
            if k == 0:
                # only one PCL and pose have been read so far; a relative
                # transformation needs two consecutive poses
                continue
            # get target pose  B->A also changes to abgxyz : get abgxyzb-abgxyza
            pose_B2A = _get_tMat_B_2_A(poseX20List[(k - j) - 1],
                                       poseX20List[(k - j)])  # Use last two
            abgxyzB2A = kitti._get_params_from_tmat(pose_B2A)
            bit = kitti.get_multi_bit_target(abgxyzB2A, BIN_rng, BIN_SIZE)
            poseB2AList.append(abgxyzB2A)
            bitB2AList.append(bit)
        else:
            # while-else: runs once the window holds numTuples frames (the loop
            # never breaks); the tuple is ready to be written to permanent storage
            fileID = [100 + int(seqID), 100000 + j,
                      100000 + (k)]  # k=j+(numTuples-1)
            odometery_writer(
                fileID,  # 3 ints
                xyziList,  # ntuplex3xPCL_COLS
                imgDepthList,  # ntuplex128x512
                poseB2AList,  # (ntuple-1)x6
                bitB2AList,  # (ntuple-1)x6x32
                tfRecFolder,
                numTuples)
            # Oldest sample is dropped from the window
            xyziList.pop(0)
            imgDepthList.pop(0)
            poseB2AList.pop(0)
            bitB2AList.pop(0)
            poseX20List.pop(0)

    print("SeqID completed : ", seqID)
    return