Example #1
def do_output(segmentPath, globalParams):
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    segmentOutputMinimalPath = os.path.join(globalParams.MINIMAL_OUTPUT_PATH,
                                            segmentName)
    segmentOutputFullPath = os.path.join(globalParams.BASE_OUTPUT_PATH,
                                         segmentName)

    os.makedirs(segmentOutputMinimalPath, exist_ok=True)

    # Pairs of (filename to copy, whether the file is optional)
    filesToCopyToOutputMin = [
        (ReconstructionUtils.FILENAME_CARS_TRAJECTORIES, False),
        (ReconstructionUtils.FILENAME_PEOPLE_TRAJECTORIES, False),
        (ReconstructionUtils.FILENAME_CARLA_BBOXES, True),
        (ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD, False),
        (ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD_SEGCOLOR,
         True), (ReconstructionUtils.FILENAME_CENTERING_ENV, False),
        (ReconstructionUtils.FILENAME_CAMERA_INTRISICS, True)
    ]

    for filename, optional in filesToCopyToOutputMin:

        srcFullFilePath = os.path.join(segmentOutputFullPath, filename)
        dstFullFilePath = os.path.join(segmentOutputMinimalPath, filename)

        if not os.path.exists(srcFullFilePath):
            if not optional:
                assert False, (
                    f"Can't copy file {filename} because it doesn't exist!"
                )
        else:
            shutil.copyfile(srcFullFilePath, dstFullFilePath)
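
The pattern above (a list of (filename, optional) pairs where a missing required file aborts and a missing optional one is skipped) can be exercised in isolation. A minimal, self-contained sketch follows; the directory layout and filenames are made up for illustration and do not come from the pipeline.

import os
import shutil
import tempfile

def copy_required_and_optional(srcDir, dstDir, filesToCopy):
    # filesToCopy: list of (filename, optional) pairs, as in do_output above.
    os.makedirs(dstDir, exist_ok=True)
    for filename, optional in filesToCopy:
        srcPath = os.path.join(srcDir, filename)
        if not os.path.exists(srcPath):
            assert optional, f"Can't copy file {filename} because it doesn't exist!"
            continue  # optional file missing: simply skip it
        shutil.copyfile(srcPath, os.path.join(dstDir, filename))

if __name__ == "__main__":
    src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()
    open(os.path.join(src, "cars.p"), "w").close()
    # "people.p" does not exist but is marked optional, so it is skipped.
    copy_required_and_optional(src, dst, [("cars.p", False), ("people.p", True)])
    print(os.listdir(dst))  # ['cars.p']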
Example #2
def do_3DReconstruction(segmentPath, globalParams):
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    folderOutput = os.path.join(globalParams.POINTCLOUD_OUTPUT_BASEFILEPATH,
                                segmentName)

    class DatasetOptions:
        def __init__(self):
            self.dataAlreadyInWorldSpace = True
            self.framesIndicesCaptured = list(
                np.arange(int(globalParams.FRAMEINDEX_MIN),
                          int(globalParams.FRAMEINDEX_MAX) + 1)
            )  # TODO: fix number of frames correctly by reading metadata !
            self.LIMIT_FRAME_NUMBER = globalParams.FRAMEINDEX_MAX
            # How many frames to skip (modulo) for reconstruction, if needed
            self.frameSkip = globalParams.FRAMES_SKIP
            self.numInitialFramesToIgnore = (
                globalParams.FRAMEINDEX_MIN - 1
            )  # How many frames in the beginning of the scene to ignore

    options = DatasetOptions()
    (reconstruction, people_rec, cars_rec, scale, ped_dict, cars_2D, people_2D,
     valid_ids, cars_dict, init_frames, init_frames_cars,
     heroCarDetails) = ReconstructionUtils.reconstruct3D_ply(
        folderOutput,
        local_scale_x=globalParams.SCALE_USED_IN_FINAL_RECONSTRUCTION,
        recalculate=globalParams.FORCE_RECOMPUTE,
        datasetOptions=options,
        params=globalParams)
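
The TODO in DatasetOptions notes that the frame range should really come from the segment's metadata. Below is a minimal sketch of how the frame count could be obtained by iterating the TFRecord once; this is an assumption about what pipeline_commons.getNumFramesInSegmentPath (used in Example #6) does, the real helper is not shown here.

import tensorflow as tf

def getNumFramesInSegment(segmentPath):
    # One pass over the TFRecord; each record is one Waymo frame.
    dataset = tf.data.TFRecordDataset(segmentPath, compression_type='')
    return sum(1 for _ in dataset)

# FRAMEINDEX_MAX could then be clamped the same way Example #6 does:
#   globalParams.FRAMEINDEX_MAX = min(globalParams.FRAMEINDEX_MAX,
#                                     getNumFramesInSegment(segmentPath) - 1)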
Example #3
def runSegmentationOps(segmentPath, globalParams):

    # Step 1: Do segmentation on all
    # Define base segmentation modelBaseParams such as model and configs
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    #print(f"================= Segment {segmentName} ===============")

    # Setup segmentation for this segment from dataset
    segmentFiles_InputPath      = os.path.join(globalParams.SEG_INPUT_IMAGES_BASEPATH, segmentName, globalParams.SEG_INPUT_IMAGES_RGBFOLDER)
    segmentFiles_OutputPath     = os.path.join(globalParams.SEG_OUTPUT_LABELS_BASEFILEPATH, segmentName, globalParams.SEG_OUTPUT_LABELS_SEGFOLDER)
    segmentFiles_OutputCompPath = os.path.join(globalParams.SEG_OUTPUTCOMP_LABELS_BASEFILEPATH, segmentName, globalParams.SEG_OUTPUTCOMP_LABELS_RGBFOLDER)

    modelParams = []
    modelParams.extend(["--imgs", segmentFiles_InputPath])
    modelParams.extend(["--resourcesFolderPath", globalParams.SEGMENTATION_SETUP_DATA_PATH])

    if globalParams.USE_GPU_FOR_SEGMENTATION is not None:
        modelParams.extend(["--gpu", str(globalParams.USE_GPU_FOR_SEGMENTATION)])

    modelConfigPath = os.path.join(globalParams.SEGMENTATION_SETUP_DATA_PATH, "config/ade20k-resnet50dilated-ppm_deepsup.yaml")
    modelParams.extend(["--cfg", modelConfigPath])
    modelDirPath = os.path.join(globalParams.SEGMENTATION_SETUP_DATA_PATH, "ade20k-resnet50dilated-ppm_deepsup")
    modelCheckPoint = "epoch_20.pth"
    modelParams.extend(["DIR", modelDirPath])
    modelParams.extend(["TEST.checkpoint", modelCheckPoint])
    modelParams.extend(["TEST.result", segmentFiles_OutputPath])
    modelParams.extend(["TEST.resultComp", segmentFiles_OutputCompPath])
    modelParams.extend(["TEST.saveOnlyLabels", '1'])
    modelParams.extend(["DATASET.scaleFactor", '2.0'])

    # Create functors and run extraction
    extractFunctor, decisionFunctor = defineLabelsExtractor(5, globalParams)  # 5 images per frame expected
    SemanticSegSupport.runTestSample(modelParams, extractFunctor, decisionFunctor, globalParams.FORCE_RECOMPUTE)

    # The output of the above process is a set of pkl files (labels_frameId), one per frame: each is a
    # dictionary indexed by camera id (starting from 0), where each entry contains the segmented labels
    # for that picture in the original image space (height, width).


    # Note: outputDict and numFramesExtracted are assumed to be maintained by the functors
    # created in defineLabelsExtractor (they are not defined in this snippet).
    if len(outputDict) != 0:
        print("Missing frames ", outputDict.keys())
        assert False, "Incomplete sequence! Some frames were not processed."
    print(f"Summary of inference: numFrames: {numFramesExtracted}")
Example #4
def do_RGBExtraction(segmentPath, globalParams):
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    assert os.path.exists(
        segmentPath), f"The file you specified {segmentPath} doesn't exist!"

    # 1. Iterate over frame by frame of a segment
    dataset = tf.data.TFRecordDataset(segmentPath, compression_type='')

    numFrames = sum(1 for _ in dataset)
    print(f"Num frames in {segmentName}: {numFrames}")

    allRGBImagesDict = {}
    for index, data in enumerate(dataset):
        if (globalParams.FRAMEINDEX_MIN > index) and (
                index != 0):  # We always need the first frame as the reference point
            continue
        if globalParams.FRAMEINDEX_MAX < index:
            break

        if (index % pipeline_commons.FRAMESINFO_IMGS_DEBUG_RATE == 0):
            pipeline_commons.globalLogger.info(
                f"Parsing frame {index}/{numFrames}")

        # Read the frame in bytes
        frame = open_dataset.Frame()
        frame.ParseFromString(bytearray(data.numpy()))

        # Gather and decode all RGB images from this frame to the global store
        segInputFolder = os.path.join(globalParams.SEG_INPUT_IMAGES_BASEPATH,
                                      segmentName,
                                      globalParams.SEG_INPUT_IMAGES_RGBFOLDER)
        gatherImagesFromFrame(frame,
                              index,
                              allRGBImagesDict,
                              segInputFolder,
                              forceRecompute=globalParams.FORCE_RECOMPUTE)

        saveImagesAsSegmentationInput(allRGBImagesDict, segInputFolder)
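
gatherImagesFromFrame and saveImagesAsSegmentationInput are not shown in this example. Below is a sketch of the decoding step they presumably perform, using the standard waymo_open_dataset frame layout (each frame.images entry carries JPEG bytes plus a camera name enum); the helper name and return format here are assumptions.

import tensorflow as tf

def decodeFrameImages(frame):
    # frame is an open_dataset.Frame, as parsed above.
    imagesByCamera = {}
    for cameraImage in frame.images:
        # cameraImage.image holds the JPEG bytes, cameraImage.name the camera enum (FRONT = 1, ...)
        imagesByCamera[cameraImage.name] = tf.image.decode_jpeg(cameraImage.image).numpy()
    return imagesByCamera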
Example #5
def do_3DReconstruction(segmentPath, globalParams):
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    folderOutput = os.path.join(globalParams.POINTCLOUD_OUTPUT_BASEFILEPATH,
                                segmentName)

    class DatasetOptions:
        def __init__(self):
            self.dataAlreadyInWorldSpace = True
            self.framesIndicesCaptured = list(
                np.arange(int(globalParams.FRAMEINDEX_MIN),
                          int(globalParams.FRAMEINDEX_MAX) + 1)
            )  # TODO: fix number of frames correctly by reading metadata !
            self.LIMIT_FRAME_NUMBER = globalParams.FRAMEINDEX_MAX
            # How many frames to skip (modulo) for reconstruction, if needed
            self.frameSkip = globalParams.FRAMES_SKIP
            self.numInitialFramesToIgnore = (
                globalParams.FRAMEINDEX_MIN - 1
            )  # How many frames in the beginning of the scene to ignore

    options = DatasetOptions()
    (reconstruction, people_rec, cars_rec, scale, ped_dict, cars_2D, people_2D,
     valid_ids, cars_dict, init_frames, init_frames_cars,
     heroCarDetails) = ReconstructionUtils.reconstruct3D_ply(
        folderOutput,
        local_scale_x=globalParams.SCALE_USED_IN_FINAL_RECONSTRUCTION,
        recalculate=globalParams.FORCE_RECOMPUTE,
        datasetOptions=options,
        params=globalParams)


if __name__ == "__main__":
    import pipeline_params
    pipeline_params.globalParams.FRAMEINDEX_MIN = 0
    pipeline_params.globalParams.FRAMEINDEX_MAX = 5
    segmentName = pipeline_commons.extractSegmentNameFromPath(
        pipeline_params.FILENAME_SAMPLE[0])

    do_3DReconstruction(segmentName, pipeline_params.globalParams)
Example #6
def workFunc(workData):
    allTasksInBatch, indicesForThisWorker, params, stagesToRun, id = workData
    globalLogger.info(
        f"Id {id} got task to solve indices {indicesForThisWorker}")

    forceRecompute = params.FORCE_RECOMPUTE

    def writeCheckpoint(filenames: dict, checkpointType: PipelineStages):
        # Touch an empty marker file; its existence is the checkpoint for this stage.
        with open(filenames[checkpointType], "w"):
            pass

    def hasCheckpoint(filenames: dict, checkpointType: PipelineStages):
        return os.path.exists(filenames[checkpointType])

    for segment_index in indicesForThisWorker:
        # print("Line {}: {}".format(cnt, segmentPath.strip()))
        segmentPath = allTasksInBatch[segment_index]
        globalLogger.info(
            f"$$$$$$$$$$$$ Id {id} Processing segment index: {segment_index}, path: {segmentPath} $$$$$$$$$$$$"
        )

        segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
        outputCheckpointFilePrefix = os.path.join(
            params.BASE_OUTPUT_PATH,
            f"{COMPLETED_CHECKPOINT_PREFIXNAME}{segmentName}_")
        outputCheckpointFileNames = {}
        for item in PipelineStages:
            outputCheckpointFileNames[item] = outputCheckpointFilePrefix + str(
                item.name)

        start_time = time.time()
        if not isSegmentationTheOnlyStage(stagesToRun):
            #print("Finding num frames for ", stagesToRun)
            numFrames = pipeline_commons.getNumFramesInSegmentPath(segmentPath)
            params.FRAMEINDEX_MAX = min(params.FRAMEINDEX_MAX, numFrames - 1)
            params.FRAMEINDEX_MIN = max(0, params.FRAMEINDEX_MIN)
            globalLogger.info(
                f"Id {id} In segment index {segment_index} - There are {numFrames} frames. Starting the pipeline process between frames {pipeline_params.globalParams.FRAMEINDEX_MIN}-{pipeline_params.globalParams.FRAMEINDEX_MAX}, recompute: {pipeline_params.globalParams.FORCE_RECOMPUTE}"
            )
        else:
            globalLogger.info(
                f"Id {id} Starting the pipeline process between frames {pipeline_params.globalParams.FRAMEINDEX_MIN}-{pipeline_params.globalParams.FRAMEINDEX_MAX}, recompute: {pipeline_params.globalParams.FORCE_RECOMPUTE}"
            )

        hackEnabled = False
        if hackEnabled:
            writeCheckpoint(outputCheckpointFileNames,
                            PipelineStages.PIPELINE_RGB)

        if PipelineStages.PIPELINE_RGB in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames, PipelineStages.PIPELINE_RGB):
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 1: RGB Image extraction")
                pipeline_extractRGBImages.do_RGBExtraction(segmentPath, params)
                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_RGB)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 1: Already done !")

        if PipelineStages.PIPELINE_SEMANTIC in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames,
                    PipelineStages.PIPELINE_SEMANTIC):
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 2: Segmentation")
                pipeline_segmentation.runSegmentationOps(segmentPath, params)
                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_SEMANTIC)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 2: Already done !")

        # time.sleep(10)
        if PipelineStages.PIPELINE_EXTRACTION in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames,
                    PipelineStages.PIPELINE_EXTRACTION):
                globalLogger.log(
                    logging.INFO,
                    f"Id {id} ##Stage 3: Peoples and vehicle trajectories extraction"
                )
                pipeline_extractPeopleAndCars.do_PeopleAndVehiclesExtraction(
                    segmentPath, params)
                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_EXTRACTION)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 3: Already done !")

        if PipelineStages.PIPELINE_POINTCLOUD in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames,
                    PipelineStages.PIPELINE_POINTCLOUD):
                globalLogger.log(
                    logging.INFO,
                    f"Id {id} ##Stage 4: Point cloud reconstruction. Part 1: for environment"
                )
                pipeline_pointCloudReconstruction.do_PointCloudReconstruction(
                    segmentPath, params, useEnvironmentPoints=True)

                if params.POINT_CLOUD_FOR_MOTION_FRAMES:
                    globalLogger.log(
                        logging.INFO,
                        f"Id {id} ##Stage 4: Point cloud reconstruction. Part 2: for motion stuff"
                    )
                    pipeline_pointCloudReconstruction.do_PointCloudReconstruction(
                        segmentPath, params, useEnvironmentPoints=False)

                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_POINTCLOUD)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 4: Already done !")

        if PipelineStages.PIPELINE_3DRECONSTRUCTION in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames,
                    PipelineStages.PIPELINE_3DRECONSTRUCTION):
                globalLogger.log(
                    logging.INFO,
                    f"Id {id} ##Stage 5: 3D Scene reconstruction")
                pipeline_3DReconstruction.do_3DReconstruction(
                    segmentPath, params)
                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_3DRECONSTRUCTION)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 5: Already done !")

        if PipelineStages.PIPELINE_OUTPUT in stagesToRun:
            if forceRecompute or not hasCheckpoint(
                    outputCheckpointFileNames, PipelineStages.PIPELINE_OUTPUT):
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 6: Copy files to output")
                pipeline_outputProcessing.do_output(segmentPath, params)
                writeCheckpoint(outputCheckpointFileNames,
                                PipelineStages.PIPELINE_OUTPUT)
            else:
                globalLogger.log(logging.INFO,
                                 f"Id {id} ##Stage 6: Already done !")

        end_time = time.time()
        globalLogger.log(
            logging.INFO,
            f"Id {id} $$$ Processing of index {segment_index} finished, time spent {(end_time - start_time)}"
        )
    return 0
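
writeCheckpoint/hasCheckpoint above implement per-stage resume by touching empty marker files. A standalone sketch of the idiom follows; the marker names are illustrative (the real names are built from COMPLETED_CHECKPOINT_PREFIXNAME, the segment name, and the PipelineStages member).

import os

def writeCheckpoint(markerPath):
    # Touch an empty file; its mere existence marks the stage as done.
    with open(markerPath, "w"):
        pass

def hasCheckpoint(markerPath):
    return os.path.exists(markerPath)

# Typical stage guard, mirroring the ones above:
# if forceRecompute or not hasCheckpoint("output/COMPLETED_<segment>_PIPELINE_RGB"):
#     run_stage()
#     writeCheckpoint("output/COMPLETED_<segment>_PIPELINE_RGB")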
Example #7
def do_PointCloudReconstruction(segmentPath,
                                globalParams,
                                useEnvironmentPoints=True):
    setupGlobals(globalParams)
    # This is the transformation from the world frame to the reference vehicle frame (the first pose of the vehicle in the world)
    worldToReferencePointTransform = None

    assert os.path.exists(segmentPath), (
        f"The file you specified {segmentPath} doesn't exist!")

    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    segmentedDataPath = os.path.join(
        globalParams.SEG_OUTPUT_LABELS_BASEFILEPATH, segmentName,
        globalParams.SEG_OUTPUT_LABELS_SEGFOLDER)

    # Reset some things that shouldn't be persistent between episodes
    global DEBUG_origSegLabel_Stats
    DEBUG_origSegLabel_Stats = {}

    # 1. Iterate over frame by frame of a segment
    dataset = tf.data.TFRecordDataset(segmentPath, compression_type='')
    for frameIndex, data in enumerate(dataset):
        # Frame 0 is always processed: it provides the camera/vehicle reference point.
        if frameIndex != 0:
            if (globalParams.FRAMEINDEX_MIN is not None
                    and frameIndex < globalParams.FRAMEINDEX_MIN):
                continue
            if globalParams.FRAMEINDEX_MAX is not None and frameIndex > globalParams.FRAMEINDEX_MAX:
                break

        # Check if the files already exists
        folderOutput = os.path.join(
            globalParams.POINTCLOUD_OUTPUT_BASEFILEPATH, segmentName)
        outputFramePath_rgb = os.path.join(
            folderOutput, "{0:05d}.ply".format(frameIndex)
            if useEnvironmentPoints else "{0:05d}_motion.ply".format(frameIndex))
        outputFramePath_seg = os.path.join(
            folderOutput, "{0:05d}_seg.ply".format(frameIndex)
            if useEnvironmentPoints else "{0:05d}_motionseg.ply".format(frameIndex))
        outputFramePath_segColored = os.path.join(
            folderOutput, "{0:05d}_segColor.ply".format(frameIndex)
            if useEnvironmentPoints else "{0:05d}_motionsegColor.ply".format(frameIndex))

        if frameIndex != 0 and not globalParams.FORCE_RECOMPUTE and \
            os.path.exists(outputFramePath_rgb) and \
            os.path.exists(outputFramePath_seg) and \
            os.path.exists(outputFramePath_segColored):
            continue

        if frameIndex % pipeline_commons.FRAMESINFO_IMGS_DEBUG_RATE == 0:
            pipeline_commons.globalLogger.info(
                f"Extracted point cloud for frames {frameIndex}/{globalParams.FRAMEINDEX_MAX}..."
            )

        # Step 1: Read the frame in bytes
        # -------------------------------------------
        frame = open_dataset.Frame()
        frame.ParseFromString(bytearray(data.numpy()))

        # All point cloud data is relative to the car with sensors attached
        # We can transform to world positions using frame.pose.transform.
        # But to get coordinates back to the reference frame - WHICH IS THE INITIAL POSITION OF THE CAR - we use the below inverse matrix
        frame_thisPose = np.reshape(
            np.array(frame.pose.transform).astype(np.float32), [4, 4])
        if frameIndex == 0:
            worldToReferencePointTransform = np.linalg.inv(frame_thisPose)
        # -------------------------------------------

        # Step 2: Process the frame and get the 3d lidar points and camera projected points from the frame, for each return
        # -------------------------------------------
        points_byreturn, cp_points_byreturn = getPointCloudPointsAndCameraProjections(
            frame,
            frame_thisPose,
            worldToReferencePointTransform,
            useEnvPoints=useEnvironmentPoints)
        # -------------------------------------------

        # Step 3: iterate over all point cloud data and camera projections and fill a dictionary with the data of each 3D point
        # Here we gather all ply data in the format {(x, y, z): [r, g, b, label]}, for all points in the point cloud.
        # Why a dictionary? Because we might want to discretize the original space into a lower-resolution one, so several (x, y, z) points from the original data may fall into the same chunk.
        # -------------------------------------------
        plyDataPoints = {}

        # Read the RGB images and segmentation labels corresponding for this frame
        RGB_Data = readRGBDataForFrame(frameIndex, segmentName, globalParams)
        SEG_Data = readSEGDataForFrame(frameIndex, segmentName, globalParams)

        # Sort images by key index
        images = sorted(frame.images, key=lambda i: i.name)

        # For each return
        for returnIndex in returnsIndicesToUse:
            # Put together [projected camera data, 3D points] on the same row, convert to tensor
            cp_points_all_concat = np.concatenate([
                cp_points_byreturn[returnIndex], points_byreturn[returnIndex]
            ],
                                                  axis=-1)
            cp_points_all_concat_tensor = tf.constant(cp_points_all_concat)

            # Compute the distance between lidar points and vehicle frame origin for each 3d point
            distances_all_tensor = tf.norm(
                points_byreturn[returnIndex], axis=-1, keepdims=True
            ) if IS_RANGE_COLOR_ShOWING_ENABLED else None

            # Create a tensor with all projected [projected camera data] and one with 3d points
            cp_points_all_tensor = tf.constant(cp_points_byreturn[returnIndex],
                                               dtype=tf.int32)
            points_3D_all_tensor = tf.constant(points_byreturn[returnIndex],
                                               dtype=tf.float32)

            # For each camera image index
            image_projections_indices = [0, 1]
            for imageProjIndex in image_projections_indices:
                for image_index in cameraindices_to_use:
                    assert len(images) == len(cameraindices_to_use), (
                        "looks like you need to add more data in image_projection_indices and see if I didn't do any hacks with it ! Waymo data changed"
                    )

                    # A mask with True where the camera projection points were on this image index
                    mask = tf.equal(
                        cp_points_all_tensor[...,
                                             0 if imageProjIndex == 0 else 3],
                        images[image_index].name)

                    # Now select only camera projection data and 3D points in vehicle frame associated with this camera index
                    cp_points_all_tensor_camera = tf.cast(tf.gather_nd(
                        cp_points_all_tensor, tf.where(mask)),
                                                          dtype=tf.float32)
                    points_3D_all_tensor_camera = tf.cast(tf.gather_nd(
                        points_3D_all_tensor, tf.where(mask)),
                                                          dtype=tf.float32)

                    # Select only the cp points associated with the iterated camera projection index
                    cp_points_all_tensor_camera_byProjIndex = cp_points_all_tensor_camera[...,1:3] if imageProjIndex == 0 \
                                                                  else cp_points_all_tensor_camera[..., 4:6]

                    # Associate on each row one 3D point with its corresponding image camera projection (this index being iterated on)
                    # We have now on each row:  [(x,y,z, camX, camY)]
                    points3D_and_cp = tf.concat([
                        points_3D_all_tensor_camera,
                        cp_points_all_tensor_camera_byProjIndex
                    ],
                                                axis=-1).numpy()

                    # Gather these points in the output dictionary
                    processPoints(points3D_and_cp,
                                  plyDataPoints,
                                  imageCameraIndex=image_index,
                                  rgbData=RGB_Data,
                                  segData=SEG_Data,
                                  useEnvironmentPoints=useEnvironmentPoints,
                                  useRawCarlaSegLabels=False)

                    # Optional debug visualization: show the projected points colored by range
                    if IS_RANGE_COLOR_ShOWING_ENABLED:
                        distances_all_tensor_camera = tf.gather_nd(
                            distances_all_tensor, tf.where(mask))

                        # Associate on each row, with each x,y in camera space, the distance from vehicle frame to the 3D point lidar
                        projected_points_all_from_raw_data = tf.concat(
                            [cp_points_all_tensor_camera[..., 1:3],
                            distances_all_tensor_camera] if imageProjIndex == 0 else \
                                [cp_points_all_tensor_camera[..., 4:6], distances_all_tensor_camera],
                            axis=-1).numpy()

                        plot_points_on_image(
                            projected_points_all_from_raw_data,
                            images[image_index],
                            rgba,
                            point_size=5.0)

            # Add all point cloud points which are not labeled (not found in any projected camera image)
            mask = tf.equal(
                cp_points_all_tensor[..., 0], 0
            )  # 0 is the index for unprojected camera point, on first cp index
            points_3D_all_unprojected_tensor = tf.cast(tf.gather_nd(
                points_3D_all_tensor, tf.where(mask)),
                                                       dtype=tf.float32)
            processPoints(points_3D_all_unprojected_tensor.numpy(),
                          plyDataPoints,
                          imageCameraIndex=NO_CAMERA_INDEX,
                          useRawCarlaSegLabels=False)

        plyDataPointsFlattened = convertDictPointsToList(plyDataPoints)

        save_3d_pointcloud_asRGB(
            plyDataPointsFlattened, outputFramePath_rgb
        )  # TODO: save one file with test_x.ply, using RGB values, another one test_x_seg.ply using segmented data.
        save_3d_pointcloud_asSegLabel(plyDataPointsFlattened,
                                      outputFramePath_seg)
        save_3d_pointcloud_asSegColored(plyDataPointsFlattened,
                                        outputFramePath_segColored)

        onFrameProcessedEnd(globalParams)
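
The pose comments above (repeated in the next example) describe bringing points from the vehicle frame at frame t back into the reference frame, i.e. the vehicle's pose at frame 0, via the inverse of the first pose. A small numpy sketch of that composition follows; the 4x4 matrices here are made-up stand-ins for frame.pose.transform.

import numpy as np

def vehicleToReference(p_vehicle, T_t, T_0):
    # T_t, T_0: 4x4 vehicle-to-world poses at frame t and frame 0.
    worldToReference = np.linalg.inv(T_0)      # same role as worldToReferencePointTransform
    p_h = np.append(p_vehicle, 1.0)            # homogeneous coordinates
    p_world = T_t @ p_h                        # vehicle frame at t -> world
    return (worldToReference @ p_world)[:3]    # world -> reference (frame 0) vehicle frame

T_0 = np.eye(4)
T_t = np.eye(4); T_t[0, 3] = 5.0               # the car moved 5 m along x since frame 0
print(vehicleToReference(np.array([1.0, 0.0, 0.0]), T_t, T_0))  # -> [6. 0. 0.]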
Example #8
def do_PeopleAndVehiclesExtraction(segmentPath, globalParams):
    # 1. Iterate over frame by frame of a segment
    dataset = tf.data.TFRecordDataset(segmentPath, compression_type='')
    lidarLabels = None

    pedestrians_data = {}
    vehicle_data = {}
    useGlobalPoseTransform = True
    worldToReferencePointTransform = None

    for frameIndex, data in enumerate(dataset):
        if (globalParams.FRAMEINDEX_MIN > frameIndex
            ) and frameIndex != 0:  # We always need the first frame as the reference point
            continue
        if globalParams.FRAMEINDEX_MAX < frameIndex:
            break

        # Read the frame in bytes
        frame = open_dataset.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        lidarLabels = frame.laser_labels

        # All cars and pedestrians are relative to the car with sensors attached
        # We can transform to world positions using frame.pose.transform.
        # But to get coordinates back to the reference frame - WHICH IS THE INITIAL POSITION OF THE CAR - we use the below inverse matrix
        frame_thisPose = np.reshape(
            np.array(frame.pose.transform).astype(np.float32), [4, 4])
        if frameIndex == 0:
            worldToReferencePointTransform = np.linalg.inv(frame_thisPose)

        #print(f'There are {len(lidarLabels)} lidar labels')

        # Transform all boxes for this frame to the global world coordinates and retain minMax points from each
        allBoxes = [None] * len(lidarLabels)
        for entityIndex, label in enumerate(lidarLabels):
            box = label.box
            allBoxes[entityIndex] = [
                box.center_x, box.center_y, box.center_z, box.length,
                box.width, box.height, box.heading
            ]

        allBoxesMinMax = get_min_max_3d_box_corners(
            allBoxes,
            vehicleToWorldTransform=frame_thisPose,
            worldToReferencePointTransform=worldToReferencePointTransform,
            useGlobalPoseTransform=useGlobalPoseTransform)
        assert allBoxesMinMax.shape[1] == 3 and allBoxesMinMax.shape[
            2] == 2, 'Incorrect format of data'

        # Write info for this frame
        if pedestrians_data.get(frameIndex) is None:
            pedestrians_data[frameIndex] = {}
        if vehicle_data.get(frameIndex) is None:
            vehicle_data[frameIndex] = {}

        for entityIndex, label in enumerate(lidarLabels):
            if label.type not in (label.TYPE_VEHICLE, label.TYPE_CYCLIST, label.TYPE_PEDESTRIAN):
                continue

            boxMinMax = allBoxesMinMax[entityIndex]

            isVehicle = label.type == label.TYPE_VEHICLE or label.type == label.TYPE_CYCLIST
            isPedestrian = label.type == label.TYPE_PEDESTRIAN
            targetOutputDict = vehicle_data[
                frameIndex] if isVehicle else pedestrians_data[frameIndex]

            assert targetOutputDict.get(
                label.id) is None, "This entity Id is already in the set !!"
            targetOutputDict[label.id] = {"BBMinMax": boxMinMax}

        #break

    #print("Vehicles...\n", vehicle_data)
    #print("Pedestrians...\n", pedestrians_data)

    # Save people.p and cars.p
    segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
    segmentFiles_OutputPath = os.path.join(
        globalParams.MOTION_OUTPUT_BASEFILEPATH, segmentName)

    filepathsAndDictionaries = {
        'pedestrians':
        (pedestrians_data, os.path.join(segmentFiles_OutputPath, "people.p")),
        'cars': (vehicle_data, os.path.join(segmentFiles_OutputPath, "cars.p"))
    }
    for key, (dataObj, filePath) in filepathsAndDictionaries.items():
        with open(filePath, mode="wb") as fileObj:
            pickle.dump(
                dataObj, fileObj, protocol=2
            )  # Protocol 2 because it is compatible between Python 2.x and 3.x
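
get_min_max_3d_box_corners is not shown above; the assert only tells us it returns a (numBoxes, 3, 2) array of per-axis min/max corners. Below is a hedged sketch of the per-box part of that computation (the real helper additionally applies the vehicle-to-world and world-to-reference transforms).

import numpy as np

def minMaxCorners(cx, cy, cz, length, width, height, heading):
    # Build the 8 corners of the box in its local frame, rotate about z by heading,
    # translate to the center, then keep per-axis min/max -> shape (3, 2).
    dx, dy, dz = length / 2.0, width / 2.0, height / 2.0
    corners = np.array([[sx * dx, sy * dy, sz * dz]
                        for sx in (-1, 1) for sy in (-1, 1) for sz in (-1, 1)])
    c, s = np.cos(heading), np.sin(heading)
    rotZ = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    corners = corners @ rotZ.T + np.array([cx, cy, cz])
    return np.stack([corners.min(axis=0), corners.max(axis=0)], axis=-1)

print(minMaxCorners(0.0, 0.0, 1.0, 4.0, 2.0, 1.5, np.pi / 4))  # (3, 2) min/max per axis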