Example #1
def pcl_from_range_image(frame, lidar_name):

    # extract lidar data and range image
    lidar = waymo_utils.get(frame.lasers, lidar_name)
    range_image, camera_projection, range_image_pose = waymo_utils.parse_range_image_and_camera_projection(lidar)    # Parse the top laser range image and get the associated projection.

    # Convert the range image to a point cloud
    lidar_calib = waymo_utils.get(frame.context.laser_calibrations, lidar_name)
    pcl, pcl_attr = project_to_pointcloud(frame, range_image, camera_projection, range_image_pose, lidar_calib)

    # stack point cloud and lidar intensity
    points_all = np.column_stack((pcl, pcl_attr[:, 1]))

    return points_all
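
A minimal usage sketch for pcl_from_range_image, assuming a frame is read with the standard tensorflow/waymo_open_dataset loading pattern; the segment file name is a placeholder and the project may provide its own loading helpers instead.

import tensorflow as tf
from waymo_open_dataset import dataset_pb2

segment_path = 'training_segment-XXXX_with_camera_labels.tfrecord'  # placeholder path
dataset = tf.data.TFRecordDataset(segment_path, compression_type='')
for data in dataset:
    frame = dataset_pb2.Frame()
    frame.ParseFromString(bytearray(data.numpy()))
    # build the point cloud for the roof-mounted (TOP) lidar
    points = pcl_from_range_image(frame, dataset_pb2.LaserName.TOP)
    print('point cloud shape:', points.shape)  # expected (N, 4): x, y, z, intensity
    break
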
Example #2
def extract_front_camera_image(frame):
    # extract camera and calibration from frame
    camera_name = dataset_pb2.CameraName.FRONT
    camera = waymo_utils.get(frame.images, camera_name)

    # get image and convert it to RGB
    image = waymo_utils.decode_image(camera)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    return image
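
A quick way to check the result, assuming frame is an already-loaded dataset_pb2.Frame; since the function returns an RGB array, matplotlib can display it directly.

import matplotlib.pyplot as plt

img = extract_front_camera_image(frame)
plt.imshow(img)   # already RGB, so no further color conversion is needed
plt.axis('off')
plt.show()
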
def show_range_image(frame):

    ####### ID_S1_EX1 START #######
    #######
    print("student task ID_S1_EX1")

    # step 1 : extract lidar data and range image for the roof-mounted lidar == Done
    lidar_data = waymo_utils.get(frame.lasers, dataset_pb2.LaserName.TOP)
    range_image, camera_projection, range_image_pose = waymo_utils.parse_range_image_and_camera_projection(
        lidar_data)

    # step 2 : extract the range and the intensity channel from the range image == Done
    range_image = range_image[:, :, :2]

    # step 3 : set values <0 to zero == Done
    range_image[range_image < 0] = 0

    # step 4 : map the range channel onto an 8-bit scale and make sure that the full range of values is appropriately considered == Done
    range_channel = range_image[:, :, 0]
    range_channel = (range_channel / range_channel.max() * 255).astype(
        np.uint8)

    # step 5 : map the intensity channel onto an 8-bit scale and normalize with the difference between the 1- and 99-percentile to mitigate the influence of outliers == Done
    intensity_channel = range_image[:, :, 1]
    per1, per99 = np.percentile(intensity_channel, [1, 99])
    # clip to the [1, 99] percentile range so outliers do not dominate the 8-bit scaling
    intensity_channel = np.clip(intensity_channel, per1, per99)

    intensity_channel = ((intensity_channel - per1) / (per99 - per1) * 255).astype(
        np.uint8)
    # step 6 : stack the range and intensity image vertically using np.vstack and convert the result to an unsigned 8-bit integer == Done

    img_range_intensity = np.vstack([range_channel, intensity_channel])
    # crop the stacked image to +/- 90 deg around the forward-facing x-axis
    h, w = img_range_intensity.shape
    img_range_intensity = img_range_intensity[:, int(w / 4):int(3 * w / 4)]
    #######
    ####### ID_S1_EX1 END #######

    return img_range_intensity
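
A short display sketch for the stacked range/intensity image, again assuming frame is an already-loaded dataset_pb2.Frame; cv2.imshow can render the single-channel 8-bit result as-is.

import cv2

img_range_intensity = show_range_image(frame)
cv2.imshow('range and intensity image', img_range_intensity)
cv2.waitKey(0)
cv2.destroyAllWindows()
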
Example #4
            cnt_frame = cnt_frame + 1
            continue
        elif cnt_frame > show_only_frames[1]:
            print('reached end of selected frames')
            break

        print('------------------------------')
        print('processing frame #' + str(cnt_frame))

        #################################
        ## Perform 3D object detection

        ## Extract calibration data and front camera image from frame
        lidar_name = dataset_pb2.LaserName.TOP
        camera_name = dataset_pb2.CameraName.FRONT
        lidar_calibration = waymo_utils.get(frame.context.laser_calibrations,
                                            lidar_name)
        camera_calibration = waymo_utils.get(frame.context.camera_calibrations,
                                             camera_name)
        if 'load_image' in exec_list:
            image = tools.extract_front_camera_image(frame)

        ## Compute lidar point-cloud from range image
        if 'pcl_from_rangeimage' in exec_list:
            print('computing point-cloud from lidar range image')
            lidar_pcl = tools.pcl_from_range_image(frame, lidar_name)
        else:
            print('loading lidar point-cloud from result file')
            lidar_pcl = load_object_from_file(results_fullpath, data_filename,
                                              'lidar_pcl', cnt_frame)

        ## Compute lidar birds-eye view (bev)