def get_image(self, frame, idx):
    '''Decode and cache the front-camera image for the given frame.'''
    camera_name = dataset_pb2.CameraName.FRONT
    camera_calibration = utils.get(frame.context.camera_calibrations, camera_name)
    camera = utils.get(frame.images, camera_name)

    # Decode the JPEG image
    img = utils.decode_image(camera)
    self.image = img
    return img
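# Usage sketch (hedged, not part of the original code): iterate a Waymo segment
# .tfrecord and grab the front image for each frame. The file path and the
# Extractor class name are placeholders; dataset_pb2 is the same proto module
# used above.
import tensorflow as tf

filename = 'segment-XXXXXXXX_with_camera_labels.tfrecord'  # placeholder path
extractor = Extractor()  # hypothetical class owning get_image()

dataset = tf.data.TFRecordDataset(filename, compression_type='')
for idx, data in enumerate(dataset):
    frame = dataset_pb2.Frame()
    frame.ParseFromString(bytearray(data.numpy()))
    img = extractor.get_image(frame, idx)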
def display_labels_on_image(camera_calibration, camera, labels, display_time=-1):
    # Get the image transformation matrix
    vehicle_to_image = utils.get_image_transform(camera_calibration)

    # Decode the JPEG image
    img = utils.decode_image(camera)

    # Draw all the groundtruth labels
    for label in labels:
        utils.draw_3d_box(img, vehicle_to_image, label)

    # Display the image
    cv2.imshow("Image", img)
    cv2.waitKey(display_time)
def display_labels_on_image(camera_calibration, camera, labels, camera_labels, display_time=-1):
    # Get the image transformation matrix
    vehicle_to_image = utils.get_image_transform(camera_calibration)

    # Decode the JPEG image
    img = utils.decode_image(camera)

    # Project each 3D groundtruth label to a 2D box, then draw both the 3D box
    # and its 2D projection
    box_3d_to_2d = []
    class_labels = [l.type for l in labels]
    for label in labels:
        x1, y1, x2, y2 = utils.get_3d_boxes_to_2d(img, vehicle_to_image, label)
        box_3d_to_2d.append([x1, y1, x2, y2])
        utils.draw_3d_box(img, vehicle_to_image, label)
        utils.draw_3d_box(img, vehicle_to_image, label, draw_2d_bounding_box=True, colour=(0, 255, 0))

    # Draw the native 2D camera labels
    for label in camera_labels:
        utils.draw_2d_box(img, label, colour=(255, 0, 255))

    # Display the image
    cv2.imshow("Image", img)
    cv2.waitKey(display_time)
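# Usage sketch (hedged): pull the front camera, its calibration and its camera
# labels out of a frame and call the display helper above. The calibration and
# image lookups mirror calls already used in this section; looking up
# frame.camera_labels with utils.get is an assumption, and the frame itself is
# assumed to come from the TFRecord loop shown earlier.
camera_name = dataset_pb2.CameraName.FRONT
camera_calibration = utils.get(frame.context.camera_calibrations, camera_name)
camera = utils.get(frame.images, camera_name)
camera_labels = utils.get(frame.camera_labels, camera_name).labels if frame.camera_labels else []

display_labels_on_image(camera_calibration, camera, frame.laser_labels, camera_labels, display_time=10)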
# Parse the top laser range image and get the associated projection.
ri, camera_projection, range_image_pose = utils.parse_range_image_and_camera_projection(laser)

# Convert the range image to a point cloud.
pcl, pcl_attr = utils.project_to_pointcloud(frame, ri, camera_projection, range_image_pose, laser_calibration)

# Get the front camera information.
camera_name = dataset_pb2.CameraName.FRONT
camera_calibration = utils.get(frame.context.camera_calibrations, camera_name)
camera = utils.get(frame.images, camera_name)

# Get the transformation matrix for the camera.
vehicle_to_image = utils.get_image_transform(camera_calibration)

# Decode the image.
img = utils.decode_image(camera)

# Some of the labels might be fully hidden, therefore we attempt to compute the label
# visibility by counting the number of LIDAR points inside each label bounding box.

# For each label, compute the transformation matrix from the vehicle space to the box space.
vehicle_to_labels = [np.linalg.inv(utils.get_box_transformation_matrix(label.box)) for label in frame.laser_labels]
vehicle_to_labels = np.stack(vehicle_to_labels)

# Convert the pointcloud to homogeneous coordinates.
pcl1 = np.concatenate((pcl, np.ones_like(pcl[:, 0:1])), axis=1)

# Transform the point cloud to the label space for each label.
# proj_pcl shape is [label, LIDAR point, coordinates]
proj_pcl = np.einsum('lij,bj->lbi', vehicle_to_labels, pcl1)
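# A minimal continuation sketch (not from the original): turn proj_pcl into a per-label
# visibility count. Assumption: utils.get_box_transformation_matrix maps a unit cube
# spanning [-0.5, 0.5] per axis onto each label box, so a point is inside a box when all
# three of its box-space coordinates lie in that interval (adjust the bounds if the
# transform uses [-1, 1] instead); the 10-point threshold is an arbitrary example.
mask = np.logical_and.reduce(
    np.logical_and(proj_pcl[..., :3] >= -0.5, proj_pcl[..., :3] <= 0.5), axis=2)

# counts[i] is the number of LIDAR points inside the i-th label's box.
counts = mask.sum(axis=1)

# Treat a label as visible when enough LIDAR points hit its box.
visible = counts >= 10
visible_labels = [label for label, v in zip(frame.laser_labels, visible) if v]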
def export(self, camera_calibration, camera, labels, camera_labels, frame_id=0, sequence_id=None):
    assert os.path.exists(self.export_folder)

    # Get the image transformation matrix
    vehicle_to_image = utils.get_image_transform(camera_calibration)

    # Decode the JPEG image
    img = utils.decode_image(camera)

    # Project the 3D groundtruth boxes to 2D image-space boxes
    box_3d_to_2d = []
    box_3d_class_labels = []
    for label in labels:
        obj = utils.get_3d_boxes_to_2d(img, vehicle_to_image, label)
        if obj is None:
            continue
        box_3d_to_2d.append(list(map(int, obj['box'])))
        box_3d_class_labels.append(obj['label'])

    # Collect the native 2D camera labels, if any
    box_2d = []
    box_2d_labels = []
    if camera_labels is not None:
        for label in camera_labels:
            obj = utils.get_2d_boxes(label)
            if obj is None:
                continue
            box_2d.append(obj['box'])
            box_2d_labels.append(obj['label'])

    boxes = {
        '2d_boxes': {'boxes': box_2d, 'label': box_2d_labels},
        '3d_boxes': {'boxes': box_3d_to_2d, 'label': box_3d_class_labels}
    }

    # Gather per-image metadata from the camera proto
    velocities = {x[0].name: x[1] for x in camera.velocity.ListFields()}
    pose_data = {x[0].name: list(x[1]) for x in camera.pose.ListFields()}
    camera_info = {
        'id': '{}_{:05d}'.format(sequence_id, frame_id),
        'name': camera.name,
        'velocities': velocities,
        'pose': pose_data,
        'shutter': camera.shutter,
        'pose_timestamp': camera.pose_timestamp,
        'camera_trigger_time': camera.camera_trigger_time,
        'camera_readout_done_time': camera.camera_readout_done_time,
        'width': img.shape[1],
        'height': img.shape[0],
        'depth': img.shape[2]
    }

    # Create the per-sequence output directories if needed
    for subdir in ('rgb', 'label', 'img_info'):
        if not os.path.exists(os.path.join(EXPORT_FOLDER, subdir, sequence_id)):
            mkdir_p(os.path.join(EXPORT_FOLDER, subdir, sequence_id))

    # Write the boxes and the image metadata as JSON
    with open(os.path.join(EXPORT_FOLDER, 'label', sequence_id,
                           'frame_{:05d}.json'.format(frame_id)), 'w') as f:
        json.dump(boxes, f)
    with open(os.path.join(EXPORT_FOLDER, 'img_info', sequence_id,
                           'frame_{:05d}.json'.format(frame_id)), 'w') as f:
        json.dump(camera_info, f)

    # Dump the raw image array and convert it to PNG in the background
    _filename = os.path.join(EXPORT_FOLDER, 'rgb', sequence_id,
                             'frame_{:05d}.numpy'.format(frame_id))
    img.dump(_filename)
    self.executor.submit(convert_to_png, _filename)
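# The export above queues convert_to_png on a background executor, but that helper is
# not shown. A hedged, hypothetical sketch of what it might look like: it reloads the
# pickled array written by ndarray.dump() and writes it out as a PNG, assuming the
# array is already in the channel order cv2.imwrite expects.
import os
import numpy as np
import cv2

def convert_to_png(numpy_path, remove_numpy=True):
    # Load the pickled array produced by img.dump(_filename).
    img = np.load(numpy_path, allow_pickle=True)
    # Write the PNG next to the dump, swapping the extension.
    png_path = os.path.splitext(numpy_path)[0] + '.png'
    cv2.imwrite(png_path, img)
    # Optionally drop the intermediate .numpy file to save disk space.
    if remove_numpy:
        os.remove(numpy_path)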