# Fetch the sample_data records for the four corner cameras of the current
# nuScenes sample. NOTE(review): assumes `nusc` (NuScenes instance) and
# `sample_rec` (a 'sample' record) are defined earlier in the file — confirm.
fr_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT_RIGHT'])
fl_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT_LEFT'])
br_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_BACK_RIGHT'])
bl_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_BACK_LEFT'])
# ann_recs = [nusc.get('sample_annotation', token) for token in sample_rec['anns']]
# Iterate over successive frames of the scene. NOTE(review): the loop body
# visible here never sets has_more_frames = False nor advances to the next
# sample_data record — presumably that happens further down, past this chunk.
has_more_frames = True
while has_more_frames:
    ## FRONT
    # Image path, annotation boxes and 3x3 intrinsics for the front camera frame.
    impath, boxes, camera_intrinsic = nusc.get_sample_data(
        f_cam_rec['token'])
    # Project the front / front-right / front-left radar point clouds into the
    # FRONT camera image plane. Each call returns 2D pixel coordinates,
    # per-point coloring values, and the camera image.
    # NOTE(review): assumes f_rad_rec / fr_rad_rec / fl_rad_rec ('sample_data'
    # records for the radars) are defined earlier in the file — confirm.
    points_f, coloring_f, im1 = nusc.explorer.map_pointcloud_to_image(
        f_rad_rec['token'], f_cam_rec['token'])
    points_fr, coloring_fr, im2 = nusc.explorer.map_pointcloud_to_image(
        fr_rad_rec['token'], f_cam_rec['token'])
    points_fl, coloring_fl, im3 = nusc.explorer.map_pointcloud_to_image(
        fl_rad_rec['token'], f_cam_rec['token'])
    # Debug output of the side-radar projections.
    print(points_fr)
    print(points_fl)
    # Two side-by-side axes on the (externally created) figure `fig`.
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    # ax1.imshow(img1)
    # Show the camera image and overlay the front-radar points on top of it.
    ax1.imshow(im1)
    # nusc.render_sample_data(f_cam_rec['token'], True, ax=ax1)
    ax1.scatter(points_f[0, :], points_f[1, :], c=coloring_f, s=5)
# Resolve the first sample of the current scene and grab the front-radar and
# front-camera sample_data records for it. NOTE(review): assumes `nusc` and
# `scene` (a scene record or dict with a 'token') are defined earlier — confirm.
scene_rec = nusc.get('scene', scene['token'])
sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])
f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])
# Frame loop. NOTE(review): no exit condition or frame advance is visible in
# this chunk — presumably handled further down, past this excerpt.
has_more_frames = True
while has_more_frames:
    bboxes = []  # intended to collect 2D COCO bboxes (see commented-out loop below)
    # Single full-figure axis on the (externally created) figure `fig`.
    ax1 = fig.add_subplot(1,1,1)
    # ax2 = fig.add_subplot(1,2,2)
    camera_token = f_cam_rec['token']
    radar_token = f_rad_rec['token']
    ## FRONT CAM + RADAR
    # Image path, annotation boxes and 3x3 intrinsics for the front camera frame.
    impath, boxes, camera_intrinsic = nusc.get_sample_data(camera_token)
    # Project the front radar point cloud into the camera image; `points` is
    # a point array whose first two rows are pixel coordinates.
    points, coloring, image = nusc.explorer.map_pointcloud_to_image(radar_token, camera_token)
    # Overwrite the third row of the projected points with the per-point
    # coloring values (the projected depth is replaced by the color channel).
    points[2, :] = coloring
    # Plot the 3D boxes and create 2D COCO bboxes
    # for box in boxes:
    #     coco_cat, cat_id, coco_supercat = nuscene_cat_to_coco(box.name)
    #     if coco_cat is None:
    #         continue
    #     c = np.array(nusc.explorer.get_color(box.name)) / 255.0
    #     box.render(ax1, view=camera_intrinsic, normalize=True, colors=[c, c, c])
    #     bboxes.append(box.to_coco_bbox(camera_intrinsic, image.size))
    #
    # Plot the 3D boxes
    # nusc.render_sample_data(camera_token, ax=ax1)
output_path = 'tmp/' # iterate over all 3977 samples for idx, sample in enumerate(nusc.sample): sample_token = sample['token'] sample_record = nusc.get('sample', sample_token) selected_data = {} for channel, sample_data_token in sample_record['data'].items(): sample_data_record = nusc.get('sample_data', sample_data_token) sensor_modality = sample_data_record['sensor_modality'] if sensor_modality in ['lidar']: # create output directory os.makedirs(output_path + channel, exist_ok=True) selected_data[channel] = sample_data_token # boxes returned are transformed from global to camera/LIDAR space within get_sample_data data_path, boxes, camera_intrinsic = nusc.get_sample_data( sample_data_token, box_vis_level=BoxVisibility.IN_FRONT) with open(output_path + channel + '/' + str(idx).zfill(6) + '.txt', 'w') as file_writer: for box in boxes: category = box.name truncated = 0 occluded = 0 alpha = 0 width = box.wlh[0] length = box.wlh[1] height = box.wlh[2] posx = box.center[0] posy = box.center[1] posz = box.center[2] rotation_y = box.orientation.angle xmin = 0