def select_annotation_boxes(sample_token, lyftd: LyftDataset,
                            box_vis_level: BoxVisibility = BoxVisibility.ALL,
                            camera_type=('CAM_FRONT', 'CAM_BACK', 'CAM_FRONT_LEFT',
                                         'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT',
                                         'CAM_BACK_LEFT')) -> (str, str, Box):
    """
    Select annotations of a sample that are visible in a camera image.

    For every annotation of the sample, every selected camera is probed with
    ``get_sample_data``; an annotation is yielded for each camera in which it
    satisfies ``box_vis_level`` (so one annotation may be yielded for several
    cameras).

    :param sample_token: token of the sample whose annotations are scanned
    :param lyftd: loaded LyftDataset instance
    :param box_vis_level: BoxVisibility.ALL or BoxVisibility.ANY
    :param camera_type: iterable of camera channel names the boxes may be
        selected from (default: all six cameras).
        NOTE: default is a tuple, not a list — a mutable default would be
        shared across calls.
    :return: yields (sample_token, cam_token, Box); the Box is re-fetched via
        ``get_box`` and is therefore in world coordinates, not sensor
        coordinates.
    """
    sample_record = lyftd.get('sample', sample_token)

    # Restrict to camera channels that were requested by the caller.
    cams = [key for key in sample_record["data"].keys() if "CAM" in key]
    cams = [cam for cam in cams if cam in camera_type]

    for ann_token in sample_record['anns']:
        for cam in cams:
            cam_token = sample_record["data"][cam]
            # Filtering happens inside get_sample_data: the annotation is
            # returned only if it meets box_vis_level in this camera.
            _, boxes_in_sensor_coord, cam_intrinsic = lyftd.get_sample_data(
                cam_token, box_vis_level=box_vis_level, selected_anntokens=[ann_token]
            )
            if len(boxes_in_sensor_coord) > 0:
                # We asked for exactly one annotation token, so at most one box.
                assert len(boxes_in_sensor_coord) == 1
                box_in_world_coord = lyftd.get_box(boxes_in_sensor_coord[0].token)
                yield sample_token, cam_token, box_in_world_coord
def read_frustum_pointnet_output(ldt: LyftDataset, inference_pickle_file, token_pickle_file,
                                 from_rgb_detection: bool):
    """
    Read frustum-pointnet inference results and rebuild 3D boxes from them.

    The inference pickle is a positional stream of objects written with
    repeated ``pickle.dump`` calls; the order (and which entries exist)
    depends on ``from_rgb_detection``, so every entry must be read even if
    unused here to keep the stream aligned.

    :param ldt: loaded LyftDataset instance (used to fetch ground-truth boxes)
    :param inference_pickle_file: pickle stream of per-detection network outputs
    :param token_pickle_file: pickle stream of sample/annotation/camera tokens
        and type strings, parallel to the inference lists
    :param from_rgb_detection: True if the detections came from an RGB
        detector (no segmentation / ground truth available)
    :return: (boxes, gt_boxes, sample_token_list) — gt_boxes is empty when
        from_rgb_detection is True
    """
    with open(inference_pickle_file, 'rb') as fp:
        ps_list = pickle.load(fp)
        if not from_rgb_detection:
            # Segmentation labels/predictions exist only for non-RGB runs;
            # they are read to advance the stream but not used here.
            seg_list = pickle.load(fp)
            segp_list = pickle.load(fp)
        center_list = pickle.load(fp)
        heading_cls_list = pickle.load(fp)
        heading_res_list = pickle.load(fp)
        size_cls_list = pickle.load(fp)
        size_res_list = pickle.load(fp)
        rot_angle_list = pickle.load(fp)
        score_list = pickle.load(fp)
        if from_rgb_detection:
            # Trailing one-hot class vectors, present only for RGB detections.
            onehot_list = pickle.load(fp)

    with open(token_pickle_file, 'rb') as fp:
        sample_token_list = pickle.load(fp)
        annotation_token_list = pickle.load(fp)
        camera_data_token_list = pickle.load(fp)
        type_list = pickle.load(fp)

    # Sanity probes: raise early if the tokens do not belong to this dataset.
    ldt.get('sample', sample_token_list[0])
    if annotation_token_list:
        ldt.get('sample_annotation', annotation_token_list[0])

    print("length of sample token:", len(sample_token_list))
    print("length of ps list:", len(ps_list))
    assert len(sample_token_list) == len(ps_list)

    boxes = []
    gt_boxes = []
    for data_idx in range(len(ps_list)):
        inferred_box = get_box_from_inference(
            lyftd=ldt,
            heading_cls=heading_cls_list[data_idx],
            heading_res=heading_res_list[data_idx],
            rot_angle=rot_angle_list[data_idx],
            size_cls=size_cls_list[data_idx],
            size_res=size_res_list[data_idx],
            center_coord=center_list[data_idx],
            sample_data_token=camera_data_token_list[data_idx],
            score=score_list[data_idx])
        inferred_box.name = type_list[data_idx]
        boxes.append(inferred_box)
        if not from_rgb_detection:
            # Ground truth is only available when detections came from labels.
            gt_boxes.append(ldt.get_box(annotation_token_list[data_idx]))
    return boxes, gt_boxes, sample_token_list
# Notebook-style exploration of one Lyft dataset sample.
# NOTE(review): `lyftdata`, `pc`, `my_sample`, `cam_front`, `lidar_top` and
# `display_frame_statistics` are defined elsewhere (earlier cells/chunks) —
# presumably a LyftDataset, a point cloud, and records fetched from one
# sample; confirm against the surrounding file.
axes_str = ['X', 'Y', 'Z']
display_frame_statistics(pc.points, points=0.5)
# Render the LiDAR sweep without annotation overlays.
lyftdata.render_sample_data(lidar_top['token'], with_anns=False)
## Ego pose
# Vehicle pose (translation/rotation) at the time of the front-camera capture.
lyftdata.get('ego_pose', cam_front['ego_pose_token'])
## caliberated_sensor
# Extrinsics/intrinsics of the front camera relative to the ego frame.
lyftdata.get('calibrated_sensor', cam_front['calibrated_sensor_token'])
## Annotations
# Inspect the first annotation of the sample: raw record, Box object, render.
my_annotation = lyftdata.get('sample_annotation', my_sample['anns'][0])
my_box = lyftdata.get_box(my_annotation['token'])
lyftdata.render_annotation(my_annotation['token'], margin=10)
## Attributes
# Assumes the annotation has at least two attribute tokens — TODO confirm.
my_attribute1 = lyftdata.get('attribute', my_annotation['attribute_tokens'][0])
my_attribute2 = lyftdata.get('attribute', my_annotation['attribute_tokens'][1])
## Instances
# Follow the annotation back to its tracked instance and render its
# first and last annotated appearances.
my_instance = lyftdata.get('instance', my_annotation['instance_token'])
lyftdata.render_instance(my_instance['token'])
print("First annotated sample of this instance:")
lyftdata.render_annotation(my_instance['first_annotation_token'])
print("Last annotated sample of this instance")
lyftdata.render_annotation(my_instance['last_annotation_token'])