def main(args):
    logger = logging.getLogger(__name__)
    dummy_nucoco_dataset = dummy_datasets.get_nucoco_dataset()
    cfg_orig = load_cfg(envu.yaml_dump(cfg))

    ## Load image
    coco = COCO_PLUS(args.ann_file, args.imgs_dir)
    image_id = coco.dataset['images'][args.im_ind]['id']
    img_path = os.path.join(args.imgs_dir, coco.imgs[image_id]["file_name"])
    im = cv2.imread(img_path)

    ## Get the proposals for this image
    proposals = rrpn_loader(args.rpn_pkl)
    proposal_boxes = proposals[image_id]['boxes']
    _proposal_scores = proposals[image_id]['scores']
    workspace.ResetWorkspace()

    ## Run the models
    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)

        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(img_path) + '.pdf')
    )
    logger.info('Processing {} -> {}'.format(img_path, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        img_path,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_nucoco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2
    )
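## NOTE: rrpn_loader() and its .pkl format are defined elsewhere in the repo.
## From the usage above, it returns a dict keyed by image_id, where
## proposals[image_id]['boxes'] holds one proposal box per row (presumably
## [x1, y1, x2, y2]) and proposals[image_id]['scores'] one score per proposal.
## The helper below is only an illustrative sketch for a quick smoke test, not
## part of the original pipeline; the file name and box values are made up.
def _write_dummy_proposals(out_pkl='dummy_proposals.pkl'):
    import pickle
    import numpy as np

    proposals = {
        0: {  # image_id
            'boxes': np.array([[10., 20., 110., 220.]], dtype=np.float32),
            'scores': np.ones((1,), dtype=np.float32),
        }
    }
    with open(out_pkl, 'wb') as f:
        pickle.dump(proposals, f)
    return out_pkl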
##------------------------------------------------------------------------------
if __name__ == '__main__':
    args = parse_args()
    output_file = args.output_file
    boxes = []
    scores = []
    ids = []
    img_ind = 0

    out_dir = os.path.dirname(output_file)
    os.makedirs(out_dir, exist_ok=True)

    ## Load the nucoco dataset
    coco = COCO_PLUS(args.ann_file)

    for img_id, img_info in tqdm(coco.imgs.items()):
        img_ind += 1
        if int(args.include_depth) == 1:
            proposals = np.empty((0, 5), np.float32)
        else:
            proposals = np.empty((0, 4), np.float32)

        ## Generate proposals for points in pointcloud
        pointcloud = coco.imgToPc[img_id]
        for point in pointcloud['points']:
            rois = get_im_proposals(point,
                                    sizes=(128, 256, 512, 1024),
                                    aspect_ratios=(0.5, 1, 2),
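## get_im_proposals() is defined elsewhere in the repo. As an illustrative
## sketch only (the real implementation may differ), generating anchor-style
## proposals around a projected radar point amounts to centering boxes of
## several sizes and aspect ratios on the point's pixel location; the helper
## name below is hypothetical.
def _point_to_boxes(x, y, sizes=(128, 256, 512, 1024), aspect_ratios=(0.5, 1, 2)):
    import numpy as np

    boxes = []
    for size in sizes:
        area = float(size) ** 2
        for ar in aspect_ratios:
            # Width/height pair with the requested area and aspect ratio (h / w)
            w = np.sqrt(area / ar)
            h = w * ar
            boxes.append([x - w / 2., y - h / 2., x + w / 2., y + h / 2.])
    return np.array(boxes, dtype=np.float32)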
def main():
    args = parse_args()

    if "mini" in args.split:
        nusc_version = "v1.0-mini"
    elif "test" in args.split:
        nusc_version = "v1.0-test"
    else:
        nusc_version = "v1.0-trainval"

    ## Categories: [category, supercategory, category_id]
    categories = [['person', 'person', 1],
                  ['bicycle', 'vehicle', 2],
                  ['car', 'vehicle', 3],
                  ['motorcycle', 'vehicle', 4],
                  ['bus', 'vehicle', 5],
                  ['truck', 'vehicle', 6]]

    ## Short split is used for filenames
    anns_file = os.path.join(args.out_dir, 'annotations',
                             'instances_' + args.split + '.json')

    nusc_dataset = NuscenesDataset(nusc_path=args.nusc_root,
                                   nusc_version=nusc_version,
                                   split=args.split,
                                   coordinates='vehicle',
                                   nsweeps_radar=args.nsweeps_radar,
                                   sensors_to_return=['camera', 'radar'],
                                   pc_mode='camera',
                                   logging_level=args.logging_level)

    coco_dataset = COCO_PLUS(logging_level="INFO")
    coco_dataset.create_new_dataset(dataset_dir=args.out_dir, split=args.split)

    ## Add all categories up front to keep category IDs consistent across dataset splits
    for (coco_cat, coco_supercat, coco_cat_id) in categories:
        coco_dataset.addCategory(coco_cat, coco_supercat, coco_cat_id)

    ## Get samples from the Nuscenes dataset
    num_samples = len(nusc_dataset)
    for i in trange(num_samples):
        sample = nusc_dataset[i]
        img_ids = sample['img_id']

        for cam_idx, cam_sample in enumerate(sample['camera']):
            if cam_sample['camera_name'] not in args.cameras:
                continue

            img_id = int(img_ids[cam_idx])
            image = cam_sample['image']
            pc = sample['radar'][cam_idx]
            cam_cs_record = cam_sample['cs_record']
            img_height, img_width, _ = image.shape

            ## Create annotations in the coco_dataset format
            sample_anns = []
            annotations = nusc_dataset.pc_to_sensor(sample['annotations'][cam_idx],
                                                    cam_cs_record)
            for ann in annotations:
                coco_cat, coco_cat_id, coco_supercat = nuscene_cat_to_coco(ann.name)

                ## If not a valid category, go to the next annotation
                if coco_cat is None:
                    coco_dataset.logger.debug(
                        'Skipping ann with category: {}'.format(ann.name))
                    continue

                cat_id = coco_dataset.addCategory(coco_cat, coco_supercat,
                                                  coco_cat_id)
                bbox = nuscenes_box_to_coco(
                    ann, np.array(cam_cs_record['camera_intrinsic']),
                    (img_width, img_height))
                coco_ann = coco_dataset.createAnn(bbox, cat_id)
                sample_anns.append(coco_ann)

            ## Map the Radar pointcloud to the image plane
            pc_cam = nusc_dataset.pc_to_sensor(pc, cam_cs_record)
            pc_depth = pc_cam[2, :]
            pc_image = view_points(pc_cam[:3, :],
                                   np.array(cam_cs_record['camera_intrinsic']),
                                   normalize=True)

            ## Add the depth information to each point
            pc_coco = np.vstack((pc_image[:2, :], pc_depth))
            pc_coco = np.transpose(pc_coco).tolist()

            ## Add the sample to the COCO dataset
            coco_img_path = coco_dataset.addSample(img=image,
                                                   anns=sample_anns,
                                                   pointcloud=pc_coco,
                                                   img_id=img_id,
                                                   other=cam_cs_record,
                                                   img_format='RGB',
                                                   write_img=not args.use_symlinks)
            if args.use_symlinks:
                try:
                    os.symlink(os.path.abspath(cam_sample['cam_path']),
                               coco_img_path)
                except FileExistsError:
                    pass

            ## Uncomment to visualize
            # coco_dataset.showImgAnn(np.asarray(image), sample_anns,
            #                         bbox_only=True, BGR=False)

    coco_dataset.saveAnnsToDisk()
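## For reference, view_points() above applies the standard pinhole projection:
## a 3D point p = (x, y, z) in the camera frame maps to pixel coordinates by
## multiplying with the intrinsic matrix K and dividing by depth. A minimal
## standalone sketch of that projection (assuming points are already in the
## camera frame with z > 0); the helper name is hypothetical:
def _project_to_image(points_cam, K):
    """points_cam: (3, N) array in camera coordinates; K: (3, 3) intrinsics."""
    import numpy as np

    points_cam = np.asarray(points_cam, dtype=np.float64)
    depths = points_cam[2, :]
    uv = np.dot(K, points_cam)      # (3, N) homogeneous pixel coordinates
    uv = uv[:2, :] / uv[2, :]       # normalize to get (u, v) per point
    return uv, depths               # same (u, v) + depth layout stored in pc_coco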
                        default='../data/nucoco/v1.0-mini/train')
    parser.add_argument('--out_dir', dest='out_dir',
                        help='Test outputs directory',
                        default='../data/test')
    args = parser.parse_args()

    return args


##------------------------------------------------------------------------------
if __name__ == '__main__':
    args = parse_args()
    os.makedirs(args.out_dir, exist_ok=True)

    coco = COCO_PLUS(args.ann_file)
    print("Object Categories:\n")
    for key, val in coco.cats.items():
        pprint(val)

    # Open samples from the dataset
    num_imgs = len(coco.imgs)
    for i in tqdm(range(0, num_imgs)):
        img_id = coco.dataset['images'][i]['id']
        ann_ids = coco.getAnnIds(img_id)
        anns = coco.loadAnns(ann_ids)
        pointcloud = coco.imgToPc[img_id]['points']
        pc = np.array(pointcloud)
        img_path = os.path.join(args.imgs_dir, coco.imgs[img_id]["file_name"])
        img = cv2.imread(img_path)
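        ## The rest of the loop is not shown above. As a minimal sketch (not the
        ## original script's code), the loaded annotations and radar points could
        ## be overlaid on the image like this, assuming COCO [x, y, w, h] boxes
        ## and the [u, v, depth] point rows written by the conversion script:
        for ann in anns:
            x, y, w, h = [int(v) for v in ann['bbox']]
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        for u, v, _depth in pc:
            cv2.circle(img, (int(u), int(v)), 3, (0, 0, 255), -1)
        # cv2.imwrite(os.path.join(args.out_dir, os.path.basename(img_path)), img)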