                        default='../output/results/detections')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)

    # Load the nucoco dataset
    ann_file = args.ann_file
    img_dir = args.imgs_dir
    coco = COCO_PLUS(ann_file, img_dir)

    img_ids = [10009844, 10026493]

    for img_id in img_ids:
        print(img_id)
        points = coco.imgToPointcloud[img_id]['points']
        ann_ids = coco.getAnnIds(img_id)
        anns = coco.loadAnns(ann_ids)
        img_path = os.path.join(img_dir, coco.imgs[img_id]["file_name"])
        img = cv2.imread(img_path)
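# A minimal sketch (not part of the original script) of how the annotations
# loaded in the loop above could be drawn onto the image with OpenCV. It
# assumes the standard COCO bbox layout [x, y, width, height]; the helper
# name and the usage shown below are illustrative only.
import cv2


def draw_annotations(img, anns, color=(0, 255, 0)):
    """Draw each annotation's bounding box on a copy of the image."""
    out = img.copy()
    for ann in anns:
        x, y, w, h = [int(round(v)) for v in ann['bbox']]
        cv2.rectangle(out, (x, y), (x + w, y + h), color, 2)
    return out


# Usage (illustrative):
# cv2.imwrite(os.path.join(args.out_dir, '{}.png'.format(img_id)),
#             draw_annotations(img, anns))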
def main(args):
    logger = logging.getLogger(__name__)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    cfg_orig = load_cfg(envu.yaml_dump(cfg))
    proposals = rrpn_loader(args.rpn_pkl)
    coco = COCO_PLUS(args.ann_file, args.imgs_dir)

    image_id = coco.dataset['images'][36]['id']
    img_path = os.path.join(args.imgs_dir, coco.imgs[image_id]["file_name"])

    if args.im_file is None:
        im = cv2.imread(img_path)
        args.im_file = img_path
    else:
        im = cv2.imread(args.im_file)

    ## Get the proposals for this image
    proposal_boxes = proposals[image_id]['boxes']
    _proposal_scores = proposals[image_id]['scores']

    workspace.ResetWorkspace()

    cls_boxes, cls_segms, cls_keyps = None, None, None
    for i in range(0, len(args.models_to_run), 2):
        pkl = args.models_to_run[i]
        yml = args.models_to_run[i + 1]
        cfg.immutable(False)
        merge_cfg_from_cfg(cfg_orig)
        merge_cfg_from_file(yml)
        if len(pkl) > 0:
            weights_file = pkl
        else:
            weights_file = cfg.TEST.WEIGHTS
        cfg.NUM_GPUS = 1
        assert_and_infer_cfg(cache_urls=False)
        model = model_engine.initialize_model_from_cfg(weights_file)
        with c2_utils.NamedCudaScope(0):
            cls_boxes_, cls_segms_, cls_keyps_ = \
                model_engine.im_detect_all(model, im, proposal_boxes)
        cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
        cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
        cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
        workspace.ResetWorkspace()

    out_name = os.path.join(
        args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
    logger.info('Processing {} -> {}'.format(args.im_file, out_name))

    vis_utils.vis_one_image(
        im[:, :, ::-1],
        args.im_file,
        args.output_dir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dummy_coco_dataset,
        box_alpha=0.3,
        show_class=True,
        thresh=0.7,
        kp_thresh=2)
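# The main() function above expects an argparse-style entry point supplying
# args.rpn_pkl, args.ann_file, args.imgs_dir, args.im_file, args.models_to_run
# and args.output_dir. A minimal sketch of such a parser follows; the flag
# names, help strings and defaults are assumptions for illustration, not the
# original script's definitions.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Detection on RRPN proposals')
    parser.add_argument('--rpn_pkl', help='RRPN proposals pickle file')
    parser.add_argument('--ann_file', help='Annotations file')
    parser.add_argument('--imgs_dir', help='Images directory')
    parser.add_argument('--im_file', default=None,
                        help='Optional single image; defaults to a dataset image')
    parser.add_argument('--output_dir', default='../output/results',
                        help='Directory for the rendered PDF output')
    parser.add_argument('models_to_run', nargs='+',
                        help='Alternating <weights.pkl> <config.yaml> pairs')
    return parser.parse_args()


# if __name__ == '__main__':
#     main(parse_args())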
import _init_paths
import cv2
import os
import sys
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
from pprint import pprint
from pycocotools_plus.coco import COCO_PLUS

coco_ann_file = '/ssd_scratch/mrnabati/occlusionDetection/Code/data/coco/annotations/instances_train2017.json'
coco_imgs_dir = '/ssd_scratch/mrnabati/occlusionDetection/Code/data/coco/images/train2017'

coco = COCO_PLUS(coco_ann_file, coco_imgs_dir)

# Print every category ID and name, followed by the total category count
count = 0
for key, val in coco.cats.items():
    count += 1
    print('{}:{}'.format(key, val['name']))
print(count)

# Step through the images one at a time, showing the first annotation of each
for image in coco.dataset['images']:
    img_id = image['id']
    ann_ids = coco.getAnnIds([img_id])
    anns = coco.loadAnns(ann_ids)
    print(anns[0])
    print(anns[0]['bbox'][0].dtype)
    input('something')
    assert 0 <= args.train_ratio <= 1, \
        "--train_ratio must be in range [0, 1]"
    assert args.include_sweeps in ['True', 'False'], \
        "--include_sweeps must be 'True' or 'False'"
    return args

#-------------------------------------------------------------------------------
if __name__ == '__main__':
    random.seed(13)
    args = parse_args()

    nusc = NuScenes(version='v0.1', dataroot=args.dataroot, verbose=True)
    coco_train = COCO_PLUS(args.train_ann_file, args.train_imgs_dir,
                           new_dataset=True)
    coco_val = COCO_PLUS(args.val_ann_file, args.val_imgs_dir,
                         new_dataset=True)

    for i in tqdm(range(0, len(nusc.scene))):
        scene = nusc.scene[i]
        scene_rec = nusc.get('scene', scene['token'])
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])

        ## Get front sensors data
        f_cam_rec = nusc.get('sample_data', sample_rec['data']['CAM_FRONT'])
        f_rad_rec = nusc.get('sample_data', sample_rec['data']['RADAR_FRONT'])

        ## Get rear sensors data
                        default=0)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    output_file = args.output_file
    boxes = []
    scores = []
    ids = []
    img_ind = 0

    ## Load the nucoco dataset
    coco = COCO_PLUS(args.ann_file, args.imgs_dir)

    for img_id, img_info in tqdm(coco.imgs.items()):
        img_ind += 1
        if int(args.include_depth) == 1:
            proposals = np.empty((0, 5), np.float32)
        else:
            proposals = np.empty((0, 4), np.float32)

        img_width = img_info['width']
        img_height = img_info['height']
        pointcloud = coco.imgToPointcloud[img_id]
                        help='Dataset name according to dataset_catalog',
                        default='nucoco_train')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    # Load the nucoco dataset
    dataset_name = args.dataset_name
    ann_file = dataset_catalog.get_ann_fn(dataset_name)
    img_dir = dataset_catalog.get_im_dir(dataset_name)
    coco = COCO_PLUS(ann_file, img_dir)

    # Load the proposals
    proposals = rrpn_loader(args.proposals_file)

    for i in range(1, len(coco.dataset['images']), 10):
        fig = plt.figure(figsize=(16, 6))
        img_id = coco.dataset['images'][i]['id']
        scores = proposals[img_id]['scores']
        boxes = proposals[img_id]['boxes']
        points = coco.imgToPointcloud[img_id]['points']
        img_path = os.path.join(img_dir, coco.imgs[img_id]["file_name"])
        img = np.array(plt.imread(img_path))
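# A minimal, self-contained sketch of how the image, proposal boxes and radar
# points loaded in the loop above might be overlaid with matplotlib. It assumes
# boxes are [x1, y1, x2, y2] pixel coordinates and that the first two rows of
# `points` hold projected image coordinates; the helper name and the score
# threshold are illustrative assumptions, not the original visualization code.
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle


def show_proposals(img, boxes, scores, points, score_thresh=0.5):
    """Overlay proposal boxes and projected radar points on an image."""
    fig, ax = plt.subplots(1, 1, figsize=(16, 6))
    ax.imshow(img)
    for box, score in zip(boxes, scores):
        if score < score_thresh:
            continue
        x1, y1, x2, y2 = box[:4]
        ax.add_patch(Rectangle((x1, y1), x2 - x1, y2 - y1,
                               fill=False, edgecolor='r', linewidth=1))
    ax.scatter(points[0, :], points[1, :], s=4, c='yellow')
    plt.show()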
    parser = argparse.ArgumentParser(description='Explore NuCOCO')
    parser.add_argument('--ann_file', dest='ann_file',
                        help='Annotations file',
                        default='../output/datasets/nucoco_sw_f/annotations/instances_train.json')
    parser.add_argument('--imgs_dir', dest='imgs_dir',
                        help='Images directory',
                        default='../output/datasets/nucoco_sw_f/train')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    coco = COCO_PLUS(args.ann_file, args.imgs_dir)

    pprint(coco.cats)
    input('something')

    # Open samples from the NuScenes dataset
    for i in tqdm(range(0, len(coco.dataset['images']))):
        image_id = coco.dataset['images'][i]['id']
        ann_ids = coco.getAnnIds(image_id)
        anns = coco.loadAnns(ann_ids)
        img_path = os.path.join(args.imgs_dir, coco.imgs[image_id]["file_name"])