def _test_std(self):
    """Run single-image RetinaNet inference with the standard R-50_1x config
    and return the raw detection tensors for comparison in other tests.

    Returns:
        (cls_preds, cls_probs, box_preds, anchors, im_info) as produced by
        im_detect_bbox in debug mode.
    """
    current_dir = osp.dirname(osp.realpath(__file__))
    cfg_file = osp.join(current_dir, '..', 'configs', 'R-50_1x.yaml')
    merge_cfg_from_file(cfg_file)
    cfg.TEST.WEIGHTS = osp.join(
        current_dir, '..', 'outputs', 'train',
        'coco_2014_train+coco_2014_valminusminival', 'R-50_1x', 'default',
        'model_final.pkl')
    # Keep every detection regardless of score so outputs are comparable.
    cfg.RETINANET.INFERENCE_TH = 0.
    dataset = JsonDataset('coco_2014_minival')
    roidb = dataset.get_roidb()
    model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=0)
    utils.net.initialize_gpu_from_weights_file(
        model, cfg.TEST.WEIGHTS, gpu_id=0)
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    workspace.CreateNet(model.conv_body_net)
    # (The previous version also computed num_images / num_classes here;
    # both were unused and have been removed.)
    entry = roidb[0]
    im = cv2.imread(entry['image'])
    with utils.c2.NamedCudaScope(0):
        cls_boxes, cls_preds, cls_probs, box_preds, anchors, im_info = \
            im_detect_bbox(model, im, debug=True)
    workspace.ResetWorkspace()
    return cls_preds, cls_probs, box_preds, anchors, im_info
def generate_rpn_on_dataset(
    weights_file, dataset_name, _proposal_file_ignored, output_dir,
    multi_gpu=False, gpu_id=0
):
    """Generate RPN proposals over a whole dataset and evaluate them."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        # Single process: generate_rpn_on_range covers the full dataset range
        # by default.
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(
            weights_file, dataset_name, _proposal_file_ignored, output_dir,
            gpu_id=gpu_id
        )
    else:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, num_images,
            output_dir
        )
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize detections from a pkl file on top of the dataset's images.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: pkl with 'all_boxes' and optional 'all_segms' /
            'all_keyps' entries.
        thresh: score threshold for drawing a detection.
        output_dir: output images are written under <output_dir>/vis.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    # Pickle files must be opened in binary mode; text mode ('r') breaks
    # pickle.load under Python 3.
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms'] if 'all_segms' in dets else None
    all_keyps = dets['all_keyps'] if 'all_keyps' in dets else None

    def id_or_index(ix, val):
        # Empty per-class entries pass through; otherwise select this image's
        # detections.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, all_boxes[j]) for j in range(len(all_boxes))
        ]
        if all_segms is not None:
            cls_segms_i = [
                id_or_index(ix, all_segms[j]) for j in range(len(all_segms))
            ]
        else:
            cls_segms_i = None
        if all_keyps is not None:
            cls_keyps_i = [
                id_or_index(ix, all_keyps[j]) for j in range(len(all_keyps))
            ]
        else:
            cls_keyps_i = None
        vis_utils.vis_one_image(
            im[:, :, ::-1], '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'), cls_boxes_i,
            segms=cls_segms_i, keypoints=cls_keyps_i, thresh=thresh,
            box_alpha=0.8, dataset=ds, show_class=True
        )
def get_roidb_and_dataset(ind_range, include_gt=False):
    """Load the test roidb, optionally restricted to [start, end).

    include_gt is used by the eval_mpii code, not here.
    """
    dataset = JsonDataset(cfg.TEST.DATASET)
    if not cfg.MODEL.FASTER_RCNN:
        roidb = dataset.get_roidb(
            gt=include_gt,
            proposal_file=cfg.TEST.PROPOSAL_FILE,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        roidb = dataset.get_roidb(gt=include_gt)
    # Video processing (same as datasets/roidb.py)
    if cfg.MODEL.VIDEO_ON:
        roidb = video_utils.get_clip(roidb, remove_imperfect=False)
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def test_retinanet_on_dataset(multi_gpu=False):
    """Main entry point for testing on a given dataset (multi-GPU or not)."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    # For test-dev or a full test split, generate detections for all images.
    ds_name = cfg.TEST.DATASET
    if 'test-dev' in ds_name or 'test' in ds_name:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())
    if multi_gpu:
        all_boxes = multi_gpu_test_retinanet_on_dataset(
            cfg.TEST.NUM_TEST_IMAGES, output_dir, dataset)
    else:
        all_boxes = test_retinanet()
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, None, None, output_dir)
def test_retinanet(ind_range=None):
    """Test a RetinaNet model on the whole dataset or on the roidb slice
    given by ind_range."""
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    roidb = dataset.get_roidb()
    if ind_range is not None:
        roidb = roidb[ind_range[0]:ind_range[1]]
    # Build the inference model and load weights.
    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(
            model, cfg.TEST.WEIGHTS, broadcast=False
        )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    # Run detection over the (possibly restricted) roidb.
    all_boxes = im_list_detections(model, roidb)
    # Persist detections together with the config used to produce them.
    cfg_yaml = yaml.dump(cfg)
    if ind_range is None:
        det_name = 'detections.pkl'
    else:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    det_file = os.path.join(output_dir, det_name)
    save_object(dict(all_boxes=all_boxes, cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes
def main():
    """Compare two detection pkl files (baseline vs. ours) on a dataset and
    save the differing detections for case study."""
    args = parse_args()
    det_file1 = args.baseline
    det_file2 = args.ours
    dataset_name = args.dataset
    work_dir = os.getcwd()
    save_dir = os.path.join(work_dir, '..', 'Outputs', 'case_study')
    # Recreate a clean output dir. makedirs also creates missing parent
    # directories, which plain os.mkdir would fail on.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.makedirs(save_dir)
    # detections['all_boxes'][cls][image] = N x 5 array with columns
    # (x1, y1, x2, y2, score). Only one class in DeepLesion:
    # 0-background, 1-lesion. all_boxes[image] = a N*5 list.
    with open(det_file1, 'rb') as f:
        all_boxes1 = pickle.load(f)['all_boxes']
    with open(det_file2, 'rb') as f:
        all_boxes2 = pickle.load(f)['all_boxes']
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(gt=True)
    find_diff_detections(all_boxes1[1], all_boxes2[1], roidb, save_dir,
                         iou_th=0.5)
def test_net_on_dataset(
        args, dataset_name, proposal_file, output_dir, ind_range=None,
        multi_gpu=False, gpu_id=0):
    """Run inference on a dataset and evaluate the results."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir,
            ind_range=ind_range, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    # Evaluation covers only the image ids that were actually tested.
    dataset.test_img_ids = sorted(dataset.COCO.getImgIds())
    if ind_range is not None:
        dataset.test_img_ids = dataset.test_img_ids[ind_range[0]:ind_range[1]]
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir)
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.

    Returns:
        roidb: entries for the images in [start, end), sorted by id
        dataset: the JsonDataset object
        start, end: the index range covered by roidb
        total_num_images: size of the full (unrestricted) roidb
            (the previous inline note wrongly described this as
            end - start + 1)
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(proposal_file=proposal_file,
                                  proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        # In this case the roidb contains all the images.
        roidb = dataset.get_roidb()
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        # Keep only the range this (sub)process works on.
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, dataset, start, end, total_num_images
def test_net_on_dataset(
        args, dataset_name, proposal_file, output_dir, multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset and evaluate the detections."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id)
    else:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir)
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT
        )
    else:
        roidb = dataset.get_roidb(gt=cfg.TEST.USE_GT_PROPOSALS)
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    # Stash dataset-level COCO metadata on the first entry so downstream
    # consumers can reach it. Guard against an empty roidb/slice, which
    # previously raised IndexError here.
    if roidb:
        roidb[0].update({
            k: v for k, v in dataset.COCO.dataset.items()
            if k not in ['images', 'annotations', 'categories']
        })
    return roidb, dataset, start, end, total_num_images
def test_net_on_dataset(
        args, dataset_name, proposal_file, output_dir, multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset, then evaluate all results."""
    dataset = JsonDataset(dataset_name)
    inference_timer = Timer()
    inference_timer.tic()
    if multi_gpu:
        image_count = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, image_count, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id)
    inference_timer.toc()
    logger.info(
        'Total inference time: {:.3f}s'.format(inference_timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir)
def generate_rpn_on_dataset(weights_file, dataset_name,
                            _proposal_file_ignored, output_dir,
                            multi_gpu=False, gpu_id=0):
    """Run RPN proposal generation on a dataset and evaluate the proposals."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        total = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, total,
            output_dir)
    else:
        # The single-process path covers the entire dataset range by default.
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(
            weights_file, dataset_name, _proposal_file_ignored, output_dir,
            gpu_id=gpu_id)
    timer.toc()
    logger.info(
        'Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def get_roidb_and_dataset(ind_range, include_gt=False):
    """Return the test roidb (optionally a slice) plus range bookkeeping.

    include_gt is used by the eval_mpii code, not here.
    """
    dataset = JsonDataset(cfg.TEST.DATASET)
    if cfg.MODEL.FASTER_RCNN:
        roidb = dataset.get_roidb(gt=include_gt)
    else:
        roidb = dataset.get_roidb(
            gt=include_gt,
            proposal_file=cfg.TEST.PROPOSAL_FILE,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT,
        )
    if cfg.MODEL.VIDEO_ON:
        # Video processing (same as datasets/roidb.py).
        roidb = video_utils.get_clip(roidb, remove_imperfect=False)
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def result_getter():
    # NOTE: closure — reads is_parent, args, cfg, multi_gpu_testing,
    # parent_func, child_func, ind_range and gpu_id from the enclosing scope.
    if is_parent:
        # Parent case:
        # In this case we're either running inference on the entire dataset in
        # a single process or (if multi_gpu_testing is True) using this
        # process to launch subprocesses that each run inference on a range of
        # the dataset.
        all_results = {}
        for i in range(len(cfg.TEST.DATASETS)):
            dataset_name, proposal_file = get_inference_dataset(i)
            dataset = JsonDataset(dataset_name)
            output_dir = args.output_dir
            # Results from each dataset are merged into one dict; duplicate
            # keys from later datasets overwrite earlier ones.
            results = parent_func(args, dataset_name, proposal_file,
                                  output_dir, dataset,
                                  multi_gpu=multi_gpu_testing)
            all_results.update(results)
        return all_results
    else:
        # Subprocess child case:
        # In this case test_net was called via subprocess.Popen to execute on
        # a range of inputs on a single dataset.
        dataset_name, proposal_file = get_inference_dataset(
            0, is_parent=False)
        dataset = JsonDataset(dataset_name)
        output_dir = args.output_dir
        return child_func(args, dataset_name, proposal_file, output_dir,
                          dataset, ind_range=ind_range, gpu_id=gpu_id)
def _test_std(self):
    """Run a standard FPN-RPN forward pass on one image, visualize the
    resulting proposals, and return the raw blobs for comparison tests."""
    root_dir = osp.join('/private', 'home', 'xinleic', 'pyramid')
    cfg_file = osp.join(root_dir, 'configs', 'visual_genome',
                        'e2e_faster_rcnn_R-50-FPN_1x.yaml')
    merge_cfg_from_file(cfg_file)
    cfg.NUM_GPUS = 1
    # Shrink the proposal budget so the test runs quickly.
    cfg.TEST.RPN_PRE_NMS_TOP_N = 100
    cfg.TEST.RPN_POST_NMS_TOP_N = 20
    assert_and_infer_cfg()
    test_weight = osp.join(root_dir, 'outputs', 'train',
                           'visual_genome_train',
                           'e2e_faster_rcnn_R-50-FPN_1x', 'RNG_SEED#3',
                           'model_final.pkl')
    model = test_engine.initialize_model_from_cfg(test_weight, gpu_id=0)
    dataset = JsonDataset('visual_genome_val')
    roidb = dataset.get_roidb()
    # NOTE(review): num_images and num_classes are computed but unused here.
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    entry = roidb[1]
    im = cv2.imread(entry['image'])
    max_level = cfg.FPN.RPN_MAX_LEVEL
    min_level = cfg.FPN.RPN_MIN_LEVEL
    # input: rpn_cls_probs_fpn2, rpn_bbox_pred_fpn2
    # output: rpn_rois_fpn2, rpn_roi_probs_fpn2
    with utils.c2.NamedCudaScope(0):
        # let's manually do the testing here
        inputs, im_scale = _get_blobs(im, None, cfg.TEST.SCALE,
                                      cfg.TEST.MAX_SIZE)
        for k, v in inputs.items():
            workspace.FeedBlob(core.ScopedName(k), v)
        workspace.RunNet(model.net.Proto().name)
        # Build the per-FPN-level blob names, then fetch each group in a
        # single FetchBlobs call.
        cls_probs = [core.ScopedName('rpn_cls_probs_fpn%d' % i)
                     for i in range(min_level, max_level + 1)]
        box_preds = [core.ScopedName('rpn_bbox_pred_fpn%d' % i)
                     for i in range(min_level, max_level + 1)]
        rpn_rois = [core.ScopedName('rpn_rois_fpn%d' % i)
                    for i in range(min_level, max_level + 1)]
        rpn_roi_probs = [core.ScopedName('rpn_roi_probs_fpn%d' % i)
                         for i in range(min_level, max_level + 1)]
        cls_probs = workspace.FetchBlobs(cls_probs)
        box_preds = workspace.FetchBlobs(box_preds)
        rpn_rois = workspace.FetchBlobs(rpn_rois)
        rpn_roi_probs = workspace.FetchBlobs(rpn_roi_probs)
    # Stack the proposals from all pyramid levels into single arrays.
    rpn_rois = np.vstack(rpn_rois)
    rpn_roi_probs = np.vstack(rpn_roi_probs)
    # remove the image dimension
    rpn_rois = rpn_rois[:, 1:]
    boxes = np.hstack([rpn_rois, rpn_roi_probs])
    im_name = osp.splitext(osp.basename(entry['image']))[0]
    # thresh=0. draws every proposal.
    utils.vis.vis_one_image(im[:, :, ::-1],
                            '{:s}-std-output'.format(im_name),
                            osp.join(root_dir, 'tests'), boxes, segms=None,
                            keypoints=None, thresh=0., box_alpha=0.8,
                            dataset=dataset, show_class=False)
    workspace.ResetWorkspace()
    im_info = inputs['im_info'].astype(np.float32)
    return cls_probs, box_preds, im_info, im, im_name, root_dir, dataset
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize detections from a pkl file on the dataset's images.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: pkl with 'all_boxes' and optional 'all_segms' /
            'all_keyps' entries.
        thresh: score threshold for drawing a detection.
        output_dir: images go under <output_dir>/vis.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    # Binary mode is required for pickle; text mode ('r') breaks pickle.load
    # under Python 3.
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms'] if 'all_segms' in dets else None
    all_keyps = dets['all_keyps'] if 'all_keyps' in dets else None

    def id_or_index(ix, val):
        # Empty per-class entries pass through unchanged.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, all_boxes[j]) for j in range(len(all_boxes))
        ]
        if all_segms is not None:
            cls_segms_i = [
                id_or_index(ix, all_segms[j]) for j in range(len(all_segms))
            ]
        else:
            cls_segms_i = None
        if all_keyps is not None:
            cls_keyps_i = [
                id_or_index(ix, all_keyps[j]) for j in range(len(all_keyps))
            ]
        else:
            cls_keyps_i = None
        vis_utils.vis_one_image(im[:, :, ::-1],
                                '{:d}_{:s}'.format(ix, im_name),
                                os.path.join(output_dir, 'vis'), cls_boxes_i,
                                segms=cls_segms_i, keypoints=cls_keyps_i,
                                thresh=thresh, box_alpha=0.8, dataset=ds,
                                show_class=True)
def load_dataset(dataset_name='nuclei_stage_1_local_val_split'):
    """Load the gt roidb keyed by image id (basename without extension)."""
    roidb = JsonDataset(dataset_name).get_roidb(gt=True)
    return {
        os.path.splitext(os.path.basename(entry['image']))[0]: entry
        for entry in roidb
    }
def get_roidb(dataset_name, proposal_file):
    """Return the training roidb for dataset_name.

    Ground-truth boxes are always included; precomputed proposals are merged
    in from proposal_file, and horizontally-flipped copies are appended when
    cfg.TRAIN.USE_FLIPPED is set.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(
        gt=True,
        proposal_file=proposal_file,
        crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
    )
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, dataset)
    logger.info('Loaded dataset: {:s}'.format(dataset.name))
    return roidb
def visualize_ground_truth(dataset_name='nuclei_stage_1_local_val_split',
                           output_dir='vis'):
    """Render ground-truth boxes and masks for every image in the dataset.

    Boxes are padded with two extra columns of ones (score/class slots) so
    they match the layout visualize_im_masks expects.
    """
    # (The previous version defined an unused constant TOL here; removed.)
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(gt=True)
    for entry in roidb:
        boxes = entry['boxes']
        boxes = np.append(boxes, np.ones((len(boxes), 2)), 1)
        segms = entry['segms']
        visualize_im_masks(entry, boxes, segms, output_dir, show_class=False)
def get_roidb(dataset_name, proposal_file):
    """Load the gt training roidb, merge proposals, and optionally append
    horizontally-flipped entries."""
    ds = JsonDataset(dataset_name)
    crowd_thresh = cfg.TRAIN.CROWD_FILTER_THRESH
    roidb = ds.get_roidb(gt=True, proposal_file=proposal_file,
                         crowd_filter_thresh=crowd_thresh)
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, ds)
    logger.info('Loaded dataset: {:s}'.format(ds.name))
    return roidb
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize stored detections on top of the dataset images.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: pkl written by test_engine.py
            (all_boxes/all_segms/all_keyps).
        thresh: score threshold for drawing a detection.
        output_dir: images are written under <output_dir>/vis.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    # Pickle data must be read in binary mode; text mode ('r') breaks
    # pickle.load under Python 3.
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)
    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        # Empty per-class entries stay empty; otherwise pick this image's row.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]
        vis_utils.vis_one_image(
            im[:, :, ::-1], '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'), cls_boxes_i,
            segms=cls_segms_i, keypoints=cls_keyps_i, thresh=thresh,
            box_alpha=0.8, dataset=ds, show_class=True
        )
def get_image_list(ind_range):
    """Return the test roidb (optionally a slice) plus range bookkeeping."""
    roidb = JsonDataset(cfg.TEST.DATASET).get_roidb()
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, start, end, total_num_images
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize detections saved by test_engine.py on the dataset images.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: pkl with all_boxes/all_segms/all_keyps.
        thresh: score threshold for drawing.
        output_dir: images are written under <output_dir>/vis.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()
    # Binary mode is required for pickle data; text mode ('r') breaks
    # pickle.load under Python 3.
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)
    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        # Empty per-class entries pass through unchanged.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))
        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]
        vis_utils.vis_one_image(im[:, :, ::-1],
                                '{:d}_{:s}'.format(ix, im_name),
                                os.path.join(output_dir, 'vis'), cls_boxes_i,
                                segms=cls_segms_i, keypoints=cls_keyps_i,
                                thresh=thresh, box_alpha=0.8, dataset=ds,
                                show_class=True)
def load_images():
    """Load training images into a DataFrame and gt roidb entries keyed by
    file name, with decoded, summed instance masks attached under 'M'.

    Returns:
        (img_df, roidb): DataFrame with ImageId/TrainingSplit/images columns,
        and a dict mapping image file name -> roidb entry.
    """
    all_roi = [JsonDataset(d).get_roidb(gt=True) for d in ds]
    roidb = {}
    for rois in all_roi:
        for roi in rois:
            fname = roi['image'].rsplit('/', 1)[1]
            roidb[fname] = roi
    for roi in roidb.values():
        # Decode RLE masks and collapse the instance axis into one 2-D mask.
        M = mask_util.decode(roi['segms'])
        M = np.sum(M, -1)
        roi['M'] = M
    all_images = glob(
        os.path.join(DATASET_WORKING_DIR.as_posix(), 'images_train_fixed',
                     '*.jpg'))
    img_df = pd.DataFrame({'path': all_images})
    img_df['ImageId'] = img_df['path'].map(lambda p: p.rsplit('/', 1)[1])
    # Every image in this directory belongs to the training split.
    img_df['TrainingSplit'] = 'Train'
    img_df['images'] = img_df['path'].map(imread)
    # Keyword form: the positional `axis` argument to DataFrame.drop was
    # removed in pandas 2.0. (Stray no-op `img_df.sample(...)` debug calls
    # were also removed.)
    img_df.drop(columns=['path'], inplace=True)
    return img_df, roidb
def generate_rpn_on_dataset(multi_gpu=False):
    """Generate RPN proposals on the test dataset and evaluate them."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            num_images, output_dir)
    else:
        # Processes entire dataset range by default.
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    # Return the evaluation results for consistency with the other
    # generate_rpn_on_dataset variants (they were previously discarded;
    # callers ignoring the return value are unaffected).
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def __init__(self, model_config):
    """Configure Detectron from model_config and build the inference model."""
    merge_cfg_from_file(model_config)
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg()
    weights = cfg.TEST.WEIGHTS
    print(weights)
    self.__model = infer_engine.initialize_model_from_cfg(weights)
    # The dataset provides class/category metadata at inference time.
    self.__dataset = JsonDataset(cfg.TRAIN.DATASETS[0])
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    # GQA datasets use their own loader; everything else goes through the
    # COCO-style JsonDataset.
    dataset_cls = GqaDataset if 'gqa' in dataset_name else JsonDataset
    dataset = dataset_cls(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        roidb = dataset.get_roidb()
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def get_roidb(ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    roidb = JsonDataset(cfg.TEST.DATASET).get_roidb()
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, start, end, total_num_images
def test_net_on_dataset(multi_gpu=False):
    """Run inference on the test dataset, then run pose tracking / evaluation
    for posetrack-style datasets."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    # Run tracking and eval for posetrack datasets
    if dataset.name.startswith('posetrack') or dataset.name.startswith(
            'kinetics'):
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        if dataset.name.startswith('posetrack'):
            score_ap, score_mot, apAll, preAll, recAll, mota = \
                run_posetrack_tracking(output_dir, roidb)
            ##################### add by jianbo #############
            # (unused `re` import removed)
            import os, json
            from core.config import get_log_dir_path
            tmp_dic = {
                "total_AP": score_ap.tolist(),
                "total_MOTA": score_mot.tolist(),
                "apAll": apAll.tolist(),
                "preAll": preAll.tolist(),
                "recAll": recAll.tolist(),
                "mota": mota.tolist()
            }
            dir_path = get_log_dir_path()
            if not os.path.exists(dir_path):
                os.mkdir(dir_path)
            # Context manager guarantees flush/close even if serialization
            # fails; the previous code leaked the handle on error.
            with open(os.path.join(dir_path, "eval.json"), "w") as f:
                json.dump(tmp_dic, f)
            ##################### add by jianbo #############
        else:
            run_posetrack_tracking(output_dir, roidb)
    else:
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        run_posetrack_tracking(output_dir, roidb)
def generate_rpn_on_dataset(multi_gpu=False):
    """Run RPN inference over the full test dataset and evaluate the
    resulting proposal file."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        # Single process: generate_rpn_on_range covers the whole dataset
        # range by default.
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range()
    else:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            num_images, output_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.

    Also builds an index DataFrame (image path, target class, index) over
    the FULL roidb before any range restriction is applied.
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(proposal_file=proposal_file,
                                  proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        roidb = dataset.get_roidb(gt=True)
    # Restrict each entry's gt boxes/classes to its single target class.
    # NOTE(review): assumes every entry carries 'target_cls' (and, below,
    # 'index' / 'real_index') — these keys come from a project-specific
    # loader, not the stock JsonDataset; confirm against the caller.
    for item in roidb:
        all_cls = item['gt_classes']
        target_cls = item['target_cls']
        target_idx = np.where(all_cls == target_cls)[0]
        item['boxes'] = item['boxes'][target_idx]
        item['gt_classes'] = item['gt_classes'][target_idx]
    print('testing annotation number: ', len(roidb))
    # Build a lookup table over the full roidb (before range slicing).
    roidb_img = []
    roidb_cls = []
    roidb_index = []
    for item in roidb:
        roidb_img.append(item['image'])
        roidb_cls.append(item['target_cls'])
        roidb_index.append(item['index'])
    data_dict = {
        'img_ls': roidb_img,
        'cls_ls': roidb_cls,
        'index': roidb_index
    }
    index_pd = pd.DataFrame.from_dict(data_dict)
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
        # Rebase 'real_index' so it is relative to the start of this range.
        for item in roidb:
            item['real_index'] -= start
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, dataset, start, end, total_num_images, index_pd
def get_roidb(dataset_name, ind_range):
    """Get the roidb for dataset_name. Optionally restrict it to a range of
    indices if ind_range is a pair of integers.
    """
    roidb = JsonDataset(dataset_name).get_roidb()
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, start, end, total_num_images
def test_cls_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
    """Run classification inference on the test dataset and report accuracy."""
    dataset = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        image_count = len(dataset.get_roidb())
        acc = multi_gpu_test_cls_net_on_dataset(image_count, output_dir)
    else:
        acc = test_cls_net(output_dir, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    logger.info(
        'Classification Accuracy on TEST data is: {:.2f}%'.format(acc * 100))
    return {"Accuracy": acc}
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    # Pick the dataset class based on the configured data source.
    # NOTE(review): an unrecognized cfg.DATA_SOURCE leaves `dataset` unbound
    # and raises NameError below — consider raising an explicit error.
    if cfg.DATA_SOURCE == 'coco':
        dataset = JsonDataset(dataset_name)
    elif cfg.DATA_SOURCE == 'mammo':
        dataset = MammoDataset(dataset_name)
    elif cfg.DATA_SOURCE == 'lesion':
        dataset = LesionDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(proposal_file=proposal_file,
                                  proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        # NOTE(review): for DATA_SOURCE == 'lesion' without precomputed
        # proposals, no branch assigns `roidb` (the lesion case is commented
        # out below) — that path raises NameError at len(roidb).
        if cfg.DATA_SOURCE == 'coco':
            roidb = dataset.get_roidb(gt=True)
        elif cfg.DATA_SOURCE == 'mammo':
            roidb = dataset.get_roidb(gt=True, proposal_file='',
                                      crowd_filter_thresh=0)
        #elif cfg.DATA_SOURCE == 'lesion':
        #    roidb = dataset.get_roidb(
        #        gt=True)
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, dataset, start, end, total_num_images
def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
    """Run inference on the configured test dataset and evaluate results."""
    dataset = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # The multi-GPU path shards images across processes, one per batch.
        assert cfg.TEST.IMS_PER_BATCH == 1, 'Single batch only'
        image_total = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            image_total, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(output_dir, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir)
def get_roidb(ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    # If cfg.TEST.DATASET is already a tuple it is used directly; otherwise
    # the string is eval'd — presumably to turn a tuple-repr string back into
    # a tuple (TODO confirm against how the config is populated).
    # NOTE(review): eval on a config value executes arbitrary code if the
    # config is untrusted — prefer ast.literal_eval. Also note a plain
    # dataset-name string would make eval raise NameError here.
    dataset = JsonDataset(cfg.TEST.DATASET if isinstance(
        cfg.TEST.DATASET, tuple) else eval(cfg.TEST.DATASET))
    roidb = dataset.get_roidb()
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, start, end, total_num_images
def test_net_on_dataset(multi_gpu=False):
    """Run inference on the configured test dataset and evaluate it."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps = test_net()
    else:
        total = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            total, output_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                        all_keyps, output_dir)
def test_retinanet_on_dataset(multi_gpu=False):
    """Main entry point for testing on a given dataset (multi-GPU or not)."""
    output_dir = get_output_dir(training=False)
    logger.info(
        'Output will be saved to: {:s}'.format(os.path.abspath(output_dir)))
    dataset = JsonDataset(cfg.TEST.DATASET)
    # For test-dev or the full test split, generate detections for all images.
    if 'test-dev' in cfg.TEST.DATASET or 'test' in cfg.TEST.DATASET:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())
    if multi_gpu:
        boxes, scores, classes, image_ids = \
            multi_gpu_test_retinanet_on_dataset(
                cfg.TEST.NUM_TEST_IMAGES, output_dir, dataset)
    else:
        boxes, scores, classes, image_ids = test_retinanet()
    # Dump the raw detections first, just in case there are spurious failures
    # later on.
    pkl_file = os.path.join(output_dir, 'retinanet_detections.pkl')
    logger.info(
        'Writing roidb detections to file: {}'.format(
            os.path.abspath(pkl_file)))
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids),
        pkl_file)
    logger.info(
        'Wrote RetinaNet detections to {}'.format(os.path.abspath(pkl_file)))
    # Write detections in the json format the coco evaluation server takes,
    # then run coco evaluation on it.
    res_file = write_coco_detection_results(
        output_dir, dataset, boxes, scores, classes, image_ids)
    coco_eval = coco_evaluate(dataset, res_file, image_ids)
    box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
    return OrderedDict([(dataset.name, box_results)])
def test_net_on_dataset(multi_gpu=False):
    """Run inference on cfg.TEST.DATASET and return the evaluation results.

    Args:
        multi_gpu: distribute inference over all GPUs when True; run on a
            single GPU otherwise.

    Returns:
        The results dict from task_evaluation.evaluate_all.
    """
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)

    inference_timer = Timer()
    inference_timer.tic()
    if multi_gpu:
        # The scheduler needs the total number of images to split the work.
        total = len(dataset.get_roidb())
        detections = multi_gpu_test_net_on_dataset(total, output_dir)
    else:
        detections = test_net()
    inference_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(inference_timer.average_time))

    all_boxes, all_segms, all_keyps = detections
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
def test_retinanet(ind_range=None):
    """Test a RetinaNet model on the whole dataset or a subset of it.

    Args:
        ind_range: optional (start, end) pair of roidb indices; when given,
            only that slice of the dataset is processed (used for sharding
            across GPUs). When None, the first cfg.TEST.NUM_TEST_IMAGES
            images are processed.

    Returns:
        (boxes, scores, classes, image_ids) for the processed images. The
        detections are also saved to a pkl file in the output directory.
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    im_list = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        im_list = im_list[start:end]
        logger.info('Testing on roidb range: {}-{}'.format(start, end))
    else:
        # If testing over the whole dataset, use the NUM_TEST_IMAGES setting;
        # NUM_TEST_IMAGES can select a small subset for quick debugging.
        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(
            model, cfg.TEST.WEIGHTS, broadcast=False
        )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)

    # BUG FIX: im_list is already sliced above (either to the ind_range
    # shard or to NUM_TEST_IMAGES). Re-slicing with [0:NUM_TEST_IMAGES]
    # here would wrongly truncate an ind_range shard whenever
    # NUM_TEST_IMAGES < (end - start), silently dropping images in
    # multi-GPU runs. Pass the already-sliced list unchanged.
    boxes, scores, classes, image_ids = im_list_detections(model, im_list)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'retinanet_detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids,
             cfg=cfg_yaml),
        det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return boxes, scores, classes, image_ids
def test_net_on_dataset(multi_gpu=False):
    """Run inference on cfg.TEST.DATASET; run tracking and best-effort eval.

    For posetrack/kinetics datasets, additionally runs pose tracking on the
    detections. Evaluation failures are logged rather than raised, since
    evaluators are not available for every dataset.

    Args:
        multi_gpu: shard inference across all GPUs when True.
    """
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)

    timer = Timer()
    timer.tic()
    if multi_gpu:
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            len(dataset.get_roidb()), output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))

    # Run tracking for video datasets (posetrack / kinetics).
    if dataset.name.startswith(('posetrack', 'kinetics')):
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        run_posetrack_tracking(output_dir, roidb)

    # Best-effort evaluation: not every dataset ships an evaluator, so a
    # crash here is logged instead of propagated.
    try:
        evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
    except Exception as e:
        logger.error('Evaluation crashed with exception {}'.format(e))
def get_roidb_and_dataset(ind_range):
    """Load the roidb for the dataset named in the global cfg.

    Optionally restricts the roidb to a range of indices when ind_range is
    a (start, end) pair of integers.

    Args:
        ind_range: None for the full dataset, or a (start, end) index pair.

    Returns:
        (roidb, dataset, start, end, total_num_images) where total_num_images
        is always the size of the full (unsliced) roidb.
    """
    dataset = JsonDataset(cfg.TEST.DATASET)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        # Attach precomputed proposals (e.g. selective search) to each entry.
        roidb = dataset.get_roidb(
            proposal_file=cfg.TEST.PROPOSAL_FILE,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT
        )
    else:
        roidb = dataset.get_roidb()

    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
from __future__ import print_function
from __future__ import unicode_literals

import cPickle as pickle
import numpy as np
import scipy.io as sio
import sys

from datasets.json_dataset import JsonDataset

if __name__ == '__main__':
    # Convert a MATLAB file of selective-search proposals into the pkl
    # format consumed by the detection pipeline.
    # Usage: <script> dataset_name proposals.mat proposals.pkl
    dataset_name = sys.argv[1]
    file_in = sys.argv[2]
    file_out = sys.argv[3]

    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb()
    raw_data = sio.loadmat(file_in)['boxes'].ravel()
    # Proposals and roidb entries must align one-to-one by image order.
    assert raw_data.shape[0] == len(roidb)

    boxes = []
    scores = []
    ids = []
    for i in range(raw_data.shape[0]):
        if i % 1000 == 0:
            print('{}/{}'.format(i + 1, len(roidb)))
        # Selective search boxes are 1-indexed and (y1, x1, y2, x2);
        # convert to 0-indexed (x1, y1, x2, y2).
        i_boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
        boxes.append(i_boxes.astype(np.float32))
        # Selective search provides no scores; use zeros as placeholders.
        scores.append(np.zeros((i_boxes.shape[0]), dtype=np.float32))
        ids.append(roidb[i]['id'])

    # BUG FIX: the converted proposals were computed but never written to
    # file_out, so the script silently discarded all of its work. Persist
    # them in the pkl format expected downstream.
    with open(file_out, 'wb') as f:
        pickle.dump(
            dict(boxes=boxes, scores=scores, indexes=ids),
            f,
            pickle.HIGHEST_PROTOCOL
        )
    print('Wrote proposals to {}'.format(file_out))