def get_roidb_and_dataset(dataset_name, proposal_file, ind_range, gt_cls=False):
    """Get the roidb for the dataset specified in the global cfg.

    Optionally restrict it to a range of indices if ind_range is a pair of
    integers. When precomputed proposals are disabled, ``gt_cls`` is passed
    through to ``get_roidb(gt=...)``.
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        # Precomputed proposals must be read from an explicit file.
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT,
        )
    else:
        roidb = dataset.get_roidb(gt=gt_cls)

    # Record the full dataset size before any slicing.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg.

    Optionally restrict it to a range of indices if ind_range is a pair of
    integers. The special name 'live_targets' yields a live roidb paired
    with a dummy COCO dataset instead of a JSON-backed one.
    """
    if dataset_name == 'live_targets':
        from detectron.datasets.live_dataset import LiveRoidb
        import detectron.datasets.dummy_datasets as dummy_datasets
        roidb = LiveRoidb()
        json_dataset = dummy_datasets.get_coco_dataset()
        if not cfg.TRAIN.USE_FLIPPED:
            logger.info(
                'Live target data set will use flipped examples anyway!')
        logger.info('"Loaded" dataset: {:s}'.format('live_targets'))
        # Live roidb is never range-restricted: full span is returned.
        return roidb, json_dataset, 0, len(roidb), len(roidb)

    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT,
        )
    else:
        roidb = dataset.get_roidb()

    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0,
                        subset_pointer=None):
    """Run inference on a dataset.

    Supports 'live_' prefixed dataset names (no JSON dataset / no
    evaluation) and an optional ``subset_pointer`` whose ``.subset``
    attribute is consumed front-to-back across successive datasets.
    Returns the evaluation results dict, or None for live datasets.
    """
    if dataset_name[:5] != 'live_':
        # Only non-live datasets have a JSON-backed dataset to evaluate on.
        dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        # NOTE(review): assumes a non-live dataset here — `dataset` is
        # unbound for 'live_' names in the multi_gpu branch; confirm callers.
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id, subset_pointer=subset_pointer)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    if cfg.TEST.COCO_TO_VOC:
        # Optionally remap COCO class detections onto the VOC label set.
        all_boxes = coco_detects_to_voc(all_boxes)
    if dataset_name[:5] == 'live_':
        # Live datasets have no ground truth to evaluate against.
        return None
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir,
                                           subset_pointer=subset_pointer)
    if subset_pointer is not None:
        # prune the subset for the following datasets:
        subset_pointer.subset = subset_pointer.subset[len(dataset.get_roidb()
                                                          ):]
        print('remains', len(subset_pointer.subset)
              )  # should have 0 remains for the last set, voc_2012_train.
    return results
def test_cls_net_on_dataset(weights_file,
                            dataset_name,
                            proposal_file,
                            output_dir,
                            multi_gpu=False,
                            gpu_id=0):
    """Run classification inference on a dataset and log the accuracy.

    Returns a dict ``{"Accuracy": acc}`` with the fraction of correctly
    classified test samples.
    """
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Full roidb size is needed to shard images across GPUs.
        num_images = len(dataset.get_roidb())
        acc = multi_gpu_test_cls_net_on_dataset(num_images, output_dir)
    else:
        acc = test_cls_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    logger.info(
        'Classification Accuracy on TEST data is: {:.2f}%'.format(acc * 100))
    return {"Accuracy": acc}
def accuracy(dataset, detections_pkl, cls_idx=3):
    """Compute presence/absence accuracy of detections against ground truth.

    An image counts as correct when "has at least one ground-truth box"
    agrees with "has at least one detection for class ``cls_idx``".

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: path to a detections pickle containing 'all_boxes'.
        cls_idx: class index whose detections are checked. Defaults to 3,
            preserving the previously hard-coded value.

    Returns:
        float accuracy in [0, 1]; also printed to stdout.
    """
    # Load predictions and ground truths
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb(gt=True)
    dets = load_object(detections_pkl)
    all_boxes = dets['all_boxes']

    def id_or_index(ix, val):
        # Empty per-class lists have no per-image entry; pass them through.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    trues = 0.
    # Iterate through all images
    for ix, entry in enumerate(roidb):
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        true_boxes = entry['boxes']
        # Correct when gt presence and detection presence agree.
        if (true_boxes.shape[0] == 0) == (len(cls_boxes_i[cls_idx]) == 0):
            trues += 1
    # Accuracy = correct presence judgements / total images.
    acc = trues / len(roidb)
    print("Accuracy: " + str(acc))
    return acc
def generate_rpn_on_dataset(
    weights_file, dataset_name, _proposal_file_ignored, output_dir,
    multi_gpu=False, gpu_id=0
):
    """Generate RPN proposals over a dataset and evaluate the proposal file."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Shard the full image range across GPUs.
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, num_images,
            output_dir
        )
    else:
        # Processes entire dataset range by default
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(
            weights_file, dataset_name, _proposal_file_ignored, output_dir,
            gpu_id=gpu_id
        )
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def test_net_on_dataset(
    weights_file, dataset_name, proposal_file, output_dir, multi_gpu=False,
    gpu_id=0
):
    """Run detection inference on a dataset and evaluate the results."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Shard the full image range across GPUs.
        num_images = len(dataset.get_roidb())
        detections = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        detections = test_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id
        )
    all_boxes, all_segms, all_keyps = detections
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results
def test_net_on_dataset(
    weights_file, dataset_name, proposal_file, output_dir, multi_gpu=False,
    gpu_id=0
):
    """Run inference (boxes, segms, keypoints, body outputs) and evaluate."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Shard the full image range across GPUs.
        num_images = len(dataset.get_roidb())
        outputs = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        outputs = test_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id
        )
    all_boxes, all_segms, all_keyps, all_bodys = outputs
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, all_bodys, output_dir
    )
    return results
def generate_rpn_on_dataset(weights_file, dataset_name, _proposal_file_ignored,
                            output_dir, multi_gpu=False, gpu_id=0):
    """Generate RPN proposals for a dataset and evaluate the proposal file."""
    dataset = JsonDataset(dataset_name)
    inference_timer = Timer()
    inference_timer.tic()
    if multi_gpu:
        total = len(dataset.get_roidb())
        outputs = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, total,
            output_dir)
    else:
        # Processes entire dataset range by default
        outputs = generate_rpn_on_range(
            weights_file, dataset_name, _proposal_file_ignored, output_dir,
            gpu_id=gpu_id)
    _boxes, _scores, _ids, rpn_file = outputs
    inference_timer.toc()
    logger.info(
        'Total inference time: {:.3f}s'.format(inference_timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
def get_roidb(dataset_name, proposal_file, is_source=True):
    """Build a training roidb, optionally filtered by a VOC subset mask.

    ``is_source`` is forwarded to JsonDataset.get_roidb (domain-adaptation
    style source/target split — semantics defined in JsonDataset).
    """
    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb(
        gt=True,
        proposal_file=proposal_file,
        crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
        is_source=is_source
    )
    # NOTE(review): `subset_pointer` is not a parameter or local here —
    # presumably a module-level global set by the caller; confirm, otherwise
    # this branch raises NameError whenever cfg.VOC_SUBSET is non-empty.
    if cfg.VOC_SUBSET != '' and subset_pointer is not None:
        # print(len(voc_subset))
        # Consume the leading len(roidb) entries of the shared boolean mask;
        # the remainder is left for the datasets loaded after this one.
        voc_subset = subset_pointer.subset
        this_sub = voc_subset[:len(roidb)]
        subset_pointer.subset = voc_subset[len(roidb):]
        # print('remains',len(get_roidb.voc_subset))
        # should have 0 remains for the last set, voc_2012_train.
        # # for pruning disk space:
        # import os
        # for taking, roi in zip(this_sub,roidb):
        #     if not taking:
        #         os.remove(roi['image'])
        # filter roidb:
        roidb = [roi for taking, roi in zip(this_sub, roidb) if taking]
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, ds)
    logger.info('Loaded dataset: {:s}'.format(dataset_name))
    return roidb
def get_roidb(dataset_name, proposal_file):
    """Load the ground-truth training roidb, appending flipped entries
    when cfg.TRAIN.USE_FLIPPED is set."""
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(
        gt=True,
        proposal_file=proposal_file,
        crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
    )
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, dataset)
    logger.info('Loaded dataset: {:s}'.format(dataset.name))
    return roidb
def get_roidb(dataset_name, proposal_file):
    """Load the ground-truth training roidb for one dataset.

    Horizontally-flipped copies are appended when cfg.TRAIN.USE_FLIPPED
    is enabled.
    """
    json_ds = JsonDataset(dataset_name)
    entries = json_ds.get_roidb(gt=True, proposal_file=proposal_file,
                                crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH)
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(entries, json_ds)
    logger.info('Loaded dataset: {:s}'.format(json_ds.name))
    return entries
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset, then compute and plot ROC/FROC curves.

    Returns (results, auc_score, afroc_score): the standard evaluation dict
    (augmented with AUC/AFROC under the 'box' key), the AUC dict, and the
    scalar AFROC value.
    """
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    # `model` stays '' in the multi-GPU branch — only single-GPU test_net
    # returns the model handle used for naming the plots/saves below.
    model = ''
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps, model = test_net(weights_file,
                                                          dataset_name,
                                                          proposal_file,
                                                          output_dir,
                                                          gpu_id=gpu_id)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)
    # ROC/FROC are computed from the box detections at the configured IoU.
    roc_data = metrics.calculate_roc(all_boxes, dataset, cfg.TEST.IOU)
    froc_data = metrics.calculate_froc(all_boxes, dataset, cfg.TEST.IOU)
    auc_score = {
        dataset.name: {
            u'box': {
                u'AUC': auc(roc_data[0], roc_data[1])
            }
        }
    }
    # AFROC = area under the FROC curve via trapezoidal integration.
    afroc_score = np.trapz(froc_data[0], froc_data[2])
    afroc = {dataset.name: {u'box': {u'AFROC': afroc_score}}}
    print('Afroc score: {:.4f}'.format(afroc_score))
    plot.plot_roc(roc_data, auc_score[dataset.name][u'box'][u'AUC'], dataset,
                  model, output_dir)
    plot.plot_froc(froc_data, dataset, model, output_dir)
    plot.plot_afroc(froc_data, dataset, model, output_dir)
    # Persist raw curve data alongside the plots.
    save.np_save(np.stack(roc_data), 'roc', dataset, model, output_dir)
    save.np_save(np.stack(froc_data), 'froc', dataset, model, output_dir)
    # Merge the curve metrics into the standard results dict.
    results[dataset_name][u'box'].update(auc_score[dataset.name][u'box'])
    results[dataset_name][u'box'].update(afroc[dataset.name][u'box'])
    return results, auc_score, afroc_score
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize detections from a test_engine.py pickle over a dataset.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: path to a pickle with 'all_boxes', 'all_segms',
            'all_keyps' entries.
        thresh: score threshold for drawing detections.
        output_dir: images are written under ``output_dir/vis``.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()

    # Fix: pickle files are binary — open with 'rb'. Text mode breaks under
    # Python 3 (and can corrupt reads on Windows). The context manager also
    # guarantees the handle is closed.
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'

    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        # Empty per-class lists have no per-image entry; pass them through.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))

        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]

        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]

        # BGR -> RGB for the visualization utility.
        vis_utils.vis_one_image(
            im[:, :, ::-1],
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Render detections from a test_engine.py pickle for each roidb image.

    Visualizations are written under ``output_dir/vis``; ``limit`` > 0
    stops after that many images.
    """
    json_ds = JsonDataset(dataset)
    roidb = json_ds.get_roidb()

    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)
    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'
    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def per_image(ix, per_cls):
        # Empty class lists carry no per-image entry; pass them through.
        return per_cls[ix] if len(per_cls) else per_cls

    num_images = len(roidb)
    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, num_images))

        image_path = entry['image']
        im = cv2.imread(image_path)
        im_name = os.path.splitext(os.path.basename(image_path))[0]

        boxes_i = [per_image(ix, c) for c in all_boxes]
        segms_i = [per_image(ix, c) for c in all_segms]
        keyps_i = [per_image(ix, c) for c in all_keyps]

        # BGR -> RGB for the visualization utility.
        vis_utils.vis_one_image(
            im[:, :, ::-1],
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            boxes_i,
            segms=segms_i,
            keypoints=keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=json_ds,
            show_class=True
        )
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset, reusing cached results when present.

    Short-circuits on two caches in ``output_dir``: a COCO-format results
    json (evaluated directly) and a detections.pkl (loaded instead of
    re-running inference). Otherwise runs single- or multi-GPU inference
    and evaluates the detections.
    """
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    ################################################################
    import pickle
    # Cache 1: a results json means evaluation can be redone directly.
    res_file = os.path.join(output_dir,
                            'bbox_' + dataset_name + '_results.json')
    print("res_file = {}==========================".format(res_file))
    if os.path.exists(res_file):
        import detectron.datasets.json_dataset_evaluator as json_dataset_evaluator
        print("res_file = {} exists! Loading res_file".format(res_file))
        coco_eval = json_dataset_evaluator._do_detection_eval(
            dataset, res_file, output_dir)
        box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
        results = OrderedDict([(dataset.name, box_results)])
        return results
    ################################################################
    # Cache 2: previously computed raw detections.
    det_name = "detections.pkl"
    det_file = os.path.join(output_dir, det_name)
    print("det_file = {}==========================".format(det_file))
    if os.path.exists(det_file):
        print("{} exists! Loading detection results".format(det_file))
        # Fix: open the pickle in binary mode and close the handle —
        # `pickle.load(open(det_file))` fails under Python 3 (text mode)
        # and leaks the file object.
        with open(det_file, 'rb') as f:
            res = pickle.load(f)
        all_boxes = res['all_boxes']
        all_segms = res['all_segms']
        all_keyps = res['all_keyps']
    ################################################################
    elif multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(weights_file,
                                                   dataset_name,
                                                   proposal_file,
                                                   output_dir,
                                                   gpu_id=gpu_id)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)
    return results
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg.

    Optionally restrict it to a range of indices if ind_range is a pair of
    integers.
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        # Reading precomputed proposals requires an explicit file.
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT,
        )
    else:
        roidb = dataset.get_roidb()

    total_num_images = len(roidb)
    start, end = ind_range if ind_range is not None else (0, total_num_images)
    if ind_range is not None:
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
def get_roidb(dataset_name, ind_range):
    """Get the roidb for the dataset specified in the global cfg.

    Optionally restrict it to a range of indices if ind_range is a pair of
    integers.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    # Record the full dataset size before any slicing.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, start, end, total_num_images
def get_roidb(dataset_info, proposal_file):
    """Load a training roidb, extended with flipped and augmented entries.

    Flipping is controlled by cfg.TRAIN.USE_FLIPPED; augmentation by
    cfg.TRAIN.USE_TRANSFORMATION / cfg.TRAIN.TRANSFORM_SAMPLES.
    """
    dataset = JsonDataset(dataset_info)
    roidb = dataset.get_roidb(
        gt=True,
        proposal_file=proposal_file,
        crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
    )
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, dataset)
    # TT: Augmentation
    if cfg.TRAIN.USE_TRANSFORMATION:
        logger.info('Appending augmented training examples...')
        extend_with_augmented_entries(
            roidb, dataset, aug_samples=int(cfg.TRAIN.TRANSFORM_SAMPLES))
    # TT: end
    logger.info('Loaded dataset: {:s}'.format(dataset.name))
    return roidb
from __future__ import unicode_literals
import _init_paths
import argparse
import cv2  # NOQA (Must import before importing caffe2 due to bug in cv2)
import os
import pprint
import sys
import time
from caffe2.python import workspace
from detectron.datasets.json_dataset import JsonDataset
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.core.config import merge_cfg_from_list
from detectron.core.test_engine import run_inference
from detectron.utils.logging import setup_logging
import detectron.utils.c2 as c2_utils

# Script: load the nucoco validation roidb with precomputed RRPN proposals.
# NOTE(review): the proposal path and limit are hard-coded to one machine's
# scratch space — parameterize before reuse.
dataset_name = 'nucoco_val'
proposal_file = '/ssd_scratch/mrnabati/RRPN/output/proposals/nucoco_sw_fb/rrpn_v5/proposals_nucoco_val.pkl'
proposal_limit = 2000

# Keep at most `proposal_limit` proposals per image when building the roidb.
dataset = JsonDataset(dataset_name)
roidb = dataset.get_roidb(
    proposal_file=proposal_file,
    proposal_limit=proposal_limit
)
import _init_paths
import numpy as np
import h5py
import sys
from detectron.datasets.json_dataset import JsonDataset
from detectron.utils.io import save_object

if __name__ == '__main__':
    # Usage: script.py <dataset_name> <input.h5> <output_file>
    dataset_name = sys.argv[1]
    file_in = sys.argv[2]
    file_out = sys.argv[3]
    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb()
    # Accumulators for the converted proposals (one entry per image).
    boxes = []
    scores = []
    ids = []
    with h5py.File(file_in, 'r') as f:
        raw_boxes = f['boxes']
        num_imgs = len(raw_boxes)
        # The h5 file must align one-to-one with the roidb images.
        assert num_imgs == len(roidb)
        for ind in range(num_imgs):
            if ind % 1000 == 0:
                print('{}/{}'.format(ind + 1, len(roidb)))
            ## -------- Working down below
            # NOTE(review): conversion body appears to continue beyond this
            # chunk — `boxes`/`scores`/`ids` are filled and saved below.
def main(args):
    """Visualize Caffe2 nets of a Detectron model as xdot graphs.

    Flags are passed as substrings in ``args.opts``: 'minimal' (compact
    graph), 'train' (training net), 'forward' (forward-only cfg),
    'shapes' (run one image to annotate blob shapes; requires a model
    file), 'params' (show parameter blobs).
    """
    MINIMAL = False
    TRAIN = False
    FORWARD = False
    SHAPES = False
    HIDE_PARAMS = True
    if args.opts is not None:
        if 'minimal' in args.opts:
            MINIMAL = True
        if 'train' in args.opts:
            TRAIN = True
        if 'forward' in args.opts:
            FORWARD = True
        if 'shapes' in args.opts:
            SHAPES = True
        if 'params' in args.opts:
            HIDE_PARAMS = False
    # Shape annotation needs real weights to run the net once.
    if SHAPES and args.model_file is None:
        raise ValueError('Specify model file')
    MODEL_FILE = args.model_file
    NET_NAMES = args.net_names
    if MINIMAL:
        get_dot_graph = lambda net, shapes: net_drawer.GetPydotGraphMinimal(
            net, rankdir="BT")
    else:
        get_dot_graph = lambda net, shapes: net_drawer.GetPydotGraph(
            net, rankdir="BT", shapes=shapes, hide_params=HIDE_PARAMS)
    # Get model
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    cfg.NUM_GPUS = 1
    cfg.VIS_NET = True
    if FORWARD:
        cfg.MODEL.FORWARD_ONLY = True
    assert_and_infer_cfg(cache_urls=False)
    if SHAPES and TRAIN:
        raise NotImplementedError
    # Run model to get shape information of all blobs
    if SHAPES:
        model = infer_engine.initialize_model_from_cfg(MODEL_FILE)
        workspace.RunNetOnce(model.param_init_net)
        nu.broadcast_parameters(model)
        dataset = JsonDataset(cfg.TRAIN.DATASETS[0])
        roidb = dataset.get_roidb()
        with c2_utils.NamedCudaScope(0):
            if cfg.MODEL.TRACKING_ON:
                # Tracking models consume image pairs.
                roidb_min = [roidb[0], roidb[1]]
                im_list = [cv2.imread(e['image']) for e in roidb_min]
                infer_engine.multi_im_detect_all(model, im_list, [None, None])
            else:
                infer_engine.im_detect_all(model, roidb[0]['image'], None)
    else:
        model = model_builder.create(cfg.MODEL.TYPE, train=TRAIN)
    # Close any previously opened xdot viewers.
    subprocess.call(["killall", "xdot"])
    # Visualize all specified nets
    for net_name in NET_NAMES:
        net = getattr(model, net_name, None)
        if net:
            print('processing graph {}...'.format(net_name))
            g = get_dot_graph(net.Proto(), shapes=SHAPES)
            name = net_name  # NOTE(review): unused — dot_name uses net_name directly.
            if TRAIN:
                name_append = 'train'
            else:
                name_append = 'infer'
            # Save graph
            graph_dir = os.path.join(args.output_dir, cfg.MODEL.TYPE)
            if not os.path.exists(graph_dir):
                os.makedirs(graph_dir)
            dot_name = os.path.join(graph_dir,
                                    '{}_{}.dot'.format(net_name, name_append))
            g.write_dot(dot_name)
            # Open the rendered graph in a non-blocking viewer.
            subprocess.Popen(['xdot', dot_name])
def get_roidb(dataset_name, proposal_file):
    """Load a training roidb with (mostly disabled) class-rebalancing hacks.

    All the ``if 0:`` branches below are dead experiment code that
    oversampled minority classes for specific in-house datasets
    ("shanghai", "mask", "S1".."S5"); with every switch at 0 this reduces
    to the standard gt roidb plus optional flipping.
    """
    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb(gt=True, proposal_file=proposal_file,
                         crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH)
    sum1 = 0
    # Shallow copy of the roidb used as the iteration source while `roidb`
    # itself is appended to by the rebalancing branches.
    roidb0 = roidb * 1  #for shanghai circuit
    if 0:  #for shanghai ltps label =3
        # Bucket entries by the class of their first gt box (3 classes).
        num_list = [[], [], []]
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                # Entries without gt boxes are skipped.
                continue
            if roidb_list['gt_classes'][0] == 1:
                num_list[0].append(roidb_list)
            if roidb_list['gt_classes'][0] == 2:
                num_list[1].append(roidb_list)
            if roidb_list['gt_classes'][0] == 3:
                num_list[2].append(roidb_list)
        num_list_num = [
            len(num_list[0]), len(num_list[1]), len(num_list[2])
        ]
        print(num_list_num)
        # Oversample every class up to the size of the largest one.
        num_max = max(num_list_num)
        else_num = [num_max - sub_num for sub_num in num_list_num]
        up_int = int(0)
        for ii, sub_else_num in enumerate(else_num):
            if sub_else_num > len(num_list[ii]):
                # Need more than one full copy: duplicate the bucket first.
                up_int = math.ceil(
                    float(sub_else_num) / len(num_list[ii]))  #up get int
                vv = num_list[ii] * 1
                for up_int_i in range(int(up_int) - 1):
                    for sub_vv in vv:
                        num_list[ii].append(sub_vv)
            #else:
            # Top up with a random sample to exactly match the max count.
            uu = random.sample(num_list[ii], sub_else_num)
            for sub_uu in uu:
                roidb.append(sub_uu)
    if 0:  #for shanghai tp label =2 tp
        # Same rebalancing scheme, two classes.
        num_list = [[], []]
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                continue
            if roidb_list['gt_classes'][0] == 1:
                num_list[0].append(roidb_list)
            if roidb_list['gt_classes'][0] == 2:
                num_list[1].append(roidb_list)
        num_list_num = [len(num_list[0]), len(num_list[1])]
        num_max = max(num_list_num)
        else_num = [num_max - sub_num for sub_num in num_list_num]
        up_int = int(0)
        for ii, sub_else_num in enumerate(else_num):
            if sub_else_num > len(num_list[ii]):
                up_int = math.ceil(
                    float(sub_else_num) / len(num_list[ii]))  #up get int
                vv = num_list[ii] * 1
                for up_int_i in range(int(up_int) - 1):
                    for sub_vv in vv:
                        num_list[ii].append(sub_vv)
            #else:
            uu = random.sample(num_list[ii], sub_else_num)
            for sub_uu in uu:
                roidb.append(sub_uu)
    if 0:  #for mask
        # Same rebalancing scheme, four classes.
        num_list = [[], [], [], []]
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                continue
            if roidb_list['gt_classes'][0] == 1:
                num_list[0].append(roidb_list)
            if roidb_list['gt_classes'][0] == 2:
                num_list[1].append(roidb_list)
            if roidb_list['gt_classes'][0] == 3:
                num_list[2].append(roidb_list)
            if roidb_list['gt_classes'][0] == 4:
                num_list[3].append(roidb_list)
            #if roidb_list['gt_classes'][0] == 5:
            #    aa=1
        if 1:
            num_list_num = [
                len(num_list[0]), len(num_list[1]), len(num_list[2]),
                len(num_list[3])
            ]
            num_max = max(num_list_num)
            else_num = [num_max - sub_num for sub_num in num_list_num]
            up_int = int(0)
            for ii, sub_else_num in enumerate(else_num):
                if sub_else_num > len(num_list[ii]):
                    up_int = math.ceil(
                        float(sub_else_num) / len(num_list[ii]))  #up get int
                    vv = num_list[ii] * 1
                    for up_int_i in range(int(up_int) - 1):
                        for sub_vv in vv:
                            num_list[ii].append(sub_vv)
                #else:
                uu = random.sample(num_list[ii], sub_else_num)
                for sub_uu in uu:
                    roidb.append(sub_uu)
    if 0:  # for mask verify
        # Diagnostic pass: only counts the per-class buckets.
        roidb0 = roidb * 1
        num_list = [[], [], [], []]
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                continue
            if roidb_list['gt_classes'][0] == 1:
                num_list[0].append(roidb_list)
            if roidb_list['gt_classes'][0] == 2:
                num_list[1].append(roidb_list)
            if roidb_list['gt_classes'][0] == 3:
                num_list[2].append(roidb_list)
            if roidb_list['gt_classes'][0] == 4:
                num_list[3].append(roidb_list)
        num_list_num = [
            len(num_list[0]), len(num_list[1]), len(num_list[2]),
            len(num_list[3])
        ]
        print('ok')
    if 0:  #for S5
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                continue
            if roidb_list['gt_classes'][0] == 0:
                print('test.jpg')
    if 0:  #not for S5
        # Fixed-factor oversampling of non-background entries.
        for roidb_i, roidb_list in enumerate(roidb0):
            try:
                ppp = roidb_list['gt_classes'][0]
            except:
                continue
            if roidb_list['gt_classes'][
                    0] != 0:  #and roidb_list['gt_classes'][0] != 1:
                #print('else')
                for mm in range(3):  #for 3 S3 S4 5 S2 3 S1 ? 2 for S5
                    roidb.append(roidb_list)
            if 0:  # for S4
                if 1 in roidb_list['gt_classes']:  #1 for baise
                    for mm in range(8):  #for 3 S3 S4 8
                        roidb.append(roidb_list)
            if 0:
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
                roidb.append(roidb_list)
            sum1 += 1
            #if roidb_list['gt_classes'][0] == 1:
            #    print('error')
    if cfg.TRAIN.USE_FLIPPED:
        logger.info('Appending horizontally-flipped training examples...')
        extend_with_flipped_entries(roidb, ds)
    logger.info('Loaded dataset: {:s}'.format(ds.name))
    return roidb
def visualize_ranking(dataset, detections_pkl, opts):
    """Plot top-scoring detections next to error cases on an image grid.

    Ranks images by the summed scores of their top ``opts.box_count``
    detections for class ``opts.class_id``, suppresses near-duplicate
    locations via ``opts.min_distance`` (geographic, from
    ``opts.coord_file``), and saves the figure to 'ranking.png'.
    """
    # Load predictions and ground truths
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb(gt=True)
    dets = load_object(detections_pkl)
    all_boxes = dets['all_boxes']

    def id_or_index(ix, val):
        # Empty per-class lists have no per-image entry; pass them through.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    # Load coordinates
    # NOTE(review): presumably maps image filename -> (lat, lon); confirm
    # against the producer of opts.coord_file.
    with open(opts.coord_file) as json_file:
        coord_data = json.load(json_file)
    # Iterate through all images and note false positive and negatives,
    # as well as entry scores
    false_positives = []
    false_negatives = []
    scores = []
    for ix, entry in enumerate(roidb):
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        preds = np.array(cls_boxes_i[opts.class_id])
        entry['preds'] = preds
        true_boxes = entry['boxes']
        if preds.shape[0] > 0 and np.max(preds[:, -1]) > opts.threshold:
            # Score = sum of the top `box_count` above-threshold box scores.
            box_scores = preds[:, -1]
            box_scores = box_scores[np.where(box_scores > opts.threshold)]
            score = np.sum(
                box_scores[np.argsort(box_scores)[-opts.box_count:]])
            scores.append([entry, score])
            if true_boxes.shape[0] == 0:
                false_positives.append(entry)
        else:
            if true_boxes.shape[0] > 0:
                false_negatives = add_negative(false_negatives, entry,
                                               coord_data, opts.min_distance)
    # Find top rated entries (object array: column 0 entry, column 1 score)
    scores = np.array(scores)
    scores = scores[np.argsort(scores[:, 1])[::-1]]
    for entry in scores[:, 0]:
        entry['coords'] = coord_data[os.path.split(entry['image'])[-1]]
    # Filter by proximity: zero out lower-ranked entries within
    # min_distance meters of a higher-ranked one.
    for i in range(scores.shape[0]):
        if scores[i][1] > 0:
            current_entry = scores[i][0]
            for j in range(i + 1, scores.shape[0]):
                second_entry = scores[j][0]
                dist = distance(
                    (current_entry['coords'][0], current_entry['coords'][1]),
                    (second_entry['coords'][0],
                     second_entry['coords'][1])).km * 1000
                if dist < opts.min_distance:
                    scores[j][1] = 0
    scores = scores[np.where(scores[:, 1] > 0)]
    top_entries = scores[np.argsort(scores[:, 1])[-opts.image_count:][::-1]]
    # Choose random negative samples
    false_samples = np.append(false_negatives, false_positives)
    np.random.shuffle(false_samples)
    # Visualize positive and negative samples
    # Grid orientation depends on opts.angle ('ver' = vertical layout).
    rows_cols = (opts.image_count, 2) if opts.angle == 'ver' \
        else (2, opts.image_count)
    plt_shape = (6., opts.image_count * 2.5) if opts.angle == 'ver' \
        else (opts.image_count * 2.5, 6.)
    fig = plt.figure(1, plt_shape)
    grid = ImageGrid(
        fig,
        111,
        nrows_ncols=rows_cols,
        axes_pad=0.03,
        label_mode='L',
    )
    # Show top ranked images
    for i, result in enumerate(top_entries):
        entry = result[0]
        score = result[1]
        grid_idx = i
        if opts.angle == 'ver':
            # Vertical layout interleaves top/error rows.
            grid_idx = i * 2
        # Load image and add bounding boxes
        im = cv2.imread(entry['image'])
        preds = entry['preds']
        true_boxes = entry['boxes']
        for bbox in true_boxes:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                _GT_COLOR, opts.box_thickness)
        count = 0
        for bbox in preds:
            if bbox[-1] > opts.threshold:
                count += 1
                print(
                    os.path.split(entry['image'])[-1] + ': ' + str(bbox[0:4]))
                im = vis_bbox(
                    im,
                    (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                    _PRED_COLOR, opts.box_thickness)
            if count >= opts.box_count:
                break
        # Adjust grid setting
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_img(grid, im, grid_idx)
        t = grid[grid_idx].text(12,
                                42,
                                "Score: " + str(round(score, 3)),
                                fontsize=8,
                                bbox=dict(boxstyle='square',
                                          fc='white',
                                          ec='none',
                                          alpha=0.6))
        if i == 0:
            if opts.angle == 'ver':
                grid[grid_idx].set_title("Top\nPredictions", size=18)
            else:
                grid[grid_idx].set_ylabel("Top Predictions", fontsize=13)
    # Show random negative samples (false positive, false negative)
    for i, entry in enumerate(false_samples):
        if i >= opts.image_count:
            break
        grid_idx = opts.image_count + i
        if opts.angle == 'ver':
            grid_idx = 2 * i + 1
        # Load image and add bounding boxes
        im = cv2.imread(entry['image'])
        preds = entry['preds']
        true_boxes = entry['boxes']
        for bbox in true_boxes:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                _GT_COLOR, opts.box_thickness)
        for bbox in preds:
            if bbox[-1] > opts.threshold:
                im = vis_bbox(
                    im,
                    (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                    _PRED_COLOR, opts.box_thickness)
        # Adjust grid setting
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        grid[grid_idx].imshow(im)
        grid[grid_idx].grid(False)
        grid[grid_idx].set_xticks([])
        grid[grid_idx].set_yticks([])
        if i == 0:
            if opts.angle == 'ver':
                grid[grid_idx].set_title("Errors", size=18)
            else:
                grid[grid_idx].set_ylabel("Errors", fontsize=13)
    plt.axis('off')
    plt.subplots_adjust(hspace=1)
    plt.savefig("ranking.png", dpi=300, bbox_inches='tight')
def get_roidb(dataset_name, proposal_file):
    """Fetch the ground-truth roidb for one dataset (crowd-filtered)."""
    json_ds = JsonDataset(dataset_name)
    return json_ds.get_roidb(
        gt=True,
        proposal_file=proposal_file,
        crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
    )
ind_range=args.range, multi_gpu_testing=args.multi_gpu_testing, check_expected_results=False, evaluation=False) all_boxes = all_results['all_boxes'] test_dataset = JsonDataset(cfg.TEST.DATASETS[0]) image_set = test_dataset.name.split('_')[-1] root_path = DATASETS[test_dataset.name][ROOT_DIR] image_set_path = os.path.join(root_path, 'ImageSets', 'Main', image_set + '.txt') with open(image_set_path, 'r') as f: image_index = [x.strip() for x in f.readlines()] test_roidb = test_dataset.get_roidb() for i, entry in enumerate(test_roidb): index = os.path.splitext(os.path.split(entry['image'])[1])[0] assert index == image_index[i] # crop images based on detected boxes and store into imgs_crop imgs_crop = [] for cls_ind, cls in enumerate(test_dataset.classes): if cls == '__background__': continue for im_ind, index in enumerate(image_index): dets = all_boxes[cls_ind][im_ind] if type(dets) == list: assert len(dets) == 0, \ 'dets should be numpy.ndarray or empty list' continue
def complete_stats(dataset, detections_pkl, threshold, box_idx, long_output=True):
    """Compute image-level classification statistics from detection results.

    Each image is treated as one binary sample: it is "predicted positive"
    when any detection of the class at `box_idx` scores above `threshold`,
    and "actually positive" when it has at least one ground-truth box.

    Args:
        dataset: name of the JsonDataset holding the ground truth.
        detections_pkl: path to a pickled dict with an 'all_boxes' entry
            (per-class, per-image Nx5 arrays of [x1, y1, x2, y2, score]).
        threshold: confidence threshold for calling an image positive.
        box_idx: class index into 'all_boxes' to evaluate.
        long_output: print a verbose report when True, otherwise one
            semicolon-separated "acc;prec;rec;fm;" line.

    Returns:
        Accuracy as a float (NaN when the roidb is empty).
    """
    # Load predictions and ground truths
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb(gt=True)
    dets = load_object(detections_pkl)
    all_boxes = dets['all_boxes']

    def id_or_index(ix, val):
        # A class with no detections anywhere is stored as a flat empty
        # list; otherwise index out this image's detections.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    true_positives = 0
    true_negatives = 0
    false_positives = 0
    false_negatives = 0
    total = len(roidb)
    # Iterate through all images
    for ix, entry in enumerate(roidb):
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        preds = np.array(cls_boxes_i[box_idx])
        true_boxes = entry['boxes']
        # Column 4 of each prediction row holds the confidence score.
        predicted_pos = preds.shape[0] > 0 and np.max(preds[:, 4]) > threshold
        # Check if the image resulted in a true/false positive/negative
        if true_boxes.shape[0] == 0:
            if predicted_pos:
                false_positives += 1
            else:
                true_negatives += 1
        else:
            if predicted_pos:
                true_positives += 1
            else:
                false_negatives += 1

    # Calculate the statistics. Precision/recall are NaN when undefined,
    # except the degenerate "nothing to find and nothing found" cases,
    # which count as perfect.
    prec = float('nan')
    if true_positives + false_positives > 0:
        prec = true_positives / float(true_positives + false_positives)
    elif false_negatives == 0:
        prec = 1.
    rec = float('nan')
    if true_positives + false_negatives > 0:
        rec = true_positives / float(true_positives + false_negatives)
    elif false_positives == 0:
        rec = 1.
    # Bug fix: guard against ZeroDivisionError on an empty roidb.
    acc = float(true_positives + true_negatives) / total if total > 0 else float('nan')
    # Bug fix: the original guard `prec > 0 or rec > 0` mishandled NaN
    # (NaN > 0 is False), yielding fm == 0 for some undefined combinations
    # and NaN for others. Make fm NaN whenever either component is NaN.
    if np.isnan(prec) or np.isnan(rec):
        fm = float('nan')
    elif prec > 0 or rec > 0:
        fm = 2.0 * prec * rec / (prec + rec)
    else:
        fm = 0

    # Re-enable printing (output was suppressed earlier in the tool;
    # NOTE(review): enablePrint is defined elsewhere in this file).
    enablePrint()

    # Print results
    if long_output:
        print("True positives: {}\tFalse positives: {}".format(true_positives, false_positives))
        print("True negatives: {}\tFalse negatives: {}".format(true_negatives, false_negatives))
        print("Total: {}".format(total))
        print("Precision: " + str(prec*100))
        print("Recall: " + str(rec*100))
        print("F-measure: " + str(fm*100))
        print("Accuracy: " + str(acc*100))
    else:
        print("{};{};{};{};".format(acc, prec, rec, fm))
    return acc
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) return transform_matrix # input_dir = '/mnt/genesis/vegas/Databases/CBIS-DDSM_resized/Train/Ground Truth' # output = '/mnt/genesis/vegas/transformed' # test = '/mnt/genesis/vegas' input_dir = '/mnt/Cargo_2/Diploma_Thesis/Databases/CBIS-DDSM/Train/Ground Truth' # output = '/mnt/genesis/vegas/transformed' test = '/home/gru' ds = JsonDataset('CBIS_DDSM_train') roidb = ds.get_roidb(gt=True, proposal_file=None, crowd_filter_thresh=0.7 ) # if not os.path.isdir(output): # os.mkdir(output) images = [os.path.join(root, file) for root, _, files in os.walk(os.path.join(input_dir, 'Ground Truth')) for file in files] for i in range(len(roidb)): if '00465_LEFT_CC' not in roidb[i]['image']: continue # angle = uniform(-20, 20) angle = 17.87189165364299 # shear = uniform(-0.2, 0.2) shear = 0.18923481623384603 # zoom = uniform(0.8, 1.2) zoom = 1.2
# Convert selective-search proposals from a MATLAB .mat file into
# roidb-aligned Python lists of boxes/scores/ids.
from __future__ import unicode_literals

# NOTE(review): cPickle is Python-2 only; this script targets py2.
import cPickle as pickle
import numpy as np
import scipy.io as sio
import sys
from detectron.datasets.json_dataset import JsonDataset

if __name__ == '__main__':
    # Usage: <script> <dataset_name> <input .mat file> <output file>
    dataset_name = sys.argv[1]
    file_in = sys.argv[2]
    # file_out is not consumed in the visible portion of this script —
    # presumably the converted proposals are pickled to it further down.
    file_out = sys.argv[3]

    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb()
    # 'boxes' is a MATLAB cell array: one entry of proposal boxes per image,
    # expected to line up one-to-one with the roidb.
    raw_data = sio.loadmat(file_in)['boxes'].ravel()
    assert raw_data.shape[0] == len(roidb)

    boxes = []
    scores = []
    ids = []
    for i in range(raw_data.shape[0]):
        if i % 1000 == 0:
            print('{}/{}'.format(i + 1, len(roidb)))
        # selective search boxes are 1-indexed and (y1, x1, y2, x2)
        i_boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
        boxes.append(i_boxes.astype(np.float32))
        # Proposals carry no objectness score; pad with zeros.
        scores.append(np.zeros((i_boxes.shape[0]), dtype=np.float32))
        ids.append(roidb[i]['id'])
def test_net_on_dataset(weights_file, dataset_name, proposal_file, output_dir,
                        multi_gpu=False, gpu_id=0):
    """Run inference on a dataset and evaluate the results.

    Args:
        weights_file: path to the model weights.
        dataset_name: name of the JsonDataset to test on.
        proposal_file: precomputed proposals file (or None).
        output_dir: directory where detections/results are written.
        multi_gpu: distribute inference over the configured GPUs.
        gpu_id: GPU to use for single-GPU inference.

    Returns:
        The dict produced by task_evaluation.evaluate_all.
    """
    # Debug switch: when True, skip inference and reload previously saved
    # detections from `tmp_path` below instead.
    load_from_tmp = False
    dataset = JsonDataset(dataset_name)
    if not load_from_tmp:
        test_timer = Timer()
        test_timer.tic()
        if multi_gpu:
            num_images = len(dataset.get_roidb())
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = \
                multi_gpu_test_net_on_dataset(
                    weights_file, dataset_name, proposal_file, num_images,
                    output_dir
                )
        else:
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = test_net(
                weights_file, dataset_name, proposal_file, output_dir,
                gpu_id=gpu_id)
        test_timer.toc()
        logger.info('Total inference time: {:.3f}s'.format(
            test_timer.average_time))
    else:
        tmp_path = '/coco/results/detectron-output_mulres_intersup_mulsaclesup_lowfeat23_int05/test/dense_coco_2014_minival/generalized_rcnn/detections.pkl'
        print('detections results from: ', tmp_path)
        # Bug fix: open the pickle in binary mode and close the handle;
        # the original used text mode 'r' and leaked the file object.
        with open(tmp_path, 'rb') as tmp_file:
            tmp_pkl = pickle.load(tmp_file)
        all_boxes = tmp_pkl['all_boxes']
        all_segms = tmp_pkl['all_segms']
        all_keyps = tmp_pkl['all_keyps']
        # Older detections.pkl files may predate person masks.
        all_personmasks = tmp_pkl.get('all_personmasks')
        all_parss = tmp_pkl['all_parss']
        all_bodys = tmp_pkl['all_bodys']
    if cfg.VIS:
        vis_wholedataset(
            dataset_name,
            proposal_file,
            output_dir,
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            all_personmasks=all_personmasks,
            all_parss=all_parss,
            all_bodys=all_bodys,
            img_name=['COCO_val2014_000000464089.jpg'],
            show_box=False,
        )
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, all_personmasks,
        all_parss, all_bodys, output_dir)
    return results