Exemplo n.º 1
0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset and evaluate the results.

    Dispatches to the multi-GPU driver when requested; otherwise runs a
    single-GPU pass on the given gpu_id. Returns the task_evaluation
    results dict.
    """
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps, all_bodys = test_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id
        )
    else:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps, all_bodys = \
            multi_gpu_test_net_on_dataset(
                weights_file, dataset_name, proposal_file, num_images,
                output_dir
            )
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, all_bodys, output_dir
    )
Exemplo n.º 2
0
def generate_rpn_on_dataset(
    weights_file,
    dataset_name,
    _proposal_file_ignored,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Generate RPN proposals for a whole dataset and evaluate them.

    ``_proposal_file_ignored`` is unused (the RPN produces its own
    proposals) and exists only for signature compatibility.
    """
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        # A single process covers the entire dataset range by default.
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(
            weights_file, dataset_name, _proposal_file_ignored, output_dir,
            gpu_id=gpu_id
        )
    else:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, num_images,
            output_dir
        )
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
 def get_roidb(dataset_name, proposal_file, is_source=True):
     """Build the training roidb for a single dataset.

     Loads ground-truth entries (optionally merged with precomputed
     proposals), optionally filters them down to a globally tracked VOC
     subset, and optionally appends horizontally-flipped copies.

     Args:
         dataset_name: dataset identifier understood by JsonDataset.
         proposal_file: path to precomputed proposals, or None.
         is_source: forwarded to JsonDataset.get_roidb; presumably flags
             source- vs. target-domain data — confirm against JsonDataset.
     """
     ds = JsonDataset(dataset_name)
     roidb = ds.get_roidb(
         gt=True,
         proposal_file=proposal_file,
         crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
         is_source=is_source
     )

     # Consume this dataset's slice of the shared subset mask, so that
     # successive get_roidb calls walk through subset_pointer.subset in
     # dataset order (subset_pointer is expected at module scope).
     if cfg.VOC_SUBSET != '' and subset_pointer is not None:
         # print(len(voc_subset))
         voc_subset = subset_pointer.subset
         this_sub = voc_subset[:len(roidb)]
         subset_pointer.subset = voc_subset[len(roidb):]
         # print('remains',len(get_roidb.voc_subset)) # should have 0 remains for the last set, voc_2012_train.

         # # for pruning disk space:
         # import os
         # for taking, roi in zip(this_sub,roidb):
         #     if not taking:
         #         os.remove(roi['image'])

         # filter roidb:
         roidb = [roi for taking,roi in zip(this_sub,roidb) if taking]

     if cfg.TRAIN.USE_FLIPPED:
         logger.info('Appending horizontally-flipped training examples...')
         extend_with_flipped_entries(roidb, ds)
     logger.info('Loaded dataset: {:s}'.format(dataset_name))
     return roidb
def generate_rpn_on_dataset(weights_file,
                            dataset_name,
                            _proposal_file_ignored,
                            output_dir,
                            multi_gpu=False,
                            gpu_id=0):
    """Run RPN proposal generation over an entire dataset, then score the
    resulting proposal file.

    ``_proposal_file_ignored`` exists only so this function matches the
    signature of its detection counterpart; the RPN needs no input
    proposals.
    """
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        image_count = len(dataset.get_roidb())
        outputs = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, image_count,
            output_dir)
    else:
        # The default range spans the whole dataset.
        outputs = generate_rpn_on_range(weights_file,
                                        dataset_name,
                                        _proposal_file_ignored,
                                        output_dir,
                                        gpu_id=gpu_id)
    _boxes, _scores, _ids, rpn_file = outputs
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
Exemplo n.º 5
0
def get_roidb_and_dataset(dataset_name,
                          proposal_file,
                          ind_range,
                          gt_cls=False):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.

    gt_cls is forwarded as the `gt` flag when no precomputed proposals are
    configured.
    """
    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT
        )
    else:
        roidb = dataset.get_roidb(gt=gt_cls)

    # total_num_images always reflects the unsliced dataset size.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]

    return roidb, dataset, start, end, total_num_images
Exemplo n.º 6
0
def test_cls_net_on_dataset(weights_file,
                            dataset_name,
                            proposal_file,
                            output_dir,
                            multi_gpu=False,
                            gpu_id=0):
    """Run classification inference on a dataset and report accuracy.

    Returns a dict with a single "Accuracy" entry.
    """
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        acc = test_cls_net(weights_file, dataset_name, proposal_file,
                           output_dir, gpu_id=gpu_id)
    else:
        acc = multi_gpu_test_cls_net_on_dataset(
            len(dataset.get_roidb()), output_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    logger.info(
        'Classification Accuracy on TEST data is: {:.2f}%'.format(acc * 100))

    return {"Accuracy": acc}
Exemplo n.º 7
0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run detection inference over a dataset and evaluate all tasks."""
    dataset = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        image_total = len(dataset.get_roidb())
        detections = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, image_total, output_dir
        )
    else:
        detections = test_net(
            weights_file, dataset_name, proposal_file, output_dir,
            gpu_id=gpu_id
        )
    all_boxes, all_segms, all_keyps = detections
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.

    The special name 'live_targets' short-circuits to a live-capture roidb
    backed by the dummy COCO class list (always full range, never sliced).
    """
    if dataset_name == 'live_targets':
        from detectron.datasets.live_dataset import LiveRoidb
        roidb = LiveRoidb()
        import detectron.datasets.dummy_datasets as dummy_datasets
        json_dataset = dummy_datasets.get_coco_dataset()
        if not cfg.TRAIN.USE_FLIPPED:
            logger.info(
                'Live target data set will use flipped examples anyway!')
        logger.info('"Loaded" dataset: {:s}'.format('live_targets'))
        return roidb, json_dataset, 0, len(roidb), len(roidb)

    dataset = JsonDataset(dataset_name)
    if cfg.TEST.PRECOMPUTED_PROPOSALS:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(
            proposal_file=proposal_file,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT)
    else:
        roidb = dataset.get_roidb()

    # total_num_images is the unsliced dataset size.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]

    return roidb, dataset, start, end, total_num_images
def accuracy(dataset, detections_pkl):
    """Score a per-image presence accuracy against ground truth.

    An image counts as correct when "has any GT box" agrees with "has any
    detection for class index 3". Prints and returns the accuracy.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb(gt=True)
    all_boxes = load_object(detections_pkl)['all_boxes']

    def _per_image(ix, per_class):
        # Empty per-class lists have no per-image slots to index into.
        return per_class if len(per_class) == 0 else per_class[ix]

    num_correct = 0.
    for ix, entry in enumerate(roidb):
        boxes_i = [_per_image(ix, cls_boxes) for cls_boxes in all_boxes]
        has_gt = entry['boxes'].shape[0] > 0
        has_det = len(boxes_i[3]) > 0
        if has_gt == has_det:
            num_correct += 1
    acc = num_correct / len(roidb)
    print("Accuracy: " + str(acc))
    return acc
Exemplo n.º 10
0
 def get_roidb(dataset_name, proposal_file):
     """Build the training roidb, appending flipped entries if configured."""
     ds = JsonDataset(dataset_name)
     roidb = ds.get_roidb(
         gt=True,
         proposal_file=proposal_file,
         crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
     )
     if cfg.TRAIN.USE_FLIPPED:
         logger.info('Appending horizontally-flipped training examples...')
         extend_with_flipped_entries(roidb, ds)
     logger.info('Loaded dataset: {:s}'.format(ds.name))
     return roidb
Exemplo n.º 11
0
 def get_roidb(dataset_name, proposal_file):
     """Load ground-truth roidb entries (plus proposals) for training."""
     ds = JsonDataset(dataset_name)
     roidb = ds.get_roidb(gt=True,
                          proposal_file=proposal_file,
                          crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH)
     if cfg.TRAIN.USE_FLIPPED:
         logger.info('Appending horizontally-flipped training examples...')
         extend_with_flipped_entries(roidb, ds)
     logger.info('Loaded dataset: {:s}'.format(ds.name))
     return roidb
Exemplo n.º 12
0
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset, evaluate standard detection metrics, and
    additionally compute ROC/FROC curves with AUC/AFROC scores, saving the
    plots and raw curve arrays to output_dir.

    Returns:
        (results, auc_score, afroc_score) where results is the standard
        task_evaluation dict augmented with AUC and AFROC entries under
        the 'box' key.
    """
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    # Placeholder: the multi-GPU path does not return a model handle.
    model = ''
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps, model = test_net(weights_file,
                                                          dataset_name,
                                                          proposal_file,
                                                          output_dir,
                                                          gpu_id=gpu_id)

    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)

    # ROC/FROC curves computed at the configured IoU threshold.
    roc_data = metrics.calculate_roc(all_boxes, dataset, cfg.TEST.IOU)
    froc_data = metrics.calculate_froc(all_boxes, dataset, cfg.TEST.IOU)
    auc_score = {
        dataset.name: {
            u'box': {
                u'AUC': auc(roc_data[0], roc_data[1])
            }
        }
    }
    # AFROC: area under the FROC curve via trapezoidal integration.
    afroc_score = np.trapz(froc_data[0], froc_data[2])
    afroc = {dataset.name: {u'box': {u'AFROC': afroc_score}}}
    print('Afroc score: {:.4f}'.format(afroc_score))

    plot.plot_roc(roc_data, auc_score[dataset.name][u'box'][u'AUC'], dataset,
                  model, output_dir)
    plot.plot_froc(froc_data, dataset, model, output_dir)
    plot.plot_afroc(froc_data, dataset, model, output_dir)

    # Persist the raw curve data for later re-plotting.
    save.np_save(np.stack(roc_data), 'roc', dataset, model, output_dir)
    save.np_save(np.stack(froc_data), 'froc', dataset, model, output_dir)

    # Fold the extra metrics into the standard results dict.
    results[dataset_name][u'box'].update(auc_score[dataset.name][u'box'])
    results[dataset_name][u'box'].update(afroc[dataset.name][u'box'])
    return results, auc_score, afroc_score
Exemplo n.º 13
0
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Visualize stored detections on top of dataset images.

    Args:
        dataset: dataset name understood by JsonDataset.
        detections_pkl: path to a detections file in the format written by
            test_engine.py (keys 'all_boxes', 'all_segms', 'all_keyps').
        thresh: score threshold below which detections are not drawn.
        output_dir: directory under which a 'vis' subdirectory is written.
        limit: if > 0, stop after this many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()

    # Bug fix: pickle files must be opened in binary mode; text mode ('r')
    # fails outright on Python 3 (matches the sibling vis implementation).
    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    assert all(k in dets for k in ['all_boxes', 'all_segms', 'all_keyps']), \
        'Expected detections pkl file in the format used by test_engine.py'

    all_boxes = dets['all_boxes']
    all_segms = dets['all_segms']
    all_keyps = dets['all_keyps']

    def id_or_index(ix, val):
        # Empty per-class entries stay empty; otherwise pick this image's
        # slot.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    for ix, entry in enumerate(roidb):
        if limit > 0 and ix >= limit:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, len(roidb)))

        im = cv2.imread(entry['image'])
        im_name = os.path.splitext(os.path.basename(entry['image']))[0]

        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        cls_segms_i = [
            id_or_index(ix, cls_k_segms) for cls_k_segms in all_segms
        ]
        cls_keyps_i = [
            id_or_index(ix, cls_k_keyps) for cls_k_keyps in all_keyps
        ]

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            cls_boxes_i,
            segms=cls_segms_i,
            keypoints=cls_keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
Exemplo n.º 14
0
def vis(dataset, detections_pkl, thresh, output_dir, limit=0):
    """Draw saved detections over each image of a dataset.

    Results go to the 'vis' subdirectory of output_dir; a positive `limit`
    stops processing after that many images.
    """
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb()

    with open(detections_pkl, 'rb') as f:
        dets = pickle.load(f)

    expected_keys = ['all_boxes', 'all_segms', 'all_keyps']
    assert all(k in dets for k in expected_keys), \
        'Expected detections pkl file in the format used by test_engine.py'

    def per_image(ix, per_class):
        # An empty per-class list has no per-image slots to index into.
        return per_class[ix] if len(per_class) != 0 else per_class

    num_images = len(roidb)
    for ix, entry in enumerate(roidb):
        if 0 < limit <= ix:
            break
        if ix % 10 == 0:
            print('{:d}/{:d}'.format(ix + 1, num_images))

        image_path = entry['image']
        im = cv2.imread(image_path)
        im_name = os.path.splitext(os.path.basename(image_path))[0]

        boxes_i = [per_image(ix, c) for c in dets['all_boxes']]
        segms_i = [per_image(ix, c) for c in dets['all_segms']]
        keyps_i = [per_image(ix, c) for c in dets['all_keyps']]

        vis_utils.vis_one_image(
            im[:, :, ::-1],  # convert BGR (OpenCV) to RGB
            '{:d}_{:s}'.format(ix, im_name),
            os.path.join(output_dir, 'vis'),
            boxes_i,
            segms=segms_i,
            keypoints=keyps_i,
            thresh=thresh,
            box_alpha=0.8,
            dataset=ds,
            show_class=True
        )
Exemplo n.º 15
0
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset, reusing cached results when available.

    Fast paths, in order:
      1. a COCO-format results json in output_dir: only re-run evaluation;
      2. a detections.pkl in output_dir: evaluate the stored detections.
    Otherwise inference runs on one or multiple GPUs.
    """
    import pickle
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    # Fast path 1: an evaluation-ready results json already exists.
    res_file = os.path.join(output_dir,
                            'bbox_' + dataset_name + '_results.json')
    print("res_file = {}==========================".format(res_file))
    if os.path.exists(res_file):
        import detectron.datasets.json_dataset_evaluator as json_dataset_evaluator
        print("res_file = {} exists! Loading res_file".format(res_file))
        coco_eval = json_dataset_evaluator._do_detection_eval(
            dataset, res_file, output_dir)
        box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
        results = OrderedDict([(dataset.name, box_results)])
        return results
    # Fast path 2: raw detections were saved by a previous run.
    det_name = "detections.pkl"
    det_file = os.path.join(output_dir, det_name)
    print("det_file = {}==========================".format(det_file))
    if os.path.exists(det_file):
        print("{} exists! Loading detection results".format(det_file))
        # Bug fix: open the pickle in binary mode and close the handle
        # deterministically (the original leaked a text-mode file object).
        with open(det_file, 'rb') as f:
            res = pickle.load(f)
        all_boxes = res['all_boxes']
        all_segms = res['all_segms']
        all_keyps = res['all_keyps']
    elif multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(weights_file,
                                                   dataset_name,
                                                   proposal_file,
                                                   output_dir,
                                                   gpu_id=gpu_id)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)
    return results
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0,
                        subset_pointer=None):
    """Run inference on a dataset and evaluate, with optional subset tracking.

    Datasets whose name starts with 'live_' are run without building a
    JsonDataset and skip evaluation entirely (the function returns None).

    Args:
        subset_pointer: optional holder object whose .subset list is
            advanced past this dataset's entries after evaluation, so the
            next dataset consumes the remainder.
    """
    # NOTE(review): when dataset_name starts with 'live_' AND multi_gpu is
    # True, `dataset` is never bound and dataset.get_roidb() below would
    # raise NameError — presumably live datasets are single-GPU only;
    # confirm.
    if dataset_name[:5] != 'live_':
        dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file,
            dataset_name,
            proposal_file,
            output_dir,
            gpu_id=gpu_id,
            subset_pointer=subset_pointer)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))

    # Optionally remap COCO-class detections onto the VOC label set.
    if cfg.TEST.COCO_TO_VOC:
        all_boxes = coco_detects_to_voc(all_boxes)

    # Live datasets have no annotations to evaluate against.
    if dataset_name[:5] == 'live_':
        return None

    results = task_evaluation.evaluate_all(dataset,
                                           all_boxes,
                                           all_segms,
                                           all_keyps,
                                           output_dir,
                                           subset_pointer=subset_pointer)

    if subset_pointer is not None:
        # prune the subset for the following datasets:
        subset_pointer.subset = subset_pointer.subset[len(dataset.get_roidb()
                                                          ):]
        print('remains', len(subset_pointer.subset)
              )  # should have 0 remains for the last set, voc_2012_train.

    return results
Exemplo n.º 17
0
def do_reval(dataset_name, output_dir, args):
    """Re-evaluate saved detections after re-filtering by score threshold."""
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    saved_cfg = core_config.load_cfg(dets['cfg'])
    if args.cfg_file is None:
        core_config._merge_a_into_b(saved_cfg, cfg)
    else:
        core_config.merge_cfg_from_cfg(saved_cfg)

    # Keep only boxes scoring strictly above cfg.TEST.SCORE_THRESH
    # (column 4 holds the score).
    filtered = []
    for cls in dets['all_boxes']:
        filtered.append([
            im[im[:, 4] > cfg.TEST.SCORE_THRESH, :] if len(im) != 0 else []
            for im in cls
        ])
    dets['all_boxes'] = filtered

    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
def validate_tracking_params(weights_file,
                             dataset_name):
    """Load (generating if needed) tracking parameters for a validation set.

    Returns per-class score thresholds, the observation model, and the EKF
    measurement covariance, all read from text files in the dataset's
    output directory.
    """
    dataset = JsonDataset(dataset_name)
    output_dir = os.path.abspath(get_output_dir(dataset.name, training=False))

    # Tracking parameter files produced by a previous evaluation run.
    class_thresh_file = os.path.join(output_dir, 'AP_thresholds.txt')
    obs_model_file = os.path.join(output_dir, 'observation_model.txt')
    meas_cov_file = os.path.join(output_dir, 'meas_cov.txt')

    param_files = (class_thresh_file, obs_model_file, meas_cov_file)
    if not all(os.path.exists(p) for p in param_files):
        logger.info('validation files for validation dataset %s do not '
                    'exist. Generating them now' % (dataset.name))
        # Inference over all images plus the evaluation script writes the
        # validation files.
        test_net_on_dataset(weights_file, dataset.name, None, output_dir)

    # Read the parameters back from disk.
    cla_thresh = load_class_thresholds(class_thresh_file, dataset)
    observation_model = np.loadtxt(obs_model_file, delimiter=',')
    ekf_sensor_noise = np.loadtxt(meas_cov_file, delimiter=',')

    return cla_thresh, observation_model, ekf_sensor_noise
Exemplo n.º 19
0
def get_roidb(dataset_name, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    roidb = JsonDataset(dataset_name).get_roidb()

    # total_num_images always reflects the unsliced dataset size.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]

    return roidb, start, end, total_num_images
Exemplo n.º 20
0
def get_roidb(dataset_name, ind_range):
    """Load the roidb for `dataset_name`, optionally sliced to ind_range.

    When ind_range is a (start, end) pair only that slice is returned;
    total_num_images always reflects the full dataset size.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    total_num_images = len(roidb)

    if ind_range is not None:
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = total_num_images

    return roidb, start, end, total_num_images
Exemplo n.º 21
0
 def get_roidb(dataset_info, proposal_file):
     """Build the training roidb with optional flipping and augmentation."""
     ds = JsonDataset(dataset_info)
     roidb = ds.get_roidb(
         gt=True,
         proposal_file=proposal_file,
         crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
     )
     if cfg.TRAIN.USE_FLIPPED:
         logger.info('Appending horizontally-flipped training examples...')
         extend_with_flipped_entries(roidb, ds)
     # TT: Augmentation
     if cfg.TRAIN.USE_TRANSFORMATION:
         logger.info('Appending augmented training examples...')
         aug_count = int(cfg.TRAIN.TRANSFORM_SAMPLES)
         extend_with_augmented_entries(roidb, ds, aug_samples=aug_count)
     # TT: end
     logger.info('Loaded dataset: {:s}'.format(ds.name))
     return roidb
def main(args):
    """Run per-frame detection over a hard-coded demo video and write one
    visualized jpg per frame into args.output_dir."""
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dataset_name = cfg.TEST.DATASETS[0]
    dummy_coco_dataset = JsonDataset(dataset_name)
    # dummy_coco_dataset = dummy_datasets.get_paris_dataset()

    # NOTE(review): input video path is hard-coded; consider promoting it
    # to a command-line argument.
    vid_dir = '/coco/paris_dataset/PARIS_demo.mp4'
    cap = cv2.VideoCapture(vid_dir)
    ret, im = cap.read()
    count = 0
    # Loop until the capture stops yielding frames.
    while ret:
        im_name = str(count)
        # out_name is computed but never used below; vis_one_image derives
        # its own output path from im_name and args.output_dir.
        out_name = os.path.join(args.output_dir,
                                '{}'.format(str(count) + '.jpg'))
        logger.info('Processing frame -> {}'.format(count))
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
                model, im, None, timers=timers)
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))

        vis_im = vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
        # cv2.imshow('frame', vis_im)
        # cv2.waitKey(10)

        ret, im = cap.read()
        count += 1
    cap.release()
    cv2.destroyAllWindows()
Exemplo n.º 23
0
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    dataset = JsonDataset(dataset_name)
    if not cfg.TEST.PRECOMPUTED_PROPOSALS:
        roidb = dataset.get_roidb()
    else:
        assert proposal_file, 'No proposal file given'
        roidb = dataset.get_roidb(proposal_file=proposal_file,
                                  proposal_limit=cfg.TEST.PROPOSAL_LIMIT)

    # total_num_images always reflects the unsliced dataset size.
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]

    return roidb, dataset, start, end, total_num_images
Exemplo n.º 24
0
def do_reval(dataset_name, output_dir, args):
    """Re-evaluate detections saved in output_dir under the stored config."""
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    saved_cfg = core_config.load_cfg(dets['cfg'])
    if args.cfg_file is None:
        core_config._merge_a_into_b(saved_cfg, cfg)
    else:
        core_config.merge_cfg_from_cfg(saved_cfg)

    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
Exemplo n.º 25
0
def do_reval(dataset_name, output_dir, args):
    """Re-run evaluation on detections stored in output_dir.

    When args.cfg_file is given, that config file is merged in; otherwise
    the config saved alongside the detections is used.
    """
    dataset = JsonDataset(dataset_name)
    det_path = os.path.join(output_dir, 'detections.pkl')
    with open(det_path, 'rb') as f:
        dets = pickle.load(f)
    # Override config with the one saved in the detections file
    if args.cfg_file is None:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)
    else:
        # bug: loads only already stored cfg
        # core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
        # merge config from passed config file!!
        core_config.merge_cfg_from_file(args.cfg_file)
    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
def run_tracking(validation_dataset, tracking_datasets, weights_file, timestep,
                 use_hmm=True, no_filtering=False, visualize=False, step=False):
    """Run detection (and optionally Kalman filtering) on each tracking
    dataset, then evaluate tracking jointly over all of them."""
    class_thresh, obs_model, meas_cov = validate_tracking_params(
        weights_file, validation_dataset)

    res_files = []
    json_datasets = []
    for tracking_set_name in tracking_datasets:
        tracking_set_json = JsonDataset(tracking_set_name)

        detections, res_file = get_detections(weights_file,
                                              tracking_set_json,
                                              class_thresh)

        if not no_filtering:
            # Smooth the raw detections with a Kalman filter and replace the
            # results file with the filtered detections.
            filtered_boxes, filtered_depths = do_kalman_filtering(
                detections,
                tracking_set_json,
                timestep,
                meas_cov,
                obs_model,
                use_hmm=use_hmm,
                viz=visualize,
                step=step)
            res_file = write_filtered_detections(
                tracking_set_json, filtered_boxes, filtered_depths, use_hmm)

        res_files.append(res_file)
        json_datasets.append(tracking_set_json)

    # Evaluate all datasets together under one combined output directory.
    tracking_dataset_name = '-'.join(tracking_datasets)
    tracking_output_dir = os.path.abspath(
        get_output_dir(tracking_dataset_name, training=False))
    evaluate_tracking(res_files, json_datasets, tracking_output_dir,
                      use_matlab=True)
Exemplo n.º 27
0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import cv2
from PIL import Image
import numpy as np
from detectron.datasets.json_dataset import JsonDataset
# Template COCO keypoints annotation file, loaded as a schema reference.
example_json = json.load(
    open('/coco/annotations/person_keypoints_minival2014.json'))
dataset = JsonDataset('paris_2019_train')
dataset_dir = '/coco/paris_dataset'
# Source annotations to be converted/inspected.
res_json = json.load(
    open(os.path.join(dataset_dir, 'paris_building_train.json')))
# Top-level keys and per-record fields of a COCO-style json.
json_keys = [u'images', u'annotations', u'categories']
im_info = [u'file_name', u'height', u'width', u'id']
ann_info = [
    u'bbox', u'segmentation', u'num_keypoints', u'area', u'iscrowd',
    u'image_id', u'category_id', u'id', u'keypoints'
]
# Accumulators for the converted dataset.
cat_info = []
paris_data = []
classes = {}
folder_idx = 5
# NOTE(review): this rebinding shadows the ann_info field list above —
# presumably an index into some path/record structure; confirm intent.
ann_info = 6
images_list = []
anns_list = []
new_cat_id = 0
im_id = 0
Exemplo n.º 28
0
def main(args):
    """Render Detectron network graphs to .dot files and open them in xdot.

    Behaviour is controlled by flags passed through ``args.opts``:
    ``minimal``, ``train``, ``forward``, ``shapes`` and ``params``.
    When ``shapes`` is requested the model is run once so every blob
    acquires a concrete shape before drawing.
    """
    # Decode option flags from args.opts (all default off except HIDE_PARAMS).
    MINIMAL = False
    TRAIN = False
    FORWARD = False
    SHAPES = False
    HIDE_PARAMS = True
    opts = args.opts
    if opts is not None:
        MINIMAL = 'minimal' in opts
        TRAIN = 'train' in opts
        FORWARD = 'forward' in opts
        SHAPES = 'shapes' in opts
        HIDE_PARAMS = 'params' not in opts

    if SHAPES and args.model_file is None:
        raise ValueError('Specify model file')
    MODEL_FILE = args.model_file
    NET_NAMES = args.net_names

    # Choose the pydot renderer once, up front.
    if MINIMAL:
        def get_dot_graph(net, shapes):
            return net_drawer.GetPydotGraphMinimal(net, rankdir="BT")
    else:
        def get_dot_graph(net, shapes):
            return net_drawer.GetPydotGraph(
                net, rankdir="BT", shapes=shapes, hide_params=HIDE_PARAMS)

    # Build the configuration for a single-GPU visualization run.
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    cfg.NUM_GPUS = 1
    cfg.VIS_NET = True
    if FORWARD:
        cfg.MODEL.FORWARD_ONLY = True
    assert_and_infer_cfg(cache_urls=False)

    if SHAPES and TRAIN:
        raise NotImplementedError

    if SHAPES:
        # Run the model once so every blob gets a concrete shape.
        model = infer_engine.initialize_model_from_cfg(MODEL_FILE)
        workspace.RunNetOnce(model.param_init_net)
        nu.broadcast_parameters(model)

        dataset = JsonDataset(cfg.TRAIN.DATASETS[0])
        roidb = dataset.get_roidb()

        with c2_utils.NamedCudaScope(0):
            if cfg.MODEL.TRACKING_ON:
                # Tracking models consume image pairs.
                pair = [roidb[0], roidb[1]]
                ims = [cv2.imread(entry['image']) for entry in pair]
                infer_engine.multi_im_detect_all(model, ims, [None, None])
            else:
                infer_engine.im_detect_all(model, roidb[0]['image'], None)
    else:
        model = model_builder.create(cfg.MODEL.TYPE, train=TRAIN)

    # Close any xdot windows left over from previous runs.
    subprocess.call(["killall", "xdot"])

    # Render every requested net that actually exists on the model.
    suffix = 'train' if TRAIN else 'infer'
    for net_name in NET_NAMES:
        net = getattr(model, net_name, None)
        if not net:
            continue
        print('processing graph {}...'.format(net_name))
        graph = get_dot_graph(net.Proto(), shapes=SHAPES)
        graph_dir = os.path.join(args.output_dir, cfg.MODEL.TYPE)
        if not os.path.exists(graph_dir):
            os.makedirs(graph_dir)
        dot_name = os.path.join(graph_dir,
                                '{}_{}.dot'.format(net_name, suffix))
        graph.write_dot(dot_name)
        subprocess.Popen(['xdot', dot_name])
Exemplo n.º 29
0
 def get_roidb(dataset_name, proposal_file):
     """Load the ground-truth roidb for ``dataset_name`` with proposals."""
     dataset = JsonDataset(dataset_name)
     return dataset.get_roidb(
         gt=True,
         proposal_file=proposal_file,
         crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH,
     )
Exemplo n.º 30
0
from __future__ import unicode_literals

import _init_paths
import numpy as np
import h5py
import sys

from detectron.datasets.json_dataset import JsonDataset
from detectron.utils.io import save_object

# Convert proposal boxes stored in an HDF5 file into Detectron's proposal
# format, matching each image's boxes to the dataset roidb by position.
# Usage: python <script> <dataset_name> <input.h5> <output_file>
if __name__ == '__main__':
    dataset_name = sys.argv[1]
    file_in = sys.argv[2]
    file_out = sys.argv[3]  # presumably passed to save_object; remainder of script truncated in this view

    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb()

    # Per-image accumulators for proposal boxes, scores and image ids.
    boxes = []
    scores = []
    ids = []

    with h5py.File(file_in, 'r') as f:
        raw_boxes = f['boxes']
        num_imgs = len(raw_boxes)
        # The HDF5 file must contain exactly one box array per roidb entry.
        assert num_imgs == len(roidb)

        for ind in range(num_imgs):
            # Progress indicator every 1000 images.
            if ind % 1000 == 0:
                print('{}/{}'.format(ind + 1, len(roidb)))
Exemplo n.º 31
0
def visualize_ranking(dataset, detections_pkl, opts):
    """Rank images by detection score and plot top hits next to error cases.

    Loads detections from ``detections_pkl``, scores every roidb image by the
    sum of its top ``opts.box_count`` detection scores above
    ``opts.threshold``, suppresses geographically nearby duplicates using the
    coordinates in ``opts.coord_file``, and saves a grid figure to
    ``ranking.png``: top-ranked predictions on one axis, random false
    positives/negatives on the other.

    Args:
        dataset: JSON dataset name understood by JsonDataset.
        detections_pkl: path to a pickled dict with an 'all_boxes' entry.
        opts: options object; uses class_id, threshold, box_count,
            coord_file, min_distance, image_count, angle, box_thickness.
    """

    # Load predictions and ground truths
    ds = JsonDataset(dataset)
    roidb = ds.get_roidb(gt=True)
    dets = load_object(detections_pkl)
    all_boxes = dets['all_boxes']

    def id_or_index(ix, val):
        # Per-class detections may be empty; otherwise select this image's entry.
        if len(val) == 0:
            return val
        else:
            return val[ix]

    # Load coordinates
    # assumes coord_file maps image basename -> (lat, lon) — TODO confirm
    with open(opts.coord_file) as json_file:
        coord_data = json.load(json_file)

    # Iterate through all images and note false positive and negatives, as well as entry scores
    false_positives = []
    false_negatives = []
    scores = []
    for ix, entry in enumerate(roidb):
        cls_boxes_i = [
            id_or_index(ix, cls_k_boxes) for cls_k_boxes in all_boxes
        ]
        # Detections for the class of interest; last column is the score.
        preds = np.array(cls_boxes_i[opts.class_id])
        entry['preds'] = preds  # NOTE: mutates the roidb entry in place
        true_boxes = entry['boxes']
        if preds.shape[0] > 0 and np.max(preds[:, -1]) > opts.threshold:
            box_scores = preds[:, -1]
            box_scores = box_scores[np.where(box_scores > opts.threshold)]
            # Image score = sum of the top `box_count` above-threshold scores.
            score = np.sum(
                box_scores[np.argsort(box_scores)[-opts.box_count:]])
            scores.append([entry, score])
            if true_boxes.shape[0] == 0:
                false_positives.append(entry)
        else:
            if true_boxes.shape[0] > 0:
                false_negatives = add_negative(false_negatives, entry,
                                               coord_data, opts.min_distance)

    # Find top rated entries
    # `scores` becomes an object array of [entry, score] pairs, sorted
    # descending by score.
    scores = np.array(scores)
    scores = scores[np.argsort(scores[:, 1])[::-1]]

    for entry in scores[:, 0]:
        entry['coords'] = coord_data[os.path.split(entry['image'])[-1]]
    # Filter by proximity
    # Zero out the score of any lower-ranked image within `min_distance`
    # meters of a higher-ranked one (geodesic distance, km -> m).
    for i in range(scores.shape[0]):
        if scores[i][1] > 0:
            current_entry = scores[i][0]
            for j in range(i + 1, scores.shape[0]):
                second_entry = scores[j][0]
                dist = distance(
                    (current_entry['coords'][0], current_entry['coords'][1]),
                    (second_entry['coords'][0],
                     second_entry['coords'][1])).km * 1000
                if dist < opts.min_distance:
                    scores[j][1] = 0
    scores = scores[np.where(scores[:, 1] > 0)]
    top_entries = scores[np.argsort(scores[:, 1])[-opts.image_count:][::-1]]

    # Choose random negative samples
    false_samples = np.append(false_negatives, false_positives)
    np.random.shuffle(false_samples)

    # Visualize positive and negative samples
    # 'ver' stacks the two categories side by side per row; otherwise two rows.
    rows_cols = (opts.image_count,
                 2) if opts.angle == 'ver' else (2, opts.image_count)
    plt_shape = (6., opts.image_count *
                 2.5) if opts.angle == 'ver' else (opts.image_count * 2.5, 6.)
    fig = plt.figure(1, plt_shape)

    grid = ImageGrid(
        fig,
        111,
        nrows_ncols=rows_cols,
        axes_pad=0.03,
        label_mode='L',
    )
    # Show top ranked images
    for i, result in enumerate(top_entries):
        entry = result[0]
        score = result[1]
        grid_idx = i
        if opts.angle == 'ver':
            grid_idx = i * 2
        # Load image and add bounding boxes
        im = cv2.imread(entry['image'])
        preds = entry['preds']
        true_boxes = entry['boxes']
        # Ground truth in _GT_COLOR; boxes are converted (x1,y1,x2,y2) -> (x,y,w,h).
        for bbox in true_boxes:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                _GT_COLOR, opts.box_thickness)
        count = 0
        # Draw up to `box_count` above-threshold predictions in _PRED_COLOR.
        for bbox in preds:
            if bbox[-1] > opts.threshold:
                count += 1
                print(
                    os.path.split(entry['image'])[-1] + ': ' + str(bbox[0:4]))
                im = vis_bbox(
                    im,
                    (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                    _PRED_COLOR, opts.box_thickness)
            if count >= opts.box_count:
                break
        # Adjust grid setting
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_img(grid, im, grid_idx)
        t = grid[grid_idx].text(12,
                                42,
                                "Score: " + str(round(score, 3)),
                                fontsize=8,
                                bbox=dict(boxstyle='square',
                                          fc='white',
                                          ec='none',
                                          alpha=0.6))
        if i == 0:
            if opts.angle == 'ver':
                grid[grid_idx].set_title("Top\nPredictions", size=18)
            else:
                grid[grid_idx].set_ylabel("Top Predictions", fontsize=13)
    # Show random negative samples (false positive, false negative)
    for i, entry in enumerate(false_samples):
        if i >= opts.image_count:
            break
        grid_idx = opts.image_count + i
        if opts.angle == 'ver':
            grid_idx = 2 * i + 1
        # Load image and add bounding boxes
        im = cv2.imread(entry['image'])
        preds = entry['preds']
        true_boxes = entry['boxes']
        for bbox in true_boxes:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                _GT_COLOR, opts.box_thickness)
        for bbox in preds:
            if bbox[-1] > opts.threshold:
                im = vis_bbox(
                    im,
                    (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]),
                    _PRED_COLOR, opts.box_thickness)
        # Adjust grid setting
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        grid[grid_idx].imshow(im)
        grid[grid_idx].grid(False)
        grid[grid_idx].set_xticks([])
        grid[grid_idx].set_yticks([])
        if i == 0:
            if opts.angle == 'ver':
                grid[grid_idx].set_title("Errors", size=18)
            else:
                grid[grid_idx].set_ylabel("Errors", fontsize=13)
    plt.axis('off')
    plt.subplots_adjust(hspace=1)
    plt.savefig("ranking.png", dpi=300, bbox_inches='tight')
Exemplo n.º 32
0
    def get_roidb(dataset_name, proposal_file):
        """Load the ground-truth roidb used for training.

        Args:
            dataset_name: name of a JSON dataset registered with Detectron.
            proposal_file: path to precomputed proposals, or None.

        Returns:
            The roidb list, extended with horizontally-flipped entries when
            ``cfg.TRAIN.USE_FLIPPED`` is set.
        """
        ds = JsonDataset(dataset_name)
        roidb = ds.get_roidb(gt=True,
                             proposal_file=proposal_file,
                             crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH)
        # NOTE(review): several class-rebalancing / oversampling experiments
        # previously lived here behind `if 0:` guards. They were unreachable
        # dead code (and the `sum1` / `roidb0` locals only fed them), so they
        # have been removed; live behavior is unchanged.
        print('ok')

        if cfg.TRAIN.USE_FLIPPED:
            logger.info('Appending horizontally-flipped training examples...')
            extend_with_flipped_entries(roidb, ds)
        logger.info('Loaded dataset: {:s}'.format(ds.name))
        return roidb
Exemplo n.º 33
0
    offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
    return transform_matrix


# input_dir = '/mnt/genesis/vegas/Databases/CBIS-DDSM_resized/Train/Ground Truth'
# output = '/mnt/genesis/vegas/transformed'
# test = '/mnt/genesis/vegas'

# Machine-specific paths for the CBIS-DDSM mammography dataset.
input_dir = '/mnt/Cargo_2/Diploma_Thesis/Databases/CBIS-DDSM/Train/Ground Truth'
# output = '/mnt/genesis/vegas/transformed'
test = '/home/gru'


ds = JsonDataset('CBIS_DDSM_train')
roidb = ds.get_roidb(gt=True,
                     proposal_file=None,
                     crowd_filter_thresh=0.7
                     )

# if not os.path.isdir(output):
#     os.mkdir(output)

# All ground-truth files under input_dir (NOTE(review): `file` shadows the
# Python 2 builtin; `images` is not used in the visible part of the script).
images = [os.path.join(root, file) for root, _, files in os.walk(os.path.join(input_dir, 'Ground Truth')) for file in files]
for i in range(len(roidb)):
    # Only process the single debug image of interest.
    if '00465_LEFT_CC' not in roidb[i]['image']:
        continue
    # angle = uniform(-20, 20)
    # Fixed rotation angle (random sampling disabled, presumably for
    # reproducibility — loop body continues past this view).
    angle = 17.87189165364299
    # shear = uniform(-0.2, 0.2)
Exemplo n.º 34
0
from __future__ import print_function
from __future__ import unicode_literals

import cPickle as pickle  # NOTE(review): cPickle is Python-2 only
import numpy as np
import scipy.io as sio
import sys

from detectron.datasets.json_dataset import JsonDataset

# Convert selective-search proposals stored in a MATLAB .mat file into
# Detectron's proposal format (boxes/scores/ids aligned with the roidb).
# Usage: python <script> <dataset_name> <input.mat> <output_file>
if __name__ == '__main__':
    dataset_name = sys.argv[1]
    file_in = sys.argv[2]
    file_out = sys.argv[3]  # presumably written at the end — truncated in this view

    ds = JsonDataset(dataset_name)
    roidb = ds.get_roidb()
    raw_data = sio.loadmat(file_in)['boxes'].ravel()
    # One cell of boxes per image, in roidb order.
    assert raw_data.shape[0] == len(roidb)

    boxes = []
    scores = []
    ids = []
    for i in range(raw_data.shape[0]):
        # Progress indicator every 1000 images.
        if i % 1000 == 0:
            print('{}/{}'.format(i + 1, len(roidb)))
        # selective search boxes are 1-indexed and (y1, x1, y2, x2)
        i_boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
        boxes.append(i_boxes.astype(np.float32))
        # Selective search provides no scores; fill with zeros.
        scores.append(np.zeros((i_boxes.shape[0]), dtype=np.float32))
        ids.append(roidb[i]['id'])
Exemplo n.º 35
0
def main(args):
    """Run detection inference over a folder (or list) of images.

    Appends per-image detection boxes to a TSV table in ``args.output_dir``
    and, when ``vis`` is enabled, writes a per-image visualization too.
    """
    datasetName = 'logo_1048_test'  #'furniture_val'
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    vis = True  #False
    shuffleList = False
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    if args.cls_thrsh_file is not None:
        # Per-class score thresholds, one "<class>\t<threshold>" per line.
        # Use a context manager so the file handle is not leaked.
        with open(args.cls_thrsh_file, 'r') as thrsh_f:
            class_thresholds = {
                l.split('\t')[0]: float(l.rstrip().split('\t')[1])
                for l in thrsh_f.readlines()
            }
        print(class_thresholds)
    else:
        class_thresholds = None
    dataset = JsonDataset(datasetName)
    print(args.im_or_folder)

    # Collect image paths. Default to an empty list so a non-directory input
    # results in no work instead of a NameError at the loop below.
    im_list = []
    if osp.isdir(args.im_or_folder):
        if args.im_list is None:
            # BUG FIX: the globbed paths were previously reduced to bare
            # basenames, so cv2.imread() below could only find the images
            # when the process happened to run inside that folder. Keep the
            # full paths returned by glob.
            im_list = glob.glob(args.im_or_folder + '/*.' + args.image_ext)
        else:
            # Image names come from the first column of the list file.
            with open(args.im_list, 'r') as list_f:
                im_list = [
                    l.rstrip().split('\t')[0] + '.jpg'
                    for l in list_f.readlines()
                ]
            im_list = [osp.join(args.im_or_folder, n) for n in im_list]
            print(im_list[0])

    if shuffleList:
        from random import shuffle
        shuffle(im_list)
    checkMkdir(args.output_dir)
    outTable = osp.join(args.output_dir,
                        'HF_CT_Measurement_Detected_Boxes.tsv')
    with open(outTable, 'wb') as fout:
        for i, im_name in enumerate(im_list):
            im = cv2.imread(im_name)
            timers = defaultdict(Timer)
            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                    model, im, None, timers=timers)
            if i == 0:
                # (escaped the stray backslash; runtime message is unchanged)
                logger.info(
                    ' \\ Note: inference on the first image will be slower than the '
                    'rest (caches and auto-tuning need to warm up)')

            # im[:, :, ::-1] converts BGR -> RGB for output/visualization.
            outStrings = output_detbbox_one_image(
                im[:, :, ::-1],
                im_name,
                args.output_dir,
                cls_boxes,
                cls_segms,
                cls_keyps,
                dataset=dataset,  #dummy_coco_dataset,
                box_alpha=0.3,
                show_class=True,
                class_thresholds=class_thresholds,
                thresh=0.5,
                kp_thresh=2)
            if outStrings is not None:
                fout.write(outStrings)

            if vis:
                vis_utils.vis_detbbox_one_image(  #vis_one_image(
                    im[:, :, ::-1],  # BGR -> RGB for visualization
                    im_name,
                    args.output_dir,
                    cls_boxes,
                    cls_segms,
                    cls_keyps,
                    dataset=dataset,  #dummy_coco_dataset,
                    box_alpha=0.3,
                    show_class=True,
                    class_thresholds=class_thresholds,
                    thresh=0.5,
                    kp_thresh=2)
Exemplo n.º 36
0
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    while not os.path.exists(cfg.TEST.WEIGHTS) and args.wait:
        logger.info('Waiting for \'{}\' to exist...'.format(cfg.TEST.WEIGHTS))
        time.sleep(10)

    all_results = run_inference(cfg.TEST.WEIGHTS,
                                ind_range=args.range,
                                multi_gpu_testing=args.multi_gpu_testing,
                                check_expected_results=False,
                                evaluation=False)
    all_boxes = all_results['all_boxes']

    test_dataset = JsonDataset(cfg.TEST.DATASETS[0])

    image_set = test_dataset.name.split('_')[-1]
    root_path = DATASETS[test_dataset.name][ROOT_DIR]
    image_set_path = os.path.join(root_path, 'ImageSets', 'Main',
                                  image_set + '.txt')
    with open(image_set_path, 'r') as f:
        image_index = [x.strip() for x in f.readlines()]

    test_roidb = test_dataset.get_roidb()
    for i, entry in enumerate(test_roidb):
        index = os.path.splitext(os.path.split(entry['image'])[1])[0]
        assert index == image_index[i]

    # crop images based on detected boxes and store into imgs_crop
    imgs_crop = []
Exemplo n.º 37
0
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset and evaluate all predicted outputs.

    When ``load_from_tmp`` is flipped to True, the (expensive) inference
    step is skipped and detections are loaded from a previously saved
    detections.pkl instead (see ``tmp_path``).
    """
    # Debug switch: re-evaluate cached detections instead of running the net.
    load_from_tmp = False
    dataset = JsonDataset(dataset_name)
    if not load_from_tmp:
        test_timer = Timer()
        test_timer.tic()
        if multi_gpu:
            num_images = len(dataset.get_roidb())
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = \
                multi_gpu_test_net_on_dataset(
                    weights_file, dataset_name, proposal_file,
                    num_images, output_dir
                )
        else:
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = test_net(
                weights_file,
                dataset_name,
                proposal_file,
                output_dir,
                gpu_id=gpu_id)
        test_timer.toc()
        logger.info('Total inference time: {:.3f}s'.format(
            test_timer.average_time))
    else:
        tmp_path = '/coco/results/detectron-output_mulres_intersup_mulsaclesup_lowfeat23_int05/test/dense_coco_2014_minival/generalized_rcnn/detections.pkl'
        print('detections results from: ', tmp_path)
        # BUG FIX: the pickle was opened in text mode ('r') and the handle
        # was never closed. Pickled data must be read in binary mode; use a
        # context manager so the file is always closed.
        with open(tmp_path, 'rb') as tmp_file:
            tmp_pkl = pickle.load(tmp_file)
        all_boxes = tmp_pkl['all_boxes']
        all_segms = tmp_pkl['all_segms']
        all_keyps = tmp_pkl['all_keyps']
        # Older detection dumps may predate the person-mask output.
        if 'all_personmasks' not in tmp_pkl:
            all_personmasks = None
        else:
            all_personmasks = tmp_pkl['all_personmasks']
        all_parss = tmp_pkl['all_parss']
        all_bodys = tmp_pkl['all_bodys']

        if cfg.VIS:
            vis_wholedataset(
                dataset_name,
                proposal_file,
                output_dir,
                all_boxes=all_boxes,
                all_segms=all_segms,
                all_keyps=all_keyps,
                all_personmasks=all_personmasks,
                all_parss=all_parss,
                all_bodys=all_bodys,
                img_name=['COCO_val2014_000000464089.jpg'],
                show_box=False,
            )
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, all_personmasks,
                                           all_parss, all_bodys, output_dir)
    return results