Example #1
def multi_gpu_test_net_on_dataset(num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel('detection', num_images,
                                                   binary, output_dir)

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
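
Every example in this section persists its results through save_object. A minimal sketch of such a helper, assuming it simply pickles the object to disk; the real utility may differ (e.g., by writing through a temporary file for atomicity):

import os
import pickle

def save_object(obj, file_name):
    """Pickle a Python object to file_name (sketch; the real helper may
    also write via a temporary file for atomicity)."""
    file_name = os.path.abspath(file_name)
    with open(file_name, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)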
Example #2
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
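
The scoped-to-unscoped mapping goes through c2_utils.UnscopeName, which is not shown here. A plausible sketch, assuming Caffe2's '/' name-scope separator, so 'gpu_0/conv1_w' becomes 'conv1_w':

def UnscopeName(possibly_scoped_name):
    """Strip any name scopes, e.g. 'gpu_0/conv1_w' -> 'conv1_w'
    (sketch; assumes '/' is the scope separator)."""
    return possibly_scoped_name[possibly_scoped_name.rfind('/') + 1:]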
Example #3
def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    # Retrieve the test_net binary path
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Run inference in parallel in subprocesses
    outputs = subprocess_utils.process_in_parallel(
        'rpn_proposals', num_images, binary, output_dir
    )

    # Collate the results from each subprocess
    boxes, scores, ids = [], [], []
    for rpn_data in outputs:
        boxes += rpn_data['boxes']
        scores += rpn_data['scores']
        ids += rpn_data['ids']
    rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
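
All of these multi-GPU drivers delegate the fan-out to subprocess_utils.process_in_parallel. A simplified sketch of that contract; the command-line flags and the num_gpus parameter are assumptions, but the range-suffixed output names match the '*_range_%s_%s.pkl' convention used by test_net() and generate_rpn_on_range() later in this section:

import os
import pickle
import subprocess

def process_in_parallel(tag, total_range_size, binary, output_dir,
                        opts=(), num_gpus=8):
    """Sketch: run `binary` over contiguous index subranges, one subprocess
    per GPU, then collect the pickled per-range outputs in order. The real
    helper presumably reads the GPU count from cfg.NUM_GPUS."""
    # Split [0, total_range_size) into one contiguous subrange per GPU
    stride = (total_range_size + num_gpus - 1) // num_gpus
    subranges = [(start, min(start + stride, total_range_size))
                 for start in range(0, total_range_size, stride)]
    # Launch one subprocess per subrange; each saves its results with
    # save_object() under a range-suffixed name
    processes = [
        subprocess.Popen([binary, '--range', str(start), str(end),
                          '--output_dir', output_dir] + list(opts))
        for start, end in subranges
    ]
    for p in processes:
        assert p.wait() == 0, 'Subprocess failed'
    # Load each subprocess's pickled output, preserving subrange order
    outputs = []
    for start, end in subranges:
        range_file = os.path.join(
            output_dir, '{}_range_{}_{}.pkl'.format(tag, start, end))
        with open(range_file, 'rb') as f:
            outputs.append(pickle.load(f))
    return outputs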
Example #4
def _do_python_eval(json_dataset, output_dir='output'):
    info = voc_info(json_dataset)
    data_path = info['data_path']
    image_set = info['image_set']
    imagesetfile = os.path.join(data_path, image_set + '.txt')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    # load annots
    recs = {}
    for i, imagename in enumerate(imagenames):
        recs[imagename] = parse_rec(json_dataset, imagename)
        if i % 100 == 0:
            print('Reading annotation for {:d}/{:d}'.format(
                i + 1, len(imagenames)))

    aps = []
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset).format(cls)
        rec, prec, ap = voc_eval(filename, imagenames, cls, recs, ovthresh=0.5)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    A = np.array(aps)
    m_ap = A[~np.isnan(A)].mean()
    logger.info('Mean AP@0.5 = {:.4f}'.format(m_ap))
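
Classes without valid detections can produce a NaN AP, which the masking above drops before averaging; np.nanmean expresses the same computation in one call:

m_ap = np.nanmean(np.array(aps))  # equivalent to A[~np.isnan(A)].mean()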
Example #5
def multi_gpu_test_net_on_dataset(
    weights_file, dataset_name, proposal_file, num_images, output_dir
):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        cfg.CFG_FILE, num_images, binary, output_dir, weights_file
    )

    # Collate the results from each subprocess
    all_scores = []
    for det_data in outputs:
        all_scores_batch = det_data['all_scores']
        all_scores += all_scores_batch

    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_scores=all_scores,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_scores
Example #6
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()

    _log_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.5,
                                iou_high=0.95,
                                output_dir=output_dir)
    _log_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.5,
                                iou_high=0.5,
                                output_dir=output_dir)
    _log_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.75,
                                iou_high=0.75,
                                output_dir=output_dir)

    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
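
_log_detection_eval_metrics is not shown here; presumably iou_low/iou_high select the slice of COCOeval's IoU thresholds to average over. A sketch of that computation under pycocotools conventions (the area index 0 = 'all' and maxDets index 2 = 100 are assumptions):

import numpy as np

def mean_ap_over_iou_range(coco_eval, iou_low, iou_high):
    iou_thrs = coco_eval.params.iouThrs  # default: 0.5:0.05:0.95
    lo = int(np.where(np.isclose(iou_thrs, iou_low))[0][0])
    hi = int(np.where(np.isclose(iou_thrs, iou_high))[0][0])
    # precision shape: [num_ious, num_recalls, num_classes, num_areas,
    # num_maxdets]; -1 marks empty cells
    precision = coco_eval.eval['precision'][lo:hi + 1, :, :, 0, 2]
    return np.mean(precision[precision > -1])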
Example #7
def test_retinanet(ind_range=None):
    """
    Test a RetinaNet model either on the entire dataset or on the subset of
    the dataset specified by the index range.
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    roidb = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        roidb = roidb[start:end]
    # Create and load the model
    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(
            model, cfg.TEST.WEIGHTS, broadcast=False
        )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    # Compute the detections
    all_boxes = im_list_detections(model, roidb)
    # Save the detections
    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(all_boxes=all_boxes, cfg=cfg_yaml),
        det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes
Example #8
def multi_gpu_generate_rpn_on_dataset(weights_file, dataset_name,
                                      _proposal_file_ignored, num_images,
                                      output_dir):
    """Multi-gpu inference on a dataset."""
    # Retrieve the test_net binary path
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    opts += ['TEST.WEIGHTS', weights_file]

    # Run inference in parallel in subprocesses
    outputs = subprocess_utils.process_in_parallel('rpn_proposals', num_images,
                                                   binary, output_dir, opts)

    # Collate the results from each subprocess
    boxes, scores, ids = [], [], []
    for rpn_data in outputs:
        boxes += rpn_data['boxes']
        scores += rpn_data['scores']
        ids += rpn_data['ids']
    rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml),
                rpn_file)
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
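
A consumer can reload the collated proposals directly, since save_object pickles the plain dict shown above; a minimal sketch (rpn_file as returned by this function):

import pickle

with open(rpn_file, 'rb') as f:
    proposals = pickle.load(f)
boxes, scores, ids = proposals['boxes'], proposals['scores'], proposals['ids']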
Example #9
def _eval_discovery(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']

    cachedir = os.path.join(devkit_path,
                            'annotations_dis_cache_{}'.format(year))
    corlocs = []
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        corloc = dis_eval(filename,
                          anno_path,
                          image_set_path,
                          cls,
                          cachedir,
                          ovthresh=0.5)
        corlocs += [corloc]
        logger.info('CorLoc for {} = {:.4f}'.format(cls, corloc))
        res_file = os.path.join(output_dir, cls + '_corloc.pkl')
        save_object({'corloc': corloc}, res_file)
    logger.info('Mean CorLoc = {:.4f}'.format(np.mean(corlocs)))

    print('~' * 20)
    print('Final CorLoc Results:')
    for corloc in corlocs:
        print('{:.1f}'.format(corloc * 100), end=', ')
    print('{:.1f}'.format(np.mean(corlocs) * 100))
    print('~' * 20)
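
dis_eval computes CorLoc (correct localization): the fraction of images containing the class whose top-scoring detection overlaps a ground-truth box with IoU >= ovthresh. A self-contained sketch of the metric itself, not of dis_eval's file handling; the (x1, y1, x2, y2) box format is an assumption:

import numpy as np

def corloc(top_boxes, gt_boxes, ovthresh=0.5):
    """top_boxes: {image_id: [x1, y1, x2, y2]}; gt_boxes: {image_id: Nx4}."""
    hits = 0
    for img_id, box in top_boxes.items():
        gts = gt_boxes[img_id]
        # Intersection of the top box with every ground-truth box
        ix1 = np.maximum(gts[:, 0], box[0])
        iy1 = np.maximum(gts[:, 1], box[1])
        ix2 = np.minimum(gts[:, 2], box[2])
        iy2 = np.minimum(gts[:, 3], box[3])
        inter = np.maximum(ix2 - ix1 + 1., 0.) * np.maximum(iy2 - iy1 + 1., 0.)
        union = ((box[2] - box[0] + 1.) * (box[3] - box[1] + 1.) +
                 (gts[:, 2] - gts[:, 0] + 1.) * (gts[:, 3] - gts[:, 1] + 1.) -
                 inter)
        if np.max(inter / union) >= ovthresh:
            hits += 1
    return hits / float(len(top_boxes))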
Example #10
def multi_gpu_test_retinanet_on_dataset(num_images, output_dir, dataset):
    """
    For multi-gpu testing, divide the data across the GPUs and launch a
    subprocess for each child process, which runs test_retinanet() on its
    subset of the data. After all subprocesses finish, combine the results
    and return them.
    """
    # Retrieve the test_net binary path
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Run inference in parallel in subprocesses
    outputs = subprocess_utils.process_in_parallel('detection', num_images,
                                                   binary, output_dir)

    # Combine the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]

    # Save the computed detections
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(all_boxes=all_boxes, cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes
Example #12
def evaluate_proposal_file(dataset, proposal_file, output_dir):
    """Evaluate box proposal average recall."""
    roidb = dataset.get_roidb(gt=True, proposal_file=proposal_file)
    results = task_evaluation.evaluate_box_proposals(dataset, roidb)
    task_evaluation.log_box_proposal_results(results)
    recall_file = os.path.join(output_dir, 'rpn_proposal_recall.pkl')
    save_object(results, recall_file)
    return results
Example #14
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    print('Wrote json eval results to: {}'.format(eval_file))
Example #15
def multi_gpu_test_net_on_dataset(args, dataset_name, proposal_file,
                                  num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    OVERWRITE = True
    if OVERWRITE:
        # Run inference in parallel in subprocesses. Outputs will be a list
        # of outputs from each subprocess, where the output of each
        # subprocess is the dictionary saved by test_net().
        outputs = subprocess_utils.process_in_parallel(  # calls 'test_net', see below
            'detection', num_images, binary, output_dir, args.load_ckpt,
            args.load_detectron, opts)
    else:
        if '_part' in dataset_name:
            pkl_file = 'detections_part.pkl'
        else:
            pkl_file = 'detections.pkl'
        outputs = []
        with open(os.path.join(output_dir, pkl_file), 'rb') as f:
            outputs.append(pickle.load(f))

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(
                1, cfg.MODEL.NUM_CLASSES):  # exclude 'background' class here
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]

    if OVERWRITE:
        if '_part' in dataset_name:
            det_file = os.path.join(output_dir, 'detections_part.pkl')
        else:
            det_file = os.path.join(output_dir, 'detections.pkl')
        cfg_yaml = yaml.dump(cfg)
        save_object(
            dict(all_boxes=all_boxes,
                 all_segms=all_segms,
                 all_keyps=all_keyps,
                 cfg=cfg_yaml), det_file)
        logger.info('Wrote detections to: {}'.format(
            os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
Example #16
def _do_segmentation_eval(json_dataset, res_file, output_dir, c_type=None):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm', c_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #17
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #18
def multi_gpu_test_net_on_dataset(args, dataset_name, proposal_file,
                                  num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, output_dir, args.load_ckpt,
        args.load_detectron, args.net_name, args.mlp_head_dim,
        args.heatmap_kernel_size, args.part_crop_size, args.use_kps17, opts)

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_hois = {}
    all_losses = defaultdict(list)
    all_keyps_vcoco = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        all_hois = {**all_hois, **det_data['all_hois']}
        for k, v in det_data['all_losses'].items():
            all_losses[k].extend(v)

        all_keyps_vcoco_batch = det_data['all_keyps_vcoco']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
            all_keyps_vcoco[cls_idx] += all_keyps_vcoco_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             all_hois=all_hois,
             all_keyps_vcoco=all_keyps_vcoco,
             all_losses=all_losses,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps, all_hois, all_keyps_vcoco, all_losses
Example #19
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    detection_AP_each = _log_detection_eval_metrics(json_dataset, coco_eval)
    pandas.DataFrame(detection_AP_each).to_csv('detection_AP_each_group.csv')
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #20
def _wad_do_detection_eval(args, json_dataset, res_file, output_dir):
    coco_dt = json_dataset.WAD_CVPR2018.loadRes(str(res_file))
    coco_gt = json_dataset.WAD_CVPR2018.loadGt(args.range, 'boxes')
    coco_eval = WAD_eval(args, coco_gt, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #21
def _apolloscape_do_segmentation_eval(args, json_dataset, res_file, output_dir):
    coco_dt = json_dataset.ApolloScape.loadRes(str(res_file))
    coco_gt = json_dataset.ApolloScape.loadGt(json_dataset.roidb, args.range, 'segms')
    coco_eval = WAD_eval(args, coco_gt, coco_dt, 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #22
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.params.imgIds = json_dataset.test_img_ids
    coco_eval.params.iouThrs = np.linspace(.4, .6, 3, endpoint=True)
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #23
def _do_detection_eval(json_dataset, res_file, output_dir):
    class_indices = [i - 1 for i in json_dataset.list]
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    # Use the custom evaluator so results can be summarized per class
    coco_eval = customCOCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    # Summarize only the selected class indices instead of using the
    # default logging helper
    coco_eval.summarize(class_index=class_indices)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #24
def multi_gpu_test_net_on_dataset(args, dataset_name, proposal_file,
                                  num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    if args.do_val:
        opts += ['--do_val']
    if args.do_vis:
        opts += ['--do_vis']
    if args.do_special:
        opts += ['--do_special']
    if args.use_gt_boxes:
        opts += ['--use_gt_boxes']
    if args.use_gt_labels:
        opts += ['--use_gt_labels']

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel('rel_detection', num_images,
                                                   binary, output_dir,
                                                   args.load_ckpt,
                                                   args.load_detectron, opts)

    # Collate the results from each subprocess
    all_results = []
    for det_data in outputs:
        all_results += det_data

    if args.use_gt_boxes:
        if args.use_gt_labels:
            det_file = os.path.join(args.output_dir,
                                    'rel_detections_gt_boxes_prdcls.pkl')
        else:
            det_file = os.path.join(args.output_dir,
                                    'rel_detections_gt_boxes_sgcls.pkl')
    else:
        det_file = os.path.join(args.output_dir, 'rel_detections.pkl')
    save_object(all_results, det_file)
    logger.info('Wrote rel_detections to: {}'.format(
        os.path.abspath(det_file)))

    return all_results
Example #25
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    ann_type = 'keypoints'
    imgIds = json_dataset.COCO.getImgIds()
    imgIds.sort()
    coco_dt = json_dataset.COCO.loadRes(res_file)
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
    coco_eval.params.imgIds = imgIds
    coco_eval.evaluate()
    coco_eval.accumulate()
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    coco_eval.summarize()
    return coco_eval
Example #27
def multi_gpu_test_net_on_dataset(args, dataset_name, proposal_file,
                                  num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    '''This function just allocate the processes, it doesn't contain any thing on core algorithm'''
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel('detection', num_images,
                                                   binary, output_dir,
                                                   args.load_ckpt,
                                                   args.load_detectron, opts)

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        # Each subprocess passes its output back through a pickle file. This
        # is not merely a log: the pickle preserves the full result
        # hierarchy, so the detection results can be read back directly.
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
Example #28
def multi_gpu_test_net_on_dataset(
    weights_file, dataset_name, proposal_file, num_images, output_dir
):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    if not os.path.exists(binary):
        binary = os.path.join(binary_dir, 'tools/test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    opts += ['TEST.WEIGHTS', weights_file]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, output_dir, opts
    )

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
Example #29
def generate_rpn_on_range(
    weights_file,
    dataset_name,
    _proposal_file_ignored,
    output_dir,
    ind_range=None,
    gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    roidb, start_ind, end_ind, total_num_images = get_roidb(
        dataset_name, ind_range
    )
    logger.info(
        'Output will be saved to: {:s}'.format(os.path.abspath(output_dir))
    )

    model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)
    nu.initialize_gpu_from_weights_file(
        model, weights_file, gpu_id=gpu_id,
    )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)

    boxes, scores, ids = generate_proposals_on_roidb(
        model,
        roidb,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images,
        gpu_id=gpu_id,
    )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_%s_%s.pkl' % tuple(ind_range)
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
Example #30
def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(year) < 2010
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    for _, cls in enumerate(json_dataset.classes):
        if cls in ('__background__', 'ignored_regions', 'others'):
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        #print("filename:",filename)
        #print(os.path.exists(filename))
        #exit()
        rec, prec, ap = voc_eval(filename,
                                 anno_path,
                                 image_set_path,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
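
The use_07_metric flag selects the 11-point interpolated AP used before VOC 2010; from 2010 on, AP integrates the precision envelope over all recall points. A standard implementation of both variants, as found in common voc_eval ports:

import numpy as np

def voc_ap(rec, prec, use_07_metric=False):
    """AP from recall/precision arrays. With use_07_metric, use the VOC07
    11-point interpolation; otherwise integrate the precision envelope."""
    if use_07_metric:
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.
            ap += p / 11.
        return ap
    # Append sentinel values and take the monotonically decreasing envelope
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum (delta recall) * precision at the points where recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])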
Example #31
def multi_gpu_test_net_on_dataset(
    weights_file, dataset_name, proposal_file, num_images, output_dir
):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    opts += ['TEST.WEIGHTS', weights_file]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, output_dir, opts
    )

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
Example #32
def test_retinanet(ind_range=None):
    """
    Test a RetinaNet model either on the entire dataset or on the subset of
    the dataset specified by the index range.
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    im_list = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        im_list = im_list[start:end]
        logger.info('Testing on roidb range: {}-{}'.format(start, end))
    else:
        # If testing over the whole dataset, use the NUM_TEST_IMAGES setting;
        # NUM_TEST_IMAGES may be set to a small number of images for quick
        # debugging.
        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(model,
                                        cfg.TEST.WEIGHTS,
                                        broadcast=False)
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    boxes, scores, classes, image_ids = im_list_detections(
        model, im_list[0:cfg.TEST.NUM_TEST_IMAGES])

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'retinanet_detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(boxes=boxes,
             scores=scores,
             classes=classes,
             ids=image_ids,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return boxes, scores, classes, image_ids
Example #33
def generate_rpn_on_range(weights_file,
                          dataset_name,
                          _proposal_file_ignored,
                          output_dir,
                          ind_range=None,
                          gpu_id=0):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    roidb, start_ind, end_ind, total_num_images = get_roidb(
        dataset_name, ind_range)
    logger.info('Output will be saved to: {:s}'.format(
        os.path.abspath(output_dir)))

    model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)
    nu.initialize_gpu_from_weights_file(
        model,
        weights_file,
        gpu_id=gpu_id,
    )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)

    boxes, scores, ids = generate_proposals_on_roidb(
        model,
        roidb,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images,
        gpu_id=gpu_id,
    )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_%s_%s.pkl' % tuple(ind_range)
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    save_object(dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml),
                rpn_file)
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
Example #34
def test_retinanet_on_dataset(multi_gpu=False):
    """
    Main entry point for testing on a given dataset, with or without multi-GPU.
    """
    output_dir = get_output_dir(training=False)
    logger.info('Output will be saved to: {:s}'.format(os.path.abspath(output_dir)))

    dataset = JsonDataset(cfg.TEST.DATASET)
    # for test-dev or full test dataset, we generate detections for all images
    if 'test-dev' in cfg.TEST.DATASET or 'test' in cfg.TEST.DATASET:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())

    if multi_gpu:
        num_images = cfg.TEST.NUM_TEST_IMAGES
        boxes, scores, classes, image_ids = multi_gpu_test_retinanet_on_dataset(
            num_images, output_dir, dataset
        )
    else:
        boxes, scores, classes, image_ids = test_retinanet()

    # write RetinaNet detections pkl file to be used for various purposes
    # dump the boxes first just in case there are spurious failures
    res_file = os.path.join(output_dir, 'retinanet_detections.pkl')
    logger.info(
        'Writing roidb detections to file: {}'.
        format(os.path.abspath(res_file))
    )
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids),
        res_file
    )
    logger.info('Wrote RetinaNet detections to {}'.format(os.path.abspath(res_file)))

    # Write the detections to a file that can be uploaded to the COCO
    # evaluation server, which expects JSON format
    res_file = write_coco_detection_results(
        output_dir, dataset, boxes, scores, classes, image_ids)

    # Perform coco evaluation
    coco_eval = coco_evaluate(dataset, res_file, image_ids)

    box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
    return OrderedDict([(dataset.name, box_results)])
Example #35
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info('Saving parameters and momentum to {}'.format(
        os.path.abspath(weights_file)))
    blobs = {}

    # A little surgery to address the problem that SpatialBN doesn't save
    # *_rm and *_riv as model parameters at training time but requires them
    # at inference time. Stupid caffe2.
    for scoped_name in workspace.Blobs():
        if scoped_name.endswith("_rm") or scoped_name.endswith("_riv"):
            param = core.BlobReference(scoped_name)
            model.params.append(param)

    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(' {:s} -> {:s} (preserved)'.format(
                    scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
Example #36
def test_retinanet_on_dataset(multi_gpu=False):
    """
    Main entry point for testing on a given dataset, with or without multi-GPU.
    """
    output_dir = get_output_dir(training=False)
    logger.info('Output will be saved to: {:s}'.format(
        os.path.abspath(output_dir)))

    dataset = JsonDataset(cfg.TEST.DATASET)
    # for test-dev or full test dataset, we generate detections for all images
    if 'test-dev' in cfg.TEST.DATASET or 'test' in cfg.TEST.DATASET:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())

    if multi_gpu:
        num_images = cfg.TEST.NUM_TEST_IMAGES
        boxes, scores, classes, image_ids = multi_gpu_test_retinanet_on_dataset(
            num_images, output_dir, dataset)
    else:
        boxes, scores, classes, image_ids = test_retinanet()

    # write RetinaNet detections pkl file to be used for various purposes
    # dump the boxes first just in case there are spurious failures
    res_file = os.path.join(output_dir, 'retinanet_detections.pkl')
    logger.info('Writing roidb detections to file: {}'.format(
        os.path.abspath(res_file)))
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids),
        res_file)
    logger.info('Wrote RetinaNet detections to {}'.format(
        os.path.abspath(res_file)))

    # Write the detections to a file that can be uploaded to the COCO
    # evaluation server, which expects JSON format
    res_file = write_coco_detection_results(output_dir, dataset, boxes, scores,
                                            classes, image_ids)

    # Perform coco evaluation
    coco_eval = coco_evaluate(dataset, res_file, image_ids)

    box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
    return OrderedDict([(dataset.name, box_results)])
Example #37
def test_retinanet(ind_range=None):
    """
    Test a RetinaNet model either on the entire dataset or on the subset of
    the dataset specified by the index range.
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    im_list = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        im_list = im_list[start:end]
        logger.info('Testing on roidb range: {}-{}'.format(start, end))
    else:
        # If testing over the whole dataset, use the NUM_TEST_IMAGES setting;
        # NUM_TEST_IMAGES may be set to a small number of images for quick
        # debugging.
        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(
            model, cfg.TEST.WEIGHTS, broadcast=False
        )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    boxes, scores, classes, image_ids = im_list_detections(
        model, im_list[0:cfg.TEST.NUM_TEST_IMAGES])

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'retinanet_detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids, cfg=cfg_yaml),
        det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return boxes, scores, classes, image_ids
Example #38
def multi_gpu_test_net_on_dataset(
        args, dataset_name, proposal_file, num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    tag = 'discovery' if 'train' in dataset_name else 'detection'
    outputs = subprocess_utils.process_in_parallel(
        tag, num_images, binary, output_dir,
        args.load_ckpt, args.load_detectron, opts
    )

    # Collate the results from each subprocess
    all_boxes = {}
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_boxes.update(all_boxes_batch)
    if 'train' in dataset_name:
        det_file = os.path.join(output_dir, 'discovery.pkl')
    else:
        det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_boxes=all_boxes,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes
Example #39
def generate_rpn_on_range(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        ind_range=None,
        gpu_id=0
):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    roidb, start_ind, end_ind, total_num_images = get_roidb(
        dataset_name, ind_range
    )
    logger.info(
        'Output will be saved to: {:s}'.format(os.path.abspath(output_dir))
    )

    model = initialize_model_from_cfg(args, gpu_id=gpu_id)

    boxes, scores, ids = generate_proposals_on_roidb(
        model,
        roidb,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images
    )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_{}_{}.pkl'.format(ind_range[0], ind_range[1])
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
Example #40
def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(year) < 2010
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(
            json_dataset, salt).format(cls)
        rec, prec, ap = voc_eval(
            filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
Example #41
def multi_gpu_test_net_on_dataset(num_images, output_dir):
    """Multi-gpu inference on a dataset."""
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)

    # Run inference in parallel in subprocesses
    # Outputs will be a list of outputs from each subprocess, where the output
    # of each subprocess is the dictionary saved by test_net().
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, output_dir
    )

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
Example #42
def test_net(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        ind_range=None,
        gpu_id=0):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range
    )
    model = initialize_model_from_cfg(args, gpu_id=gpu_id)
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        if cfg.TEST.PRECOMPUTED_PROPOSALS:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue
        else:
            # Faster R-CNN type models generate proposals on-the-fly with an
            # in-network RPN; 1-stage models don't require proposals.
            box_proposals = None

        im = cv2.imread(entry['image'])
        cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
            model, im, box_proposals, timers
        )

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (
                timers['im_detect_bbox'].average_time +
                timers['im_detect_mask'].average_time +
                timers['im_detect_keypoints'].average_time
            )
            misc_time = (
                timers['misc_bbox'].average_time +
                timers['misc_mask'].average_time +
                timers['misc_keypoints'].average_time
            )
            logger.info(
                (
                    'im_detect: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, det_time, misc_time, eta
                )
            )

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis'),
                cls_boxes_i,
                segms=cls_segms_i,
                keypoints=cls_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
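
test_net() relies on empty_results() and extend_results() to maintain the [class][image] result lists that the multi-GPU drivers above collate. A plausible sketch, assuming the layout implied by the collation loops:

def empty_results(num_classes, num_images):
    """One empty slot per (class, image); index 0 is the background class."""
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    return all_boxes, all_segms, all_keyps

def extend_results(index, all_res, im_res):
    """Write one image's per-class results into slot `index`."""
    for cls_idx in range(1, len(im_res)):  # skip background
        all_res[cls_idx][index] = im_res[cls_idx]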