Code example #1
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    json_data, _, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    run_posetrack_tracking(test_output_dir, json_data)
Code example #2
File: eval_mpii.py Project: TPNguyen/DetectAndTrack
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
    run_mpii_eval(test_output_dir, roidb, dataset)
Code example #3
File: test_net.py Project: BMEI1314/Detectron
def main(ind_range=None, multi_gpu_testing=False):
    output_dir = get_output_dir(training=False)
    all_results = run_inference(
        output_dir, ind_range=ind_range, multi_gpu_testing=multi_gpu_testing
    )
    if not ind_range:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)
Code example #4
File: test_engine.py Project: ls200201020/Detectron
def run_inference(
    weights_file, ind_range=None, multi_gpu_testing=False, gpu_id=0
):
    parent_func, child_func = get_eval_functions()

    is_parent = ind_range is None
    if is_parent:
        # Parent case:
        # In this case we're either running inference on the entire dataset in a
        # single process or (if multi_gpu_testing is True) using this process to
        # launch subprocesses that each run inference on a range of the dataset
        all_results = {}
        for i in range(len(cfg.TEST.DATASETS)):
            dataset_name, proposal_file = get_inference_dataset(i)
            output_dir = get_output_dir(dataset_name, training=False)
            results = parent_func(
                weights_file,
                dataset_name,
                proposal_file,
                output_dir,
                multi_gpu=multi_gpu_testing
            )
            all_results.update(results)

        return all_results
    else:
        # Subprocess child case:
        # In this case test_net was called via subprocess.Popen to execute on a
        # range of inputs on a single dataset
        dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
        output_dir = get_output_dir(dataset_name, training=False)
        return child_func(
            weights_file,
            dataset_name,
            proposal_file,
            output_dir,
            ind_range=ind_range,
            gpu_id=gpu_id
        )
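The parent branch above fans inference out to one child process per index range via subprocess.Popen. Below is a minimal sketch of that dispatch pattern, assuming a hypothetical test_net.py script that accepts a --range START END argument (the names are illustrative; Detectron's actual dispatch lives in its subprocess utility module):

import subprocess
import sys

def run_in_ranges(num_images, num_procs):
    """Split [0, num_images) into contiguous ranges and run one child each."""
    per_proc = (num_images + num_procs - 1) // num_procs  # ceil division
    children = []
    for start in range(0, num_images, per_proc):
        end = min(start + per_proc, num_images)
        cmd = [sys.executable, 'test_net.py', '--range', str(start), str(end)]
        children.append(subprocess.Popen(cmd))
    # Each child writes its own per-range results file; wait for all of them
    for p in children:
        p.wait()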
Code example #5
def get_mixed_dataset(args):
    setID = args.setID
    repeat = args.repeat
    size = args.size
    ds_name = args.mixed_name

    mixedData = load_mixture_set(setID, repeat, size)
    train, test = mixedData["train"], mixedData["test"]
    roidbTr = train[0][ds_name]
    annoCountTr = train[1]
    print("annotation counts for training sets")
    print(len(roidbTr))
    print(annoCountTr)
    roidbTr = prepare_onlyA_roidb(roidbTr)
    output_dir = get_output_dir("{}_{}_{}".format(ds_name, size, repeat))
    return roidbTr, output_dir
Code example #6
File: test_engine.py Project: Alphonses/Detectron
def run_inference(ind_range=None, multi_gpu_testing=False, gpu_id=0):
    parent_func, child_func = get_eval_functions()

    is_parent = ind_range is None
    if is_parent:
        # Parent case:
        # In this case we're either running inference on the entire dataset in a
        # single process or (if multi_gpu_testing is True) using this process to
        # launch subprocesses that each run inference on a range of the dataset
        if len(cfg.TEST.DATASETS) == 0:
            cfg.TEST.DATASETS = (cfg.TEST.DATASET, )
            cfg.TEST.PROPOSAL_FILES = (cfg.TEST.PROPOSAL_FILE, )

        all_results = {}
        for i in range(len(cfg.TEST.DATASETS)):
            cfg.TEST.DATASET = cfg.TEST.DATASETS[i]
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                cfg.TEST.PROPOSAL_FILE = cfg.TEST.PROPOSAL_FILES[i]
            output_dir = get_output_dir(cfg.TEST.DATASET, training=False)
            results = parent_func(output_dir, multi_gpu=multi_gpu_testing)
            all_results.update(results)

        return all_results
    else:
        # Subprocess child case:
        # In this case test_net was called via subprocess.Popen to execute on a
        # range of inputs on a single dataset (i.e., use cfg.TEST.DATASET and
        # don't loop over cfg.TEST.DATASETS)
        output_dir = get_output_dir(cfg.TEST.DATASET, training=False)
        return child_func(output_dir, ind_range=ind_range, gpu_id=gpu_id)
Code example #7
File: train.py Project: zhengant/detectron-vlp
def create_model():
    """Build the model and look for saved model checkpoints in case we can
    resume from one.
    """
    logger = logging.getLogger(__name__)
    start_iter = 0
    checkpoints = {}
    output_dir = get_output_dir(training=True)
    weights_file = cfg.TRAIN.WEIGHTS
    if cfg.TRAIN.AUTO_RESUME:
        # Check for the final model (indicates training already finished)
        final_path = os.path.join(output_dir, 'model_final.pkl')
        if os.path.exists(final_path):
            logger.info('model_final.pkl exists; no need to train!')
            return None, None, None, {'final': final_path}, output_dir, None

        # Find the most recent checkpoint (highest iteration number)
        files = os.listdir(output_dir)
        for f in files:
            iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
            if len(iter_string) > 0:
                checkpoint_iter = int(iter_string[0])
                if checkpoint_iter > start_iter:
                    # Start one iteration immediately after the checkpoint iter
                    start_iter = checkpoint_iter + 1
                    resume_weights_file = f

        if start_iter > 0:
            # Override the initialization weights with the found checkpoint
            weights_file = os.path.join(output_dir, resume_weights_file)
            logger.info(
                '========> Resuming from checkpoint {} at start iter {}'.
                format(weights_file, start_iter))

    tb_dir = get_tb_dir(training=True)
    writer = SummaryWriter(tb_dir, tag="detectron")
    logger.info('Building model: {}'.format(cfg.MODEL.TYPE))
    model = model_builder.create(cfg.MODEL.TYPE, train=True, writer=writer)
    params = [blob._name for blob in model.TrainableParams(gpu_id=0)]
    for param in params:
        if 'mem' in param:
            model.AddSummaryHistogram(param)
    if cfg.MEMONGER:
        optimize_memory(model)
    # Performs random weight initialization as defined by the model
    workspace.RunNetOnce(model.param_init_net)
    return model, weights_file, start_iter, checkpoints, output_dir, writer
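The AUTO_RESUME scan above is the heart of checkpoint discovery. Pulled out as a stand-alone helper for clarity (the function name is ours, not Detectron's; the model_iter<N>.pkl convention is taken directly from the code above):

import os
import re

def find_latest_checkpoint(output_dir):
    """Return (start_iter, filename) for the newest model_iter<N>.pkl,
    or (0, None) if the directory holds no checkpoints."""
    start_iter, latest = 0, None
    for f in os.listdir(output_dir):
        iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
        if iter_string and int(iter_string[0]) + 1 > start_iter:
            # Resume one iteration after the checkpointed iteration
            start_iter = int(iter_string[0]) + 1
            latest = f
    return start_iter, latest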
Code example #8
def generate_rpn_on_dataset(multi_gpu=False):
    """Run inference on a dataset."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            num_images, output_dir)
    else:
        # Processes entire dataset range by default
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
Code example #9
File: rpn_generator.py Project: ArsenLuca/Detectron
def generate_rpn_on_dataset(multi_gpu=False):
    """Run inference on a dataset."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            num_images, output_dir
        )
    else:
        # Processes entire dataset range by default
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir)
Code example #10
def test_net_on_dataset(multi_gpu=False):
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    # Run tracking and eval for posetrack datasets
    if dataset.name.startswith('posetrack') or dataset.name.startswith(
            'kinetics'):
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        if dataset.name.startswith('posetrack'):
            score_ap, score_mot, apAll, preAll, recAll, mota = run_posetrack_tracking(
                output_dir, roidb)
            ##################### added by jianbo #############
            import os, json
            from core.config import get_log_dir_path
            tmp_dic = {
                "total_AP": score_ap.tolist(),
                "total_MOTA": score_mot.tolist(),
                "apAll": apAll.tolist(),
                "preAll": preAll.tolist(),
                "recAll": recAll.tolist(),
                "mota": mota.tolist()
            }
            dir_path = get_log_dir_path()
            if not os.path.exists(dir_path):
                os.mkdir(dir_path)
            # Dump the tracking/eval summary as JSON
            with open(dir_path + "/eval.json", "w") as f:
                f.write(json.dumps(tmp_dic))
            ##################### added by jianbo #############
        else:
            run_posetrack_tracking(output_dir, roidb)
    else:
        ###jianbo
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        run_posetrack_tracking(output_dir, roidb)
Code example #11
def test_net_on_dataset(multi_gpu=False):
    """Run inference on a dataset."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)
    return results
Code example #12
def test_retinanet(ind_range=None):
    """
    Test RetinaNet model either on the entire dataset or the subset of dataset
    specified by the index range
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    im_list = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        im_list = im_list[start:end]
        logger.info('Testing on roidb range: {}-{}'.format(start, end))
    else:
        # if testing over the whole dataset, use the NUM_TEST_IMAGES setting;
        # NUM_TEST_IMAGES can be set to a small number of images for quick
        # debugging
        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(model,
                                        cfg.TEST.WEIGHTS,
                                        broadcast=False)
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    boxes, scores, classes, image_ids = im_list_detections(
        model, im_list[0:cfg.TEST.NUM_TEST_IMAGES])

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'retinanet_detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(boxes=boxes,
             scores=scores,
             classes=classes,
             ids=image_ids,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return boxes, scores, classes, image_ids
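Several examples here persist results with save_object. Judging only from the call sites, it behaves as a thin pickle wrapper; a minimal stand-in follows (Detectron's actual helper may add safeguards such as atomic writes):

import pickle

def save_object(obj, file_name):
    """Minimal stand-in: serialize obj to file_name with pickle."""
    with open(file_name, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)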
Code example #13
File: test_retinanet.py Project: ArsenLuca/Detectron
def test_retinanet_on_dataset(multi_gpu=False):
    """
    Main entry point for testing on a given dataset: whether multi_gpu or not
    """
    output_dir = get_output_dir(training=False)
    logger.info('Output will be saved to: {:s}'.format(os.path.abspath(output_dir)))

    dataset = JsonDataset(cfg.TEST.DATASET)
    # for test-dev or full test dataset, we generate detections for all images
    if 'test-dev' in cfg.TEST.DATASET or 'test' in cfg.TEST.DATASET:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())

    if multi_gpu:
        num_images = cfg.TEST.NUM_TEST_IMAGES
        boxes, scores, classes, image_ids = multi_gpu_test_retinanet_on_dataset(
            num_images, output_dir, dataset
        )
    else:
        boxes, scores, classes, image_ids = test_retinanet()

    # write RetinaNet detections pkl file to be used for various purposes
    # dump the boxes first just in case there are spurious failures
    res_file = os.path.join(output_dir, 'retinanet_detections.pkl')
    logger.info(
        'Writing roidb detections to file: {}'.
        format(os.path.abspath(res_file))
    )
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids),
        res_file
    )
    logger.info('Wrote RetinaNet detections to {}'.format(os.path.abspath(res_file)))

    # Write the detections to a file that can be uploaded to coco evaluation server
    # which takes a json file format
    res_file = write_coco_detection_results(
        output_dir, dataset, boxes, scores, classes, image_ids)

    # Perform coco evaluation
    coco_eval = coco_evaluate(dataset, res_file, image_ids)

    box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
    return OrderedDict([(dataset.name, box_results)])
Code example #14
File: test_engine.py Project: ArsenLuca/Detectron
def test_net_on_dataset(multi_gpu=False):
    """Run inference on a dataset."""
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results
Code example #15
def test_retinanet_on_dataset(multi_gpu=False):
    """
    Main entry point for testing on a given dataset: whether multi_gpu or not
    """
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes = multi_gpu_test_retinanet_on_dataset(num_images, output_dir)
    else:
        all_boxes = test_retinanet()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, None, None, output_dir
    )
    return results
Code example #16
File: train_net.py Project: ls200201020/Detectron
def create_model():
    """Build the model and look for saved model checkpoints in case we can
    resume from one.
    """
    logger = logging.getLogger(__name__)
    start_iter = 0
    checkpoints = {}
    output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    weights_file = cfg.TRAIN.WEIGHTS
    if cfg.TRAIN.AUTO_RESUME:
        # Check for the final model (indicates training already finished)
        final_path = os.path.join(output_dir, 'model_final.pkl')
        if os.path.exists(final_path):
            logger.info('model_final.pkl exists; no need to train!')
            return None, None, None, {'final': final_path}, output_dir

        # Find the most recent checkpoint (highest iteration number)
        files = os.listdir(output_dir)
        for f in files:
            iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
            if len(iter_string) > 0:
                checkpoint_iter = int(iter_string[0])
                if checkpoint_iter > start_iter:
                    # Start one iteration immediately after the checkpoint iter
                    start_iter = checkpoint_iter + 1
                    resume_weights_file = f

        if start_iter > 0:
            # Override the initialization weights with the found checkpoint
            weights_file = os.path.join(output_dir, resume_weights_file)
            logger.info(
                '========> Resuming from checkpoint {} at start iter {}'.
                format(weights_file, start_iter)
            )

    logger.info('Building model: {}'.format(cfg.MODEL.TYPE))
    model = model_builder.create(cfg.MODEL.TYPE, train=True)
    if cfg.MEMONGER:
        optimize_memory(model)
    # Performs random weight initialization as defined by the model
    workspace.RunNetOnce(model.param_init_net)
    return model, weights_file, start_iter, checkpoints, output_dir
Code example #17
File: test_retinanet.py Project: ArsenLuca/Detectron
def test_retinanet(ind_range=None):
    """
    Test RetinaNet model either on the entire dataset or the subset of dataset
    specified by the index range
    """
    assert cfg.RETINANET.RETINANET_ON, \
        'RETINANET_ON must be set for testing RetinaNet model'
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    im_list = dataset.get_roidb()
    if ind_range is not None:
        start, end = ind_range
        im_list = im_list[start:end]
        logger.info('Testing on roidb range: {}-{}'.format(start, end))
    else:
        # if testing over the whole dataset, use the NUM_TEST_IMAGES setting;
        # NUM_TEST_IMAGES can be set to a small number of images for quick
        # debugging
        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    if cfg.TEST.WEIGHTS:
        nu.initialize_from_weights_file(
            model, cfg.TEST.WEIGHTS, broadcast=False
        )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    boxes, scores, classes, image_ids = im_list_detections(
        model, im_list[0:cfg.TEST.NUM_TEST_IMAGES])

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'retinanet_detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids, cfg=cfg_yaml),
        det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return boxes, scores, classes, image_ids
Code example #18
def test_retinanet_on_dataset(multi_gpu=False):
    """
    Main entry point for testing on a given dataset: whether multi_gpu or not
    """
    output_dir = get_output_dir(training=False)
    logger.info('Output will be saved to: {:s}'.format(
        os.path.abspath(output_dir)))

    dataset = JsonDataset(cfg.TEST.DATASET)
    # for test-dev or full test dataset, we generate detections for all images
    if 'test-dev' in cfg.TEST.DATASET or 'test' in cfg.TEST.DATASET:
        cfg.TEST.NUM_TEST_IMAGES = len(dataset.get_roidb())

    if multi_gpu:
        num_images = cfg.TEST.NUM_TEST_IMAGES
        boxes, scores, classes, image_ids = multi_gpu_test_retinanet_on_dataset(
            num_images, output_dir, dataset)
    else:
        boxes, scores, classes, image_ids = test_retinanet()

    # write RetinaNet detections pkl file to be used for various purposes
    # dump the boxes first just in case there are spurious failures
    res_file = os.path.join(output_dir, 'retinanet_detections.pkl')
    logger.info('Writing roidb detections to file: {}'.format(
        os.path.abspath(res_file)))
    save_object(
        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids),
        res_file)
    logger.info('Wrote RetinaNet detections to {}'.format(
        os.path.abspath(res_file)))

    # Write the detections to a file that can be uploaded to coco evaluation server
    # which takes a json file format
    res_file = write_coco_detection_results(output_dir, dataset, boxes, scores,
                                            classes, image_ids)

    # Perform coco evaluation
    coco_eval = coco_evaluate(dataset, res_file, image_ids)

    box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
    return OrderedDict([(dataset.name, box_results)])
Code example #19
File: rpn_generator.py Project: ArsenLuca/Detectron
def generate_rpn_on_range(ind_range=None):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    roidb, start_ind, end_ind, total_num_images = get_roidb(ind_range)
    output_dir = get_output_dir(training=False)
    logger.info(
        'Output will be saved to: {:s}'.format(os.path.abspath(output_dir))
    )

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    nu.initialize_from_weights_file(model, cfg.TEST.WEIGHTS)
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)

    boxes, scores, ids = generate_proposals_on_roidb(
        model,
        roidb,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images
    )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_%s_%s.pkl' % tuple(ind_range)
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
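When ind_range is set, each child writes rpn_proposals_range_<start>_<end>.pkl and the parent must stitch the pieces back together in range order. A hedged sketch of that merge step; the function name and exact field handling are illustrative, not Detectron's code:

import os
import pickle

def merge_rpn_range_files(output_dir, ranges):
    """Concatenate per-range proposal files in ascending range order."""
    boxes, scores, ids = [], [], []
    for start, end in ranges:
        fn = os.path.join(
            output_dir, 'rpn_proposals_range_%s_%s.pkl' % (start, end))
        with open(fn, 'rb') as f:
            chunk = pickle.load(f)
        boxes += chunk['boxes']
        scores += chunk['scores']
        ids += chunk['ids']
    return boxes, scores, ids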
Code example #20
def test_net_on_dataset(multi_gpu=False):
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    # Run tracking and eval for posetrack datasets
    if dataset.name.startswith('posetrack') or dataset.name.startswith('kinetics'):
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        run_posetrack_tracking(output_dir, roidb)
    try:
        evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
    except Exception as e:
        # Typically would crash as we don't have evaluators for each dataset
        logger.error('Evaluation crashed with exception {}'.format(e))
Code example #21
File: rpn_generator.py Project: assafmus/Detectron
def generate_rpn_on_range(ind_range=None):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    roidb, start_ind, end_ind, total_num_images = get_roidb(ind_range)
    output_dir = get_output_dir(training=False)
    logger.info('Output will be saved to: {:s}'.format(
        os.path.abspath(output_dir)))

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    nu.initialize_from_weights_file(model, cfg.TEST.WEIGHTS)
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)

    boxes, scores, ids = generate_proposals_on_roidb(
        model,
        roidb,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_%s_%s.pkl' % tuple(ind_range)
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    save_object(dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml),
                rpn_file)
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
Code example #22
def test_net_on_dataset(multi_gpu=False):
    output_dir = get_output_dir(training=False)
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net()
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    # Run tracking and eval for posetrack datasets
    if dataset.name.startswith('posetrack') or dataset.name.startswith(
            'kinetics'):
        roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
        run_posetrack_tracking(output_dir, roidb)
    try:
        evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
    except Exception as e:
        # Typically would crash as we don't have evaluators for each dataset
        logger.error('Evaluation crashed with exception {}'.format(e))
Code example #23
        out_path = osp.join(output_dir, out_name)
        gen_utils.mkdir_p(osp.dirname(out_path))
        #         cv2.imwrite(out_path, combined)
        ################ jianbo
        cv2.imwrite(out_path, pred)


################ jianbo

if __name__ == '__main__':
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    det_file = osp.join(test_output_dir, 'detections.pkl')
    tracking_det_file = osp.join(test_output_dir, 'detections_withTracks.pkl')
    if osp.exists(tracking_det_file):
        det_file = tracking_det_file
    output_dir = osp.join(test_output_dir, 'vis/')
    if not osp.exists(det_file):
        raise ValueError('Output file not found {}'.format(det_file))
    else:
        logger.info('Visualizing {}'.format(det_file))
    # Set include_gt True when using the roidb to evaluate directly. Not doing
    # that currently
    roidb, dataset, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    vis(roidb, det_file, args.thresh, output_dir)
Code example #24
File: test_engine.py Project: xieshuqin/RefineNet
def test_net(ind_range=None):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'

    output_dir = get_output_dir(training=False)
    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        ind_range
    )
    model = initialize_model_from_cfg()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps, \
    all_refined_segms, all_refined_keyps = \
        empty_results(num_classes, num_images)

    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        if cfg.MODEL.FASTER_RCNN:
            # Faster R-CNN type models generate proposals on-the-fly with an
            # in-network RPN
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue

        im = cv2.imread(entry['image'])
        with c2_utils.NamedCudaScope(0):
            cls_boxes_i, cls_segms_i, cls_keyps_i, \
            cls_refined_segms_i, cls_refined_keyps_i = \
                im_detect_all(model, im, box_proposals, timers)

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)
        if cls_refined_segms_i is not None:
            extend_results(i, all_refined_segms, cls_refined_segms_i)
        if cls_refined_keyps_i is not None:
            extend_results(i, all_refined_keyps, cls_refined_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (
                timers['im_detect_bbox'].average_time +
                timers['im_detect_mask'].average_time +
                timers['im_detect_keypoints'].average_time +
                timers['im_detect_refined_mask'].average_time +
                timers['im_detect_refined_keypoints'].average_time
            )
            misc_time = (
                timers['misc_bbox'].average_time +
                timers['misc_mask'].average_time +
                timers['misc_keypoints'].average_time +
                timers['misc_refined_mask'].average_time +
                timers['misc_refined_keypoints'].average_time
            )
            logger.info(
                (
                    'im_detect: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, det_time, misc_time, eta
                )
            )

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            # visualize local result
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis_local'),
                cls_boxes_i,
                segms=cls_segms_i,
                keypoints=cls_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )
            # visualize refined result
            vis_utils.vis_one_image(
                im[:, :, ::-1],
                '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis_refined'),
                cls_boxes_i,
                segms=cls_refined_segms_i,
                keypoints=cls_refined_keyps_i,
                thresh=cfg.VIS_TH,
                box_alpha=0.8,
                dataset=dataset,
                show_class=True
            )

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            all_refined_segms=all_refined_segms,
            all_refined_keyps=all_refined_keyps,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps, all_refined_segms, all_refined_keyps
Code example #25
def test_net(ind_range=None):
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'

    output_dir = get_output_dir(training=False)
    roidb, dataset, start_ind, end_ind, total_num_images = \
        get_roidb_and_dataset(ind_range)
    model = initialize_model_from_cfg()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, cfg.ROOT_GPU_ID)
    name_scope = 'gpu_{}'.format(cfg.ROOT_GPU_ID)
    for i, entry in enumerate(roidb):
        if cfg.MODEL.FASTER_RCNN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue

        im = image_utils.read_image_video(entry)
        with core.NameScope(name_scope):
            with core.DeviceScope(gpu_dev):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    model, im, box_proposals, timers)

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (timers['im_detect_bbox'].average_time +
                        timers['im_detect_mask'].average_time +
                        timers['im_detect_keypoints'].average_time)
            misc_time = (timers['misc_bbox'].average_time +
                         timers['misc_mask'].average_time +
                         timers['misc_keypoints'].average_time)
            logger.info(('im_detect: range [{:d}, {:d}] of {:d}: '
                         '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})').format(
                             start_ind + 1, end_ind, total_num_images,
                             start_ind + i + 1, start_ind + num_images,
                             det_time, misc_time, eta))

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(im[:, :, ::-1],
                                    '{:d}_{:s}'.format(i, im_name),
                                    os.path.join(output_dir, 'vis'),
                                    cls_boxes_i,
                                    segms=cls_segms_i,
                                    keypoints=cls_keyps_i,
                                    thresh=cfg.VIS_THR,
                                    box_alpha=0.8,
                                    dataset=dataset,
                                    show_class=True)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    robust_pickle_dump(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
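Examples #24 and #25 both build their result containers with empty_results and fill them with extend_results. A plausible sketch of those two helpers, inferred only from the call sites above (the real implementations may differ in detail):

def empty_results(num_classes, num_images):
    """One list per (class, image) slot for boxes, segms and keypoints."""
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    return all_boxes, all_segms, all_keyps

def extend_results(index, all_res, im_res):
    """Store one image's per-class results into the global containers."""
    for cls_idx in range(1, len(im_res)):  # class 0 is background
        all_res[cls_idx][index] = im_res[cls_idx]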
Code example #26
File: test_net.py Project: TPNguyen/DetectAndTrack
if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    if not os.path.exists(cfg.TEST.WEIGHTS):
        # Check if there's a trained model stored in this directory
        output_dir = get_output_dir(training=True)
        train_ckpt_path = os.path.join(output_dir, 'model_final.pkl')
        if os.path.exists(train_ckpt_path):
            cfg.TEST.WEIGHTS = train_ckpt_path
        else:
            # Take the longest trained model so far
            potential_ckpts = [pth for pth in os.listdir(output_dir)
                               if pth.startswith('model_iter')]
            potential_ckpts_nums = [int(el[len('model_iter'):-len('.pkl')]) for
                                    el in potential_ckpts]
            if len(potential_ckpts_nums) > 0:
                cfg.TEST.WEIGHTS = os.path.join(
                    output_dir,
                    potential_ckpts[np.argmax(potential_ckpts_nums)])
        logger.info('No test weights specified but found the trained '
                    'model here {}. Using that for testing.'.format(
                        cfg.TEST.WEIGHTS))
Code example #27
File: test_net.py Project: Myles-ZMY/REID
    return args


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    if args.exp_dir is not None:
        cfg.EXP_DIR = args.exp_dir

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    output_dir_name = 'test'
    if args.datasets:
        output_dir_name += '_' + '_'.join(args.datasets)
    output_dir_name += '_' + datetime.datetime.now().strftime("%d_%m_%Y_%H_%M")
    output_dir = get_output_dir(output_dir_name, None)
    test_net(args.caffemodel, output_dir, args.datasets)
Code example #28
File: test_net.py Project: ksofiyuk/gml-nn-detector
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    if args.exp_dir is not None:
        cfg.EXP_DIR = args.exp_dir

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    output_dir_name = 'test'
    if args.datasets:
        output_dir_name += '_' + '_'.join(args.datasets)
    output_dir_name += '_' + datetime.datetime.now().strftime("%d_%m_%Y_%H_%M")
    output_dir = get_output_dir(output_dir_name, None)
    test_net(args.caffemodel, output_dir, args.datasets)
Code example #29
    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    imdb, roidb = get_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))
    print("num_classes", imdb.num_classes)
    print(imdb.roidb_num_bboxes_at(-1))
    print(roidb[0])
    sys.exit()  # NOTE: debugging early-exit; the training call below never runs

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver,
              roidb,
              output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Code example #30
        im_name = entry['image']
        if isinstance(im_name, list):
            im_name = im_name[len(im_name) // 2]
        out_name = im_name[len(dataset.image_directory):]
        out_path = osp.join(output_dir, out_name)
        gen_utils.mkdir_p(osp.dirname(out_path))
        cv2.imwrite(out_path, combined)


if __name__ == '__main__':
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    det_file = osp.join(test_output_dir, 'detections.pkl')
    tracking_det_file = osp.join(test_output_dir, 'detections_withTracks.pkl')
    if osp.exists(tracking_det_file):
        det_file = tracking_det_file
    output_dir = osp.join(test_output_dir, 'vis/')
    if not osp.exists(det_file):
        raise ValueError('Output file not found {}'.format(det_file))
    else:
        logger.info('Visualizing {}'.format(det_file))
    # Set include_gt True when using the roidb to evaluate directly. Not doing
    # that currently
    roidb, dataset, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    vis(roidb, det_file, args.thresh, output_dir)
Code example #31
def test_net(ind_range=None):
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'

    output_dir = get_output_dir(training=False)
    roidb, dataset, start_ind, end_ind, total_num_images = \
        get_roidb_and_dataset(ind_range)
    model = initialize_model_from_cfg()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, cfg.ROOT_GPU_ID)
    name_scope = 'gpu_{}'.format(cfg.ROOT_GPU_ID)
    for i, entry in enumerate(roidb):
        if cfg.MODEL.FASTER_RCNN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue

        im = image_utils.read_image_video(entry)
        with core.NameScope(name_scope):
            with core.DeviceScope(gpu_dev):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    model, im, box_proposals, timers)

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (timers['im_detect_bbox'].average_time +
                        timers['im_detect_mask'].average_time +
                        timers['im_detect_keypoints'].average_time)
            misc_time = (timers['misc_bbox'].average_time +
                         timers['misc_mask'].average_time +
                         timers['misc_keypoints'].average_time)
            logger.info(
                ('im_detect: range [{:d}, {:d}] of {:d}: '
                 '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})').format(
                    start_ind + 1, end_ind, total_num_images,
                    start_ind + i + 1, start_ind + num_images,
                    det_time, misc_time, eta))

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(
                im[:, :, ::-1], '{:d}_{:s}'.format(i, im_name),
                os.path.join(output_dir, 'vis'), cls_boxes_i,
                segms=cls_segms_i, keypoints=cls_keyps_i,
                thresh=cfg.VIS_THR,
                box_alpha=0.8, dataset=dataset, show_class=True)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    robust_pickle_dump(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml),
        det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
Code example #32
if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    if not os.path.exists(cfg.TEST.WEIGHTS):
        # Check if there's a trained model stored in this directory
        output_dir = get_output_dir(training=True)
        train_ckpt_path = os.path.join(output_dir, 'model_final.pkl')
        if os.path.exists(train_ckpt_path):
            cfg.TEST.WEIGHTS = train_ckpt_path
        else:
            # Take the longest trained model so far
            potential_ckpts = [
                pth for pth in os.listdir(output_dir)
                if pth.startswith('model_iter')
            ]
            potential_ckpts_nums = [
                int(el[len('model_iter'):-len('.pkl')])
                for el in potential_ckpts
            ]
            if len(potential_ckpts_nums) > 0:
                cfg.TEST.WEIGHTS = os.path.join(
                    output_dir,
                    potential_ckpts[np.argmax(potential_ckpts_nums)])
Code example #33
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)

    if args.exp_dir is not None:
        cfg.EXP_DIR = args.exp_dir

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    random.seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    time_suffix = 'train_' + datetime.datetime.now().strftime("%d_%m_%Y_%H_%M")
    output_dir = get_output_dir(time_suffix, None)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(output_dir)
Code example #34
File: test.py Project: farrel15/metaDatasetGenerator
def test_net(net, imdb, max_per_image=100, thresh=1 / 80., vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    if not cfg.TEST.OBJ_DET.HAS_RPN:
        roidb = imdb.roidb

    for i in xrange(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.OBJ_DET.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, box_proposals)
        _t['im_detect'].toc()

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.OBJ_DET.NMS)
            cls_dets = cls_dets[keep, :]
            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    print(det_file)
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print(len(all_boxes))
    for i in range(len(all_boxes)):
        n = 0
        for j in range(len(all_boxes[i])):
            n += len(all_boxes[i][j])
        print("{}: {}".format(i, n))

    # return all_boxes, output_dir
    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
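The per-class loop above depends on nms(cls_dets, thresh) over N x 5 arrays of (x1, y1, x2, y2, score). For reference, a compact pure-NumPy greedy NMS that matches that interface; a sketch only, since the project presumably ships the Cython/GPU version from py-faster-rcnn:

import numpy as np

def nms(dets, thresh):
    """Greedy non-maximum suppression; returns indices of kept boxes."""
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Overlap of the top-scoring box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop boxes whose overlap with the kept box exceeds thresh
        order = order[np.where(ovr <= thresh)[0] + 1]
    return keep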
Code example #35
def net_trainer():
    model, start_iter, checkpoints = create_model()
    if 'final' in checkpoints:
        return checkpoints

    add_model_inputs(model)

    if cfg.TRAIN.WEIGHTS:
        nu.initialize_gpu_0_from_weights_file(model, cfg.TRAIN.WEIGHTS)
    # Even if we're randomly initializing we still need to synchronize
    # parameters across GPUs
    nu.broadcast_parameters(model)
    workspace.CreateNet(model.net)

    output_dir = get_output_dir(training=True)
    logger.info('Outputs saved to: {:s}'.format(os.path.abspath(output_dir)))
    dump_proto_files(model, output_dir)
    json_out_file = os.path.join(output_dir, 'json_stats.log')

    # Start loading mini-batches and enqueuing blobs
    model.roi_data_loader.register_sigint_handler()
    # DEBUG data loading
    if cfg.DEBUG.DATA_LOADING:
        for _ in range(10000000):
            # this was with threading...
            # model.roi_data_loader._get_next_minibatch()
            model.roi_data_loader._get_next_minibatch2(
                model.roi_data_loader.shared_readonly_dict,
                model.roi_data_loader._lock,
                model.roi_data_loader.mp_cur,
                model.roi_data_loader.mp_perm)
        sys.exit(0)
    model.roi_data_loader.start(prefill=True)

    smoothed_values = {
        key: SmoothedValue(WIN_SZ) for key in model.losses + model.metrics}
    iter_values = {key: 0 for key in model.losses + model.metrics}
    total_loss = SmoothedValue(WIN_SZ)
    iter_time = SmoothedValue(WIN_SZ)
    mb_qsize = SmoothedValue(WIN_SZ)
    iter_timer = Timer()
    checkpoints = {}
    for i in range(start_iter, cfg.SOLVER.MAX_ITER):
        iter_timer.tic()
        lr = model.UpdateWorkspaceLr(i)
        workspace.RunNet(model.net.Proto().name)
        if i == start_iter:
            nu.print_net(model)
        iter_time.AddValue(iter_timer.toc(average=False))
        for k in iter_values.keys():
            if k in model.losses:
                iter_values[k] = nu.sum_multi_gpu_blob(k)
            else:
                iter_values[k] = nu.average_multi_gpu_blob(k)
        for k, v in smoothed_values.items():
            v.AddValue(iter_values[k])
        loss = np.sum(np.array([iter_values[k] for k in model.losses]))
        total_loss.AddValue(loss)
        mb_qsize.AddValue(model.roi_data_loader._minibatch_queue.qsize())

        if i % LOG_PERIOD == 0 or i == cfg.SOLVER.MAX_ITER - 1:
            eta_seconds = iter_timer.average_time * (cfg.SOLVER.MAX_ITER - i)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            mem_stats = c2_utils.GetGPUMemoryUsageStats()
            mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
            stats = dict(
                iter=i,
                lr=float(lr),
                time=iter_timer.average_time,
                loss=total_loss.GetMedianValue(),
                eta=eta,
                mb_qsize=int(np.round(mb_qsize.GetMedianValue())),
                mem=int(np.ceil(mem_usage / 1024 / 1024)))
            for k, v in smoothed_values.items():
                stats[k] = v.GetMedianValue()
            log_json_stats(stats, json_out_file=json_out_file)
        if cfg.DEBUG.STOP_TRAIN_ITER:
            import pdb
            pdb.set_trace()

        if ((i + 1) % int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS) == 0 and
                i > start_iter):
            checkpoints[i] = os.path.join(
                output_dir, 'model_iter{}.pkl'.format(i))
            nu.save_model_to_weights_file(checkpoints[i], model)

        if i == start_iter + LOG_PERIOD:
            # Reset the iter timer after the first LOG_PERIOD iterations to
            # discard initial iterations that have outlier timings
            iter_timer.reset()

        if np.isnan(loss):
            logger.critical('Loss is NaN, exiting...')
            os._exit(0)  # FB: use code 0 to avoid flow retries

    # Save the final model
    checkpoints['final'] = os.path.join(output_dir, 'model_final.pkl')
    nu.save_model_to_weights_file(checkpoints['final'], model)
    # Shutdown data loading threads
    model.roi_data_loader.shutdown()
    return checkpoints
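The logging in net_trainer leans on SmoothedValue, a windowed tracker queried via AddValue and GetMedianValue. A plausible sketch consistent with those call sites (the real class may track more statistics):

from collections import deque

import numpy as np

class SmoothedValue(object):
    """Track the last window_size values and report their median."""

    def __init__(self, window_size):
        self.values = deque(maxlen=window_size)

    def AddValue(self, value):
        self.values.append(value)

    def GetMedianValue(self):
        return float(np.median(self.values))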
Code example #36
File: test.py Project: gauenk/metaDatasetGenerator
def test_net(net,
             imdb,
             max_per_image=100,
             thresh=1 / 80.,
             vis=False,
             al_net=None):
    """Test a Fast R-CNN network on an image database."""
    roidb = imdb.roidb
    # TODO: the image ids don't align because we load the entire image, not a
    # cropped image. We need to load cropped images.

    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    ds_av = {}
    if cfg.TASK == 'object_detection':
        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]
        all_items = all_boxes
    elif cfg.TASK == 'classification':
        all_probs = [[-1 for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]
        all_items = all_probs

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    # information to generate Active Learning Report
    fidAlReport = None
    pctErrorRed = None
    if cfg.ACTIVE_LEARNING.REPORT:
        pctErrorRed = openAlResultsCsv()
        fidAlReport = startAlReport(imdb, net)

    im_rotates_all = dict.fromkeys(imdb.image_index)

    print("num_images: {}".format(num_images))
    for i in xrange(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.OBJ_DET.HAS_RPN or cfg.TASK != 'object_detection':
            box_proposals = None
        else:
            # Mirror the non-RPN path in example #34 so box_proposals is
            # always bound: evaluate only the non-ground-truth rois
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

        # always send in ['boxes'][0] since we want the first box at the index;
        # using 'boxes' directly would give a flattened array
        im, isImBlob = loadImage(imdb, imdb.image_path_at(i),
                                 imdb.image_index[i],
                                 imdb.roidb[i]['boxes'][0], al_net, i)
        # save_blob_list_to_file(im,None,vis=True)

        _t['im_detect'].tic()
        scores, boxes, im_rotates, activity_vectors = im_detect(
            net, im, box_proposals, imdb.image_index[i], isImBlob=isImBlob)
        # print("image id: {}".format(imdb.image_index[i]))
        # print_net_activiation_data(net,["data","conv1_2","rpn_cls_prob_reshape","rois"])

        if cfg._DEBUG.core.test: print(imdb.image_index[i])
        _t['im_detect'].toc()

        _t['misc'].tic()
        # print("boxes.shape",boxes.shape)
        # sys.exit()

        # skip j = 0, because it's the background class
        im_rotates_all[imdb.image_index_at(i)] = im_rotates

        if len(cfg.SAVE_ACTIVITY_VECTOR_BLOBS) > 0:
            aggregateAV(ds_av, activity_vectors, imdb.image_index[i])
        if cfg.TASK == 'object_detection':
            aggregateDetections(imdb, scores, boxes, all_items, thresh, i, im,
                                vis, max_per_image)
        elif cfg.TASK == 'classification':
            aggregateClassification(imdb, scores, all_items, i)

        if cfg.ACTIVE_LEARNING.REPORT:
            recordImageForAlReport(imdb, scores, activity_vectors, fidAlReport,
                                   i, pctErrorRed)

        _t['misc'].toc()
        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time))

    if cfg.SAVE_ACTIVITY_VECTOR_BLOBS:
        dirn = cfg.GET_SAVE_ACTIVITY_VECTOR_BLOBS_DIR()
        print("activity vectors saved @")
        for blob_name, av in ds_av.items():
            fn = os.path.join(dirn, "{}.pkl".format(blob_name))
            print(fn)
            with open(fn, 'wb') as f:
                cPickle.dump(av, f, cPickle.HIGHEST_PROTOCOL)

    save_dict = {}
    if cfg.TASK == 'object_detection':
        save_dict["all_boxes"] = all_items
        save_dict["im_rotates_all"] = im_rotates_all
        det_file = os.path.join(output_dir, 'detections.pkl')
    elif cfg.TASK == 'classification':
        save_dict["all_probs"] = all_items
        save_dict["im_rotates_all"] = im_rotates_all
        det_file = os.path.join(output_dir, 'probs.pkl')

    with open(det_file, 'wb') as f:
        cPickle.dump(save_dict, f, cPickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(save_dict, output_dir)