Code example #1
def build_dataset(dataset_list, is_train=True, local_rank=0):
    """Build a dataset from a list of dataset names.

    Each name is resolved through the dataset catalog, wrapped in a
    ``D.COCODataset`` with the transforms for the current mode, and the
    results are concatenated into a single dataset when more than one
    name is given.

    Args:
        dataset_list (list[str] | tuple[str]): catalog names of the
            datasets to load.
        is_train (bool): training mode; controls the transforms and
            whether images without annotations are dropped.
        local_rank (int): rank used to gate logging output.

    Returns:
        A ``D.COCODataset`` (single name) or ``D.ConcatDataset``
        (multiple names).

    Raises:
        RuntimeError: if ``dataset_list`` is not a list or tuple.
        AssertionError: if a name is unknown or its image dir is missing.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(
                dataset_list))
    for dataset_name in dataset_list:
        assert contains(dataset_name), 'Unknown dataset name: {}'.format(
            dataset_name)
        # Resolve the image dir once instead of three separate catalog calls.
        im_dir = get_im_dir(dataset_name)
        assert os.path.exists(im_dir), 'Im dir \'{}\' not found'.format(im_dir)
        logging_rank('Creating: {}'.format(dataset_name),
                     local_rank=local_rank)

    transforms = build_transforms(is_train)

    # The annotation types depend only on the global config, not on the
    # dataset, so compute them once outside the loop.
    ann_types = ('bbox', )
    if cfg.MODEL.MASK_ON:
        ann_types = ann_types + ('segm', )
    if cfg.MODEL.KEYPOINT_ON:
        ann_types = ann_types + ('keypoints', )
    if cfg.MODEL.PARSING_ON:
        ann_types = ann_types + ('parsing', )
    if cfg.MODEL.UV_ON:
        ann_types = ann_types + ('uv', )

    datasets = []
    for dataset_name in dataset_list:
        # make dataset from factory
        dataset = D.COCODataset(
            root=get_im_dir(dataset_name),
            ann_file=get_ann_fn(dataset_name),
            remove_images_without_annotations=is_train,
            ann_types=ann_types,
            transforms=transforms,
        )
        datasets.append(dataset)

    # for training, concatenate all datasets into a single one
    dataset = datasets[0]
    if len(datasets) > 1:
        dataset = D.ConcatDataset(datasets)

    return dataset
Code example #2
def evaluation(dataset,
               all_boxes,
               all_segms,
               all_parss,
               all_pscores,
               clean_up=True):
    """Evaluate COCO-style predictions and report per-task results.

    Runs bbox evaluation always, plus segm/parsing evaluation when the
    corresponding model heads are enabled in ``cfg``. Per-type JSON files
    and a ``coco_results.pth`` summary are written under
    ``cfg.CKPT/test`` (deleted afterwards when ``clean_up`` is True).

    Args:
        dataset: dataset wrapper exposing a ``.coco`` COCO API object.
        all_boxes: detection results in COCO json format.
        all_segms: segmentation results (used when ``cfg.MODEL.MASK_ON``).
        all_parss: parsing results (used when ``cfg.MODEL.PARSING_ON``).
        all_pscores: parsing scores paired with ``all_parss``.
        clean_up (bool): remove the output folder after saving results.

    Returns:
        tuple: (``COCOResults`` aggregate, raw ``coco_results`` dict).
    """
    output_folder = os.path.join(cfg.CKPT, 'test')
    expected_results = ()
    expected_results_sigma_tol = 4

    coco_results = {}
    iou_types = ("bbox", )
    coco_results["bbox"] = all_boxes
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
        coco_results["segm"] = all_segms
    if cfg.MODEL.PARSING_ON:
        iou_types = iou_types + ("parsing", )
        coco_results['parsing'] = [all_parss, all_pscores]

    results = COCOResults(*iou_types)
    logging_rank("Evaluating predictions", local_rank=0)
    for iou_type in iou_types:
        if iou_type == "parsing":
            eval_ap = cfg.PRCNN.EVAL_AP
            num_parsing = cfg.PRCNN.NUM_PARSING
            assert len(
                cfg.TEST.DATASETS) == 1, 'Parsing only support one dataset now'
            im_dir = dataset_catalog.get_im_dir(cfg.TEST.DATASETS[0])
            ann_fn = dataset_catalog.get_ann_fn(cfg.TEST.DATASETS[0])
            res = evaluate_parsing(coco_results[iou_type], eval_ap,
                                   cfg.PRCNN.SCORE_THRESH, num_parsing, im_dir,
                                   ann_fn, output_folder)
            results.update_parsing(res)
        else:
            # output_folder is always a non-empty path (joined from
            # cfg.CKPT), so the former NamedTemporaryFile fallback was
            # dead code that created and discarded a temp file per type.
            file_path = os.path.join(output_folder, iou_type + ".json")
            res = evaluate_predictions_on_coco(dataset.coco,
                                               coco_results[iou_type],
                                               file_path, iou_type)
            results.update(res)
    logging_rank(results, local_rank=0)
    check_expected_results(results, expected_results,
                           expected_results_sigma_tol)
    torch.save(results, os.path.join(output_folder, "coco_results.pth"))
    if clean_up:
        shutil.rmtree(output_folder)
    return results, coco_results