Example #1
 def __init__(self, name):
     assert dataset_catalog.contains(name), \
         'Unknown dataset name: {}'.format(name)
     assert os.path.exists(dataset_catalog.get_im_dir(name)), \
         'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
     assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
         'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
     logger.debug('Creating: {}'.format(name))
     self.name = name
     self.image_directory = dataset_catalog.get_im_dir(name)
     self.image_prefix = dataset_catalog.get_im_prefix(name)
     self.COCO = COCO(dataset_catalog.get_ann_fn(name))
     self.debug_timer = Timer()
     # Set up dataset classes
     category_ids = self.COCO.getCatIds()
     categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
     self.category_to_id_map = dict(zip(categories, category_ids))
     self.classes = ['__background__'] + categories
     self.num_classes = len(self.classes)
     self.json_category_id_to_contiguous_id = {
         v: i + 1
         for i, v in enumerate(self.COCO.getCatIds())
     }
     self.contiguous_category_id_to_json_id = {
         v: k
         for k, v in self.json_category_id_to_contiguous_id.items()
     }
     self._init_keypoints()
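Example #1 above looks like the constructor of a Detectron-style COCO dataset wrapper. Below is a minimal usage sketch; the JsonDataset class name, its import path, and the 'coco_2014_train' key are assumptions, not something the snippet itself establishes.

# Sketch only: class name, import path and dataset key are assumptions.
from detectron.datasets.json_dataset import JsonDataset

ds = JsonDataset('coco_2014_train')          # must be a key known to dataset_catalog
print(ds.num_classes)                        # 80 COCO categories + '__background__' = 81
print(ds.json_category_id_to_contiguous_id)  # sparse COCO ids -> contiguous ids 1..num_classes-1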
Example #2
def get_gt(parsing_COCO):
    assert len(cfg.TEST.DATASETS) == 1, \
        'Parsing only supports one dataset for now'
    name = cfg.TEST.DATASETS[0]
    _json_path = dataset_catalog.get_ann_fn(name)

    class_recs = []
    npos = 0
    image_ids = parsing_COCO.getImgIds()
    image_ids.sort()

    for image_id in image_ids:
        # imagename = parsing_COCO.loadImgs(image_id)[0]['file_name']
        ann_ids = parsing_COCO.getAnnIds(imgIds=image_id, iscrowd=None)
        objs = parsing_COCO.loadAnns(ann_ids)
        # gt_box = []
        anno_adds = []
        for obj in objs:
            if 'dp_masks' in obj:
                # gt_box.append(obj['bbox'])
                parsing_path = obj['id']
                anno_adds.append(parsing_path)
                npos = npos + 1

        det = [False] * len(anno_adds)
        # class_recs.append({'gt_box': np.array(gt_box),
        #                    'anno_adds': anno_adds,
        #                    'det': det})
        class_recs.append({'anno_adds': anno_adds, 'det': det})
    # class_recs shape: num_images,
    return class_recs, npos
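A sketch of how get_gt might be driven, mirroring how the parsing evaluation in Example #7 builds its COCO handle; the 'CIHP_val' dataset key is a placeholder.

# Sketch only: the dataset key is a placeholder.
from pycocotools.coco import COCO

parsing_COCO = COCO(dataset_catalog.get_ann_fn('CIHP_val'))
class_recs, npos = get_gt(parsing_COCO)
print('{} images, {} ground-truth parsing instances'.format(len(class_recs), npos))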
Example #3
def _do_matlab_tracking_eval(json_datasets, res_files, output_dir):
    import subprocess

    json_files = [
        get_ann_fn(json_dataset.name) for json_dataset in json_datasets
    ]

    # TODO: is there an easier way to format this?
    json_files_string = ("[string(\'" + "\'),string(\'".join(json_files) +
                         "\')]")
    res_files_string = ("[string(\'" + "\'),string(\'".join(res_files) +
                        "\')]")

    logger.info(
        '------------------------------------------------------------------')
    logger.info(
        'Computing tracking results with the mobilityaids MATLAB eval code.')
    logger.info(
        '------------------------------------------------------------------')

    path = os.path.join(cfg.ROOT_DIR, 'detectron', 'datasets', 'mobility_aids',
                        'matlab_eval')
    cmd = 'cd {} && '.format(path)
    cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
    cmd += '-r "dbstop if error; '
    cmd += 'mobilityaids_eval({:s},{:s},\'{:s}\',true); quit;"' \
       .format(json_files_string, res_files_string, output_dir)
    logger.info('Running:\n{}'.format(cmd))
    subprocess.call(cmd, shell=True)
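As a side note on the TODO above, the string-array formatting produces MATLAB literals of the following shape; the file names here are hypothetical.

# Sketch: reproduces the formatting used above for two hypothetical files.
json_files = ['/data/seq1.json', '/data/seq2.json']
json_files_string = ("[string(\'" + "\'),string(\'".join(json_files) + "\')]")
print(json_files_string)
# -> [string('/data/seq1.json'),string('/data/seq2.json')]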
Example #4
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = {i['id'] for i in image_testdev}  # set for fast membership checks below
    print('{} has {} images'.format(test_dev_image_info,
                                    len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename))
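A sketch of calling the converter; the detections file name and output directory are placeholders. Given the logic above, the output file keeps only the entries whose image_id belongs to the test-dev split.

# Sketch only: paths are placeholders.
convert('detections_coco_2017_test_results.json', '/tmp/test-dev')
# -> /tmp/test-dev/detections_coco_2017_test_results_test-dev.json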
Example #5
def get_parsing():
    name = cfg.TEST.DATASETS[0]
    if name.split('_')[0] in ['CIHP', 'MHP']:
        _json_path = dataset_catalog.get_ann_fn(name)
        with open(_json_path, 'r') as f:
            _json = json.load(f)
        parsing_name = _json['categories'][0]['humanparts']
    else:
        parsing_name = [
            'Torso', 'Right Hand', 'Left Hand', 'Left Foot', 'Right Foot',
            'Upper Leg Right', 'Upper Leg Left', 'Lower Leg Right',
            'Lower Leg Left', 'Upper Arm Left', 'Upper Arm Right',
            'Lower Arm Left', 'Lower Arm Right', 'Head'
        ]
    parsing_name.insert(0, '__background__')
    return parsing_name
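A quick sanity check of get_parsing when the DensePose-style fallback list is used (i.e. the dataset name does not start with CIHP or MHP):

# Sketch: with the fallback list, 14 body parts plus '__background__' are returned.
parsing_name = get_parsing()
print(len(parsing_name))  # 15
print(parsing_name[0])    # '__background__'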
Example #6
def load_data(dataset_name, image_id_list=None):
    imgs = []
    annotation_file = dataset_catalog.get_ann_fn(dataset_name)
    with open(annotation_file, 'r') as f:
        dataset = json.load(f)
    for img in dataset['images']:
        imgs.append(img)
    output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    file_name = os.path.join(output_dir, 'train_init.json')
    # image_id_list is required here despite its None default; it selects which images to keep
    image_id_set = set(int(i) for i in image_id_list)
    select = [img for img in imgs if img['id'] in image_id_set]
    dataset['images'] = select
    with open(file_name, 'wt') as f:
        json.dump(dataset, f)
    return dataset
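A sketch of calling load_data with an explicit id list; the dataset key and image ids are placeholders. Note that, as a side effect, the filtered annotation set is also written to train_init.json under the training output directory.

# Sketch only: dataset key and image ids are placeholders.
subset = load_data('coco_2014_train', image_id_list=[9, 25, 30])
print(len(subset['images']))  # number of images kept after filtering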
Example #7
def evaluate_parsing(dataset, all_boxes, all_parss, output_dir):
    logger.info('Evaluating parsing')
    pkl_temp = glob.glob(os.path.join(output_dir, '*.pkl'))
    '''
    for pkl in pkl_temp:
        os.remove(pkl)
    '''
    dataset_name = cfg.TEST.DATASETS[0]
    _json_path = dataset_catalog.get_ann_fn(dataset_name)
    parsing_COCO = COCO(_json_path)
    filename2imgid(parsing_COCO)
    parsing_result = _empty_parsing_results()
    if dataset.name.find('test') > -1:
        return OrderedDict([(dataset.name, parsing_result)])
    predict_dir = os.path.join(output_dir, 'parsing_predict')
    assert os.path.exists(predict_dir), \
        'predict dir \'{}\' not found'.format(predict_dir)
    if True:
        _iou, _miou, _miou_s, _miou_m, _miou_l \
            = parsing_utils.parsing_iou(dataset, predict_dir, parsing_COCO)

        parsing_result['parsing']['mIoU'] = _miou
        parsing_result['parsing']['mIoUs'] = _miou_s
        parsing_result['parsing']['mIoUm'] = _miou_m
        parsing_result['parsing']['mIoUl'] = _miou_l

        parsing_name = parsing_utils.get_parsing()
        logger.info('IoU for each category:')
        assert len(parsing_name) == len(_iou), \
            '{} VS {}'.format(str(len(parsing_name)), str(len(_iou)))

        for i, iou in enumerate(_iou):
            print(' {:<30}:  {:.2f}'.format(parsing_name[i], 100 * iou))

        print('----------------------------------------')
        print(' {:<30}:  {:.2f}'.format('mean IoU', 100 * _miou))
        print(' {:<30}:  {:.2f}'.format('mean IoU small', 100 * _miou_s))
        print(' {:<30}:  {:.2f}'.format('mean IoU medium', 100 * _miou_m))
        print(' {:<30}:  {:.2f}'.format('mean IoU large', 100 * _miou_l))

    if True:
        all_ap_p, all_pcp = parsing_utils.eval_seg_ap(
            dataset, all_boxes[1], all_parss[1], parsing_COCO)
        ap_p_vol = np.mean(all_ap_p)

        logger.info('~~~~ Summary metrics ~~~~')
        print(' Average Precision based on part (APp)               @[mIoU=0.10:0.90 ] = {:.3f}'
            .format(ap_p_vol)
        )
        print(' Average Precision based on part (APp)               @[mIoU=0.10      ] = {:.3f}'
            .format(all_ap_p[0])
        )
        print(' Average Precision based on part (APp)               @[mIoU=0.30      ] = {:.3f}'
            .format(all_ap_p[2])
        )
        print(' Average Precision based on part (APp)               @[mIoU=0.50      ] = {:.3f}'
            .format(all_ap_p[4])
        )
        print(' Average Precision based on part (APp)               @[mIoU=0.70      ] = {:.3f}'
            .format(all_ap_p[6])
        )
        print(' Average Precision based on part (APp)               @[mIoU=0.90      ] = {:.3f}'
            .format(all_ap_p[8])
        )
        print(' Percentage of Correctly parsed semantic Parts (PCP) @[mIoU=0.50      ] = {:.3f}'
            .format(all_pcp[4])
        )
        parsing_result['parsing']['APp50'] = all_ap_p[4]
        parsing_result['parsing']['APpvol'] = ap_p_vol
        parsing_result['parsing']['PCP'] = all_pcp[4]


    return OrderedDict([(dataset.name, parsing_result)])
Example #8
dataset_name = 'voc_2007'
dataset_split = 'test'
root_dir = osp.join(home, 'Dataset')

pkl_path = sys.argv[1]
with open(pkl_path, 'rb') as f:  # binary mode is required for pickle files
    detections = pickle.load(f)
all_boxes = detections['all_boxes']
print(detections.keys())
print(len(all_boxes))
print(len(all_boxes[1]))

num_classes = len(all_boxes) - 1
num_images = len(all_boxes[1])

ann_fn = dataset_catalog.get_ann_fn(dataset_name + '_' + dataset_split)
with open(ann_fn, 'r') as f:
    json_data = json.load(f)
print(json_data.keys())
print(len(json_data['annotations']))

if json_data['images']:
    print(json_data['images'][0])

if json_data['annotations']:
    print(json_data['annotations'][0])

anns = []
ann_id = 1
Example #9
    parser.add_argument('--dataset_name',
                        dest='dataset_name',
                        help='Dataset name according to dataset_catalog',
                        default='nucoco_train')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # fig = plt.figure(figsize=(16, 6))

    # Load the nucoco dataset
    dataset_name = args.dataset_name
    ann_file = dataset_catalog.get_ann_fn(dataset_name)
    img_dir = dataset_catalog.get_im_dir(dataset_name)
    coco = COCO_PLUS(ann_file, img_dir)

    # Load the proposals
    proposals = rrpn_loader(args.proposals_file)

    for i in range(1, len(coco.dataset['images']), 10):
        fig = plt.figure(figsize=(16, 6))
        img_id = coco.dataset['images'][i]['id']
        scores = proposals[img_id]['scores']
        boxes = proposals[img_id]['boxes']
        points = coco.imgToPointcloud[img_id]['points']

        img_path = os.path.join(img_dir, coco.imgs[img_id]["file_name"])
        # print(img_path)