Code example #1
from collections import defaultdict
from os.path import basename, dirname

from boxx import loadjson, pathjoin, savejson, tree

# evaluateByJson is assumed to be defined elsewhere in this module.


def evaluateByJsp(resJsp,
                  gtJsp,
                  log=True,
                  method=None,
                  levels=['averaged', 'easy', 'medium', 'hard']):
    # Infer the method name from the directory layout when not given.
    if method is None:
        method = basename(dirname(dirname(dirname(resJsp))))
    resTable = defaultdict(lambda: {})

    resJs = loadjson(resJsp)
    gtJs = loadjson(gtJsp)

    if 'averaged' in levels:
        level = 'averaged'
        row = evaluateByJson(resJs, gtJs)

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row  # boxx debug idiom (tree-x): print the structure of row
    # Re-evaluate each requested difficulty level on its image subset.
    for level in filter(lambda x: x in levels, ['easy', 'medium', 'hard']):
        coco = loadjson(gtJsp)
        coco['images'] = [d for d in coco['images'] if d['level'] == level]
        imgIds = [d['id'] for d in coco['images']]
        coco['annotations'] = [
            bb for bb in coco['annotations'] if bb['image_id'] in imgIds
        ]
        resJs = loadjson(resJsp)
        row = evaluateByJson(resJs, coco)

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row  # boxx debug idiom (tree-x): print the structure of row

    resdir = dirname(resJsp)
    savejson(resTable, pathjoin(resdir, 'resTable.json'))
    return resTable
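A minimal usage sketch, assuming hypothetical file paths; evaluateByJsp needs a detection-result JSON and a COCO-style ground-truth JSON:

# Hypothetical paths; when method is omitted it is inferred from the
# directory layout three levels above the result file.
resTable = evaluateByJsp(
    'work_dirs/my_method/val/json/bbox_results.json',
    'data/rpc/instances_test2017.json',
    method='my_method',
    levels=['averaged', 'hard'],
)
print(resTable['averaged'])  # one metrics row plus 'method' and 'level' keys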
Code example #2
def cover2rpc(sourceFile, targetFile, reLevel=False):
    import boxx
    import pandas as pd
    from boxx import openwrite, openread, df2dicts, zip2
    sjs = boxx.loadjson(sourceFile)
    tjs = boxx.loadjson(targetFile)

    # Back up the source JSON before it is overwritten at the end.
    openwrite(openread(sourceFile), sourceFile + '.before.cover2rpc.bak')
    if reLevel:
        imgdf = pd.DataFrame(sjs["images"])
        anndf = pd.DataFrame(sjs["annotations"])

        # Per image id: [instance count, number of distinct categories].
        imgid2len = anndf.groupby('image_id').apply(lambda sdf: [
            len(sdf),
            len(set(sdf.category_id)),
        ])

        imgdf['instance_num'] = imgdf.id.apply(lambda i: imgid2len.loc[i][0])
        imgdf['class_num'] = imgdf.id.apply(lambda i: imgid2len.loc[i][1])
        imgdf = imgdf.sort_values(['instance_num'])
        n = len(imgdf)

        # Sort by instance count and split into thirds: easy / medium / hard.
        indexDic = dict(
            zip2(range(n // 3), ['easy'] * n) +
            zip2(range(n // 3, 2 * n // 3), ['medium'] * n) +
            zip2(range(2 * n // 3, n), ['hard'] * n))
        imgdf['_ind'] = range(n)
        imgdf['level'] = imgdf._ind.apply(lambda i: indexDic[i])
        imgdf.pop('_ind')
        # Sanity check: the last third must all be labelled 'hard'.
        assert all(imgdf.iloc[n * 2 // 3:].level == 'hard'), imgdf.iloc[-1]

        imgdf.pop('instance_num')
        imgdf.pop('class_num')
        dicts = df2dicts(imgdf)
        tjs['images'] = dicts
    else:
        tjs['images'] = sjs["images"]
    tjs['annotations'] = sjs["annotations"]
    # The merged result overwrites sourceFile (backed up above).
    return boxx.savejson(tjs, sourceFile)
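A hedged usage sketch with hypothetical file names; note the merged JSON overwrites sourceFile after a .bak backup has been written:

# Hypothetical files: pull images/annotations from the source JSON into the
# target JSON's schema, re-binning images into easy/medium/hard thirds.
cover2rpc('results/instances.json', 'rpc/instances_test2017.json', reLevel=True)
# Backup written to 'results/instances.json.before.cover2rpc.bak'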
Code example #3
def get_json_items_df(json_file_path):
    import boxx
    import pandas as pd

    # Avoid shadowing the stdlib json module name.
    js = boxx.loadjson(json_file_path)

    json_items_df = pd.DataFrame(js['__raw_Chinese_name_df'])

    json_items_df = json_items_df[[
        'sku_name', 'category_id', 'sku_class', 'code', 'shelf', 'num', 'name',
        'clas', 'known', 'ind'
    ]]

    return json_items_df
Code example #4
def get_json_images_df(json_file_path):
    import boxx
    import pandas as pd

    js = boxx.loadjson(json_file_path)

    json_images_df = pd.DataFrame(js['images'])

    json_images_df = json_images_df.rename(columns={
        'height': 'image_height',
        'width': 'image_width',
        'id': 'image_id'
    })

    json_images_df = json_images_df[[
        'file_name', 'image_width', 'image_height', 'image_id'
    ]]

    return json_images_df
Code example #5
def get_json_annotations_df(json_file_path):
    import boxx
    import pandas as pd

    js = boxx.loadjson(json_file_path)

    json_annotations_df = pd.DataFrame(js['annotations'])

    json_annotations_df = json_annotations_df.rename(
        columns={
            'area': 'image_area',
            'bbox': 'image_bbox',
            'point_xy': 'image_point_xy',
            'segmentation': 'image_segmentation',
            'iscrowd': 'is_crowded'
        })

    json_annotations_df = json_annotations_df[[
        'image_id', 'image_area', 'image_bbox', 'image_point_xy',
        'image_segmentation', 'is_crowded', 'category_id'
    ]]

    return json_annotations_df
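The three loaders share key columns, so their outputs can be joined; a minimal sketch assuming a hypothetical annotation path:

ann_path = 'data/rpc/instances_val2017.json'  # hypothetical path
images_df = get_json_images_df(ann_path)
annotations_df = get_json_annotations_df(ann_path)
# One row per annotation, with each image's file name and size attached.
merged_df = annotations_df.merge(images_df, on='image_id', how='left')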
Code example #6
import pandas as pd
from boxx import loadjson

print(resJsp)  # resJsp is assumed to be defined earlier in the session
# Score threshold candidates; the last assignment wins.
thre = .784321  # mix 11
thre = 0.84  # testAsTrain
# Difficulty filter; the last assignment wins.
diff = 'easy'
#diff = 'medium'
#diff = 'hard'
diff = 'all'

getWrong = True
#getWrong = False

xuanMai = [i + 1 for i in [135, 136, 137]]

if 1:
    valJsp = '../checkout_submission_tools/tmp_file_json/instances_test2017.json'
    coco = loadjson(valJsp)

    imgds = coco['images']
    imgdf = pd.DataFrame(imgds)
    imgdf = imgdf.set_index('id')
    imgdf['id'] = imgdf.index

    if diff != 'all':
        imgdf = imgdf[imgdf['level'] == diff]
    imgIds = set(imgdf.id)

    gtanns = coco['annotations']
    gtdf = pd.DataFrame(gtanns)
    gtdf = gtdf[gtdf.image_id.isin(imgIds)]
    #    gtdf['level'] = gtdf.image_id.apply(lambda idd: imgdf.loc[idd]['level'])
    gtdf['fname'] = gtdf.image_id.apply(
Code example #7
File: wdeward.py  Project: jianzhnie/DPSNet
import json
import logging
import os
from datetime import datetime

import boxx
import mmcv
import numpy as np
from tqdm import tqdm

# THRESHOLD, NUM_CLASSES, rpc_category_to_super_category, check_best_result,
# get_cAcc, and rpctool are assumed to be provided by the surrounding project.


def rpc_evaluation(
        dataset,
        predictions,
        output_folder,
        generate_pseudo_labels=False,
        iteration=-1,
        threshold=THRESHOLD,
        use_ground_truth=False,  # use ground truth to select pseudo labels
        **_):
    # Clamp the pseudo-label score threshold below 1.0.
    threshold = 0.9995 if threshold >= 1 else threshold

    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if generate_pseudo_labels:
        logger.info('Use ground truth: {}'.format(use_ground_truth))

    pred_boxlists = []
    pred_boxlists_with_density = []
    annotations = []
    density_correct = 0
    box_correct = 0
    mae = 0  # mean absolute error, accumulated per image and averaged later
    has_density_map = predictions[0].has_field('density')
    num_density_classes = 1
    if has_density_map:
        num_density_classes = predictions[0].get_field('density').shape[-1]
        logger.info('Density category: {}'.format(num_density_classes))

    for image_id, prediction in tqdm(enumerate(predictions)):
        img_info = dataset.get_img_info(image_id)

        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        bboxes = prediction.bbox.tolist()
        labels = prediction.get_field("labels").tolist()
        scores = prediction.get_field("scores").tolist()

        # -----------------------------------------------#
        # -----------------Pseudo Label------------------#
        # -----------------------------------------------#

        gt_density_cat_counts = np.zeros((num_density_classes, ),
                                         dtype=np.int32)
        gt_all_cat_counts = np.zeros((NUM_CLASSES, ), dtype=np.int32)
        pred_density_cat_counts = np.zeros((num_density_classes, ),
                                           dtype=np.int32)
        pred_density_cat_map = None  # stays None when there is no density head
        if has_density_map:
            pred_density_cat_counts = prediction.get_field('density').numpy()
            pred_density_cat_map = prediction.get_field('density_map').numpy()
            pred_density_cat_counts = np.round(pred_density_cat_counts).astype(
                np.int32)

            ann = dataset.get_annotation(img_info['id'])
            for category, x, y, w, h in ann:
                density_category = rpc_category_to_super_category(
                    category, num_classes=num_density_classes)
                gt_all_cat_counts[category] += 1
                gt_density_cat_counts[density_category] += 1

            is_correct = np.all(
                gt_density_cat_counts == pred_density_cat_counts)
            if is_correct:
                density_correct += 1
            else:
                mae += np.sum(
                    np.abs(gt_density_cat_counts - pred_density_cat_counts))

        box_density_cat_counts = np.zeros((num_density_classes, ),
                                          dtype=np.int32)
        box_all_cat_counts = np.zeros((NUM_CLASSES, ), dtype=np.int32)

        if generate_pseudo_labels and has_density_map:
            image_result = {
                'bbox': [],
                'width': image_width,
                'height': image_height,
                'id': img_info['id'],
                'file_name': img_info['file_name'],
            }

            for i in range(len(prediction)):
                score = scores[i]
                if score > threshold:
                    box = bboxes[i]
                    label = labels[i]
                    density_category = rpc_category_to_super_category(
                        label, num_density_classes)
                    box_all_cat_counts[label] += 1
                    box_density_cat_counts[density_category] += 1
                    x, y, width, height = (box[0], box[1],
                                           box[2] - box[0], box[3] - box[1])
                    image_result['bbox'].append((label, x, y, width, height))
            if use_ground_truth:
                is_valid = np.all(box_all_cat_counts == gt_all_cat_counts)
            else:
                is_valid = np.all(
                    box_density_cat_counts == pred_density_cat_counts)
            if is_valid:
                annotations.append(image_result)
                is_box_correct = np.all(
                    box_all_cat_counts == gt_all_cat_counts)
                if is_box_correct:
                    box_correct += 1

        # -----------------------------------------------#
        # -----------------------------------------------#
        # -----------------------------------------------#

        for i in range(len(prediction)):
            score = scores[i]
            box = bboxes[i]
            label = labels[i]

            x, y, width, height = (box[0], box[1],
                                   box[2] - box[0], box[3] - box[1])

            pred_boxlists.append({
                "image_id": img_info['id'],
                "category_id": int(label),
                "bbox": [float(k) for k in [x, y, width, height]],
                "score": float(score),
            })

            pred_boxlists_with_density.append({
                "image_id": img_info['id'],
                "category_id": int(label),
                "bbox": [float(k) for k in [x, y, width, height]],
                "score": float(score),
                "density_map": pred_density_cat_map,
            })

    if has_density_map:
        logger.info('Density Ratio: {:.3f}'.format(density_correct /
                                                   len(predictions)))
        logger.info('Density MAE  : {:.3f} '.format(mae / len(predictions)))
        if generate_pseudo_labels:
            if len(annotations) == 0:
                logger.info('No annotations are selected.')
            else:
                logger.info(
                    'Select Ratio: {:.3f} ({}/{}, {:.5f} Threshold)'.format(
                        box_correct / len(annotations), box_correct,
                        len(annotations), threshold))

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if len(pred_boxlists) == 0:
        logger.info('Nothing detected.')
        with open(
                os.path.join(output_folder,
                             'result_{}.txt'.format(time_stamp)), 'w') as fid:
            fid.write('Nothing detected.')
        return dict(metrics={})

    if generate_pseudo_labels:
        logger.info('Pseudo-Labeling: {}'.format(len(annotations)))
        with open(os.path.join(output_folder, 'pseudo_labeling.json'),
                  'w') as fid:
            json.dump(annotations, fid)

    save_path = os.path.join(output_folder, 'bbox_results.json')
    with open(save_path, 'w') as fid:
        json.dump(pred_boxlists, fid)

    mmcv.dump(pred_boxlists_with_density,
              os.path.join(output_folder, 'bbox_results_with_density.pkl'))

    res_js = boxx.loadjson(save_path)
    ann_js = boxx.loadjson(dataset.ann_file)
    result = rpctool.evaluate(res_js, ann_js)
    logger.info(result)

    result_str = str(result)
    if iteration > 0:
        filename = os.path.join(output_folder,
                                'result_{:07d}.txt'.format(iteration))
    else:
        filename = os.path.join(output_folder,
                                'result_{}.txt'.format(time_stamp))

    if has_density_map:
        result_str += '\n' + 'Ratio: {:.3f}, '.format(
            density_correct / len(predictions)) + 'MAE: {:.3f} '.format(
                mae / len(predictions))
    with open(filename, 'w') as fid:
        fid.write(result_str)

    best_cAcc = check_best_result(output_folder, result, result_str, filename)
    logger.info('Best cAcc: {}%'.format(best_cAcc))
    metrics = {
        'cAcc': {
            'averaged': get_cAcc(result, 'averaged'),
            'hard': get_cAcc(result, 'hard'),
            'medium': get_cAcc(result, 'medium'),
            'easy': get_cAcc(result, 'easy'),
        }
    }
    if has_density_map:
        metrics.update({
            'Ratio': density_correct / len(predictions),
            'MAE': mae / len(predictions),
        })
    eval_result = dict(metrics=metrics)
    return eval_result
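A minimal consumer sketch for the returned dict; dataset and predictions are assumed to come from the usual maskrcnn_benchmark test loop:

eval_result = rpc_evaluation(dataset, predictions, output_folder='work_dirs/eval')
cacc = eval_result['metrics']['cAcc']
print('averaged cAcc: {}, hard cAcc: {}'.format(cacc['averaged'], cacc['hard']))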
Code example #8
# Imports as in code example #7; check_best_result, get_cAcc, rpctool, and the
# json/logging/os/datetime/tqdm/boxx modules are assumed available here too.
def rpc_evaluation(dataset, predictions, output_folder, generate_pseudo_labels=False, iteration=-1, **_):
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    pred_boxlists = []
    annotations = []
    correct = 0
    mae = 0  # mean absolute error
    has_density_map = predictions[0].has_field('density_map')
    for image_id, prediction in tqdm(enumerate(predictions)):
        img_info = dataset.get_img_info(image_id)

        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))
        bboxes = prediction.bbox.numpy()
        labels = prediction.get_field("labels").numpy()
        scores = prediction.get_field("scores").numpy()

        # -----------------------------------------------#
        # -----------------Pseudo Label------------------#
        # -----------------------------------------------#
        density = 0.0
        if has_density_map:
            ann = dataset.get_annotation(img_info['id'])
            density_map = prediction.get_field('density_map').numpy()
            density = density_map.sum()
            if round(density) == len(ann):
                correct += 1
            mae += abs(density - len(ann))
        if generate_pseudo_labels and has_density_map:
            image_result = {
                'bbox': [],
                'width': image_width,
                'height': image_height,
                'id': img_info['id'],
                'file_name': img_info['file_name'],
            }

            for i in range(len(prediction)):
                score = scores[i]
                box = bboxes[i]
                label = labels[i]
                if score > 0.95:
                    x, y, width, height = float(box[0]), float(box[1]), float(box[2] - box[0]), float(box[3] - box[1])
                    image_result['bbox'].append(
                        (int(label), x, y, width, height)
                    )
            if len(image_result['bbox']) >= 3 and len(image_result['bbox']) == round(density):
                annotations.append(image_result)
        # -----------------------------------------------#
        # -----------------------------------------------#
        # -----------------------------------------------#

        for i in range(len(prediction)):
            score = scores[i]
            box = bboxes[i]
            label = labels[i]

            x, y, width, height = box[0], box[1], box[2] - box[0], box[3] - box[1]

            pred_boxlists.append({
                "image_id": img_info['id'],
                "category_id": int(label),
                "bbox": [float(k) for k in [x, y, width, height]],
                "score": float(score),
            })

    if has_density_map:
        logger.info('Ratio: {}'.format(correct / len(predictions)))
        logger.info('MAE: {:.3f} '.format(mae / len(predictions)))

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if len(pred_boxlists) == 0:
        logger.info('Nothing detected.')
        with open(os.path.join(output_folder, 'result_{}.txt'.format(time_stamp)), 'w') as fid:
            fid.write('Nothing detected.')
        return 'Nothing detected.'

    if generate_pseudo_labels:
        logger.info('Pseudo-Labeling: {}'.format(len(annotations)))
        with open(os.path.join(output_folder, 'pseudo_labeling.json'), 'w') as fid:
            json.dump(annotations, fid)

    save_path = os.path.join(output_folder, 'bbox_results.json')
    with open(save_path, 'w') as fid:
        json.dump(pred_boxlists, fid)
    res_js = boxx.loadjson(save_path)
    ann_js = boxx.loadjson(dataset.ann_file)
    result = rpctool.evaluate(res_js, ann_js)
    logger.info(result)

    result_str = str(result)
    if iteration > 0:
        filename = os.path.join(output_folder, 'result_{:07d}.txt'.format(iteration))
    else:
        filename = os.path.join(output_folder, 'result_{}.txt'.format(time_stamp))

    if has_density_map:
        result_str += '\n' + 'Ratio: {}, '.format(correct / len(predictions)) + 'MAE: {:.3f} '.format(mae / len(predictions))
    with open(filename, 'w') as fid:
        fid.write(result_str)

    best_cAcc = check_best_result(output_folder, result, result_str, filename)
    logger.info('Best cAcc: {}%'.format(best_cAcc))
    metrics = {
        'cAcc': {
            'averaged': get_cAcc(result, 'averaged'),
            'hard': get_cAcc(result, 'hard'),
            'medium': get_cAcc(result, 'medium'),
            'easy': get_cAcc(result, 'easy'),
        }
    }
    if has_density_map:
        metrics.update({
            'Ratio': correct / len(predictions),
            'MAE': mae / len(predictions),
        })
    eval_result = dict(metrics=metrics)
    return eval_result
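Compared with code example #7, this variant predicts a single scalar density (the sum of one density map) rather than per-category counts, and it selects pseudo labels with a fixed 0.95 score threshold plus the requirement that at least three boxes are kept and their count matches the rounded density, instead of matching per-category density counts.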
Code example #9
    default=None,
    type=lambda s: s and s.replace(" ", '').split(','),
    help='Which difficulty levels to evaluate; default is "easy,medium,hard,averaged"',
)
parser.add_argument(
    "--vis",
    action='store_true',
    help="visualization after evaluate",
)
parser.add_argument(
    "--cn",
    action='store_true',
    help="Use raw Chinese class name, to instead of English name",
)

if __name__ == '__main__':
    args = parser.parse_args()

    resJs = boxx.loadjson(args.resFile)
    annJs = boxx.loadjson(args.annFile)
    skudf = get_skudf(annJs)
    md = evaluate(resJs,
                  annJs,
                  mmap=args.mmap,
                  method=args.method,
                  levels=args.levels)
    print(
        '''\nYou can submit this markdown result to RPC-Leaderboard by opening a new issue here:
        https://github.com/RPC-Dataset/RPC-Leaderboard/issues''')
    print("\n## %s result on RPC-Dataset" % args.method)
    print(md)
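A hedged invocation sketch; the positional argument names are assumptions inferred from how args is used above, and only --vis, --cn, and the levels option are visible in this snippet:

# Hypothetical: simulate a command line for the parser defined above.
args = parser.parse_args(['bbox_results.json', 'instances_test2017.json', '--cn'])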