Example #1
def calc_accuracy_metrics(gt_json: str, dt_json: str) -> List[Dict]:
    gt_json = gt_json.replace("\n", "")
    dt_json = dt_json.replace("\n", "")

    gt_dict = json.loads(gt_json)  # type: List[Dict]
    dt_dict = json.loads(dt_json)  # type: List[Dict]

    gt_bboxes = _convert_dict_to_bboxes(gt_dict, BBType.GroundTruth)
    dt_bboxes = _convert_dict_to_bboxes(dt_dict, BBType.Detected)

    all_boxes = BoundingBoxes()
    all_boxes._boundingBoxes.extend(gt_bboxes._boundingBoxes)
    all_boxes._boundingBoxes.extend(dt_bboxes._boundingBoxes)

    evaluator = Evaluator()
    ret = evaluator.GetPascalVOCMetrics(all_boxes)
    return ret
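The helper _convert_dict_to_bboxes is not part of this snippet. A minimal sketch of what it might look like, assuming each parsed record carries image_name, class_id, an optional score, and an [x, y, w, h] box (all field names here are illustrative, not taken from the original):

def _convert_dict_to_bboxes(records, bb_type):
    # Hypothetical helper; relies on the BoundingBox/BoundingBoxes classes and
    # enums already used by the examples on this page. Field names are assumptions.
    boxes = BoundingBoxes()
    for rec in records:
        boxes.addBoundingBox(
            BoundingBox(imageName=rec["image_name"],
                        classId=rec["class_id"],
                        classConfidence=rec.get("score"),
                        x=rec["bbox"][0], y=rec["bbox"][1],
                        w=rec["bbox"][2], h=rec["bbox"][3],
                        typeCoordinates=CoordinatesType.Absolute,
                        bbType=bb_type,
                        format=BBFormat.XYWH))
    return boxes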
Example #2
def print_evaluation_scores_voc(output_file):
    import os
    import copy
    if not os.path.exists('./validationBBoxes.pickle'):
        filelist = []
        for file in output_file:
            fileName = file['image_id']
            if fileName not in filelist:
                filelist.append(fileName)
        createEvalBBoxes(filelist)
    with open('validationBBoxes.pickle', 'rb') as f:
        gtBBoxes = pickle.load(f)
    # create eval bboxes
    evalBBoxes = copy.deepcopy(gtBBoxes)
    for detBBoxes in output_file:
        box = np.round(detBBoxes['bbox'])
        evalBBoxes.addBoundingBox(
            BoundingBox(imageName=detBBoxes['image_id'],
                        classId=detBBoxes['category_id'],
                        classConfidence=detBBoxes['score'],
                        x=box[0], y=box[1],
                        w=box[2], h=box[3],
                        typeCoordinates=CoordinatesType.Absolute,
                        bbType=BBType.Detected,
                        format=BBFormat.XYX2Y2))
    evaluator = Evaluator()
    metricsPerClass = evaluator.GetPascalVOCMetrics(evalBBoxes,
                                                    IOUThreshold=0.5)
    print("Average precision values per class:\n")
    # Loop through classes to obtain their metrics
    # reconstruct for monitors
    ret = {}
    for mc in metricsPerClass:
        # Get metric values for each class
        c = mc['class']
        precision = mc['precision']
        recall = mc['recall']
        average_precision = mc['AP']
        ipre = mc['interpolated precision']
        irec = mc['interpolated recall']
        # Print AP per class
        ret[c] = average_precision
        print('%s: %f' % (c, average_precision))
    return ret
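A possible call, sketched with made-up values: the dictionary keys match the ones the function indexes above, and the bbox is given as x1, y1, x2, y2 because the boxes are added with BBFormat.XYX2Y2.

# Hypothetical input; values are invented for illustration only.
detections = [
    {'image_id': '000001', 'category_id': 1, 'score': 0.92,
     'bbox': [48.0, 30.0, 210.0, 180.0]},
    {'image_id': '000002', 'category_id': 2, 'score': 0.47,
     'bbox': [12.0, 60.0, 95.0, 140.0]},
]
ap_per_class = print_evaluation_scores_voc(detections)  # {class_id: AP, ...}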
Example #3
def validate(args):
    setup_default_logging()

    def setthresh():
        # per-model score thresholds keyed by the checkpoint name prefix
        model_key = args.checkpoint.split("/")[-1].split("_")[0]
        if model_key in getthresholds:
            return getthresholds[model_key]
        return [args.threshold] * 4

    threshs = setthresh()
    print(threshs)
    # might as well try to validate something
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher

    # create model
    bench = create_model(
        args.model,
        bench_task='predict',
        pretrained=args.pretrained,
        redundant_bias=args.redundant_bias,
        checkpoint_path=args.checkpoint,
        checkpoint_ema=args.use_ema,
    )
    input_size = bench.config.image_size

    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (args.model, param_count))

    bench = bench.cuda()
    if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))

    if 'test' in args.anno:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'image_info_{args.anno}.json')
    else:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'instances_{args.anno}.json')
    image_dir = args.anno
    print(os.path.join(args.data, image_dir), annotation_path)
    dataset = CocoDetection(os.path.join(args.data, image_dir),
                            annotation_path)

    loader = create_loader(dataset,
                           input_size=input_size,
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=args.interpolation,
                           fill_color=args.fill_color,
                           num_workers=args.workers,
                           pin_mem=args.pin_mem,
                           mean=args.mean,
                           std=args.std)
    if 'test' in args.anno:
        threshold = float(args.threshold)
    else:
        threshold = .001
    img_ids = []
    results = []
    bench.eval()
    batch_time = AverageMeter()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            output = bench(input, target['img_scale'], target['img_size'])
            output = output.cpu()
            # print(target['img_id'])
            sample_ids = target['img_id'].cpu()

            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])

                for det in sample:
                    score = float(det[4])
                    if score < threshold:  # stop when below this threshold, scores in descending order
                        coco_det = dict(image_id=image_id, category_id=-1)
                        img_ids.append(image_id)
                        results.append(coco_det)
                        break
                    coco_det = dict(image_id=image_id,
                                    bbox=det[0:4].tolist(),
                                    score=score,
                                    category_id=int(det[5]),
                                    sizes=target['img_size'].tolist()[0])
                    img_ids.append(image_id)
                    results.append(coco_det)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                    ))

    if 'test' in args.anno:
        from itertools import groupby
        results.sort(key=lambda x: x['image_id'])

        out_path = (str(args.model) + "-" + str(args.anno) + "-" +
                    str(min(threshs)) + ".txt")
        f = open(out_path, "w+")
        line_count = 0
        for k, v in tqdm(groupby(results, key=lambda x: x['image_id'])):
            line_count += 1
            f.write(getimageNamefromid(k) + ",")
            for det in v:
                cat_id = det['category_id']
                # keep detections whose score clears their per-class threshold
                if 1 <= cat_id <= 4 and det['score'] >= threshs[cat_id - 1]:
                    x1 = round(det['bbox'][0])
                    y1 = round(det['bbox'][1])
                    x2 = round(float(det['bbox'][0]) + float(det['bbox'][2]))
                    y2 = round(float(det['bbox'][1]) + float(det['bbox'][3]))
                    f.write('%d %d %d %d %d ' % (cat_id, x1, y1, x2, y2))
            f.write('\n')
        print("generated lines:", line_count)
        f.close()

    if 'test' not in args.anno:
        array_of_dm = []
        array_of_gt = []

        for _, item in tqdm(dataset):
            # if item["img_id"] == "1000780" :
            # print(item)
            for i in range(len(item['cls'])):
                # print(str(item["img_id"]),)
                array_of_gt.append(
                    BoundingBox(imageName=str(item["img_id"]),
                                classId=item["cls"][i],
                                x=item["bbox"][i][1] * item['img_scale'],
                                y=item["bbox"][i][0] * item['img_scale'],
                                w=item["bbox"][i][3] * item['img_scale'],
                                h=item["bbox"][i][2] * item['img_scale'],
                                typeCoordinates=CoordinatesType.Absolute,
                                bbType=BBType.GroundTruth,
                                format=BBFormat.XYX2Y2,
                                imgSize=(item['img_size'][0],
                                         item['img_size'][1])))

        for item in tqdm(results):
            if item["category_id"] >= 0:
                array_of_dm.append(
                    BoundingBox(imageName=str(item["image_id"]),
                                classId=item["category_id"],
                                classConfidence=item["score"],
                                x=item['bbox'][0],
                                y=item['bbox'][1],
                                w=item['bbox'][2],
                                h=item['bbox'][3],
                                typeCoordinates=CoordinatesType.Absolute,
                                bbType=BBType.Detected,
                                format=BBFormat.XYWH,
                                imgSize=(item['sizes'][0], item['sizes'][1])))
        myBoundingBoxes = BoundingBoxes()
        # Add all bounding boxes to the BoundingBoxes object:
        for box in (array_of_gt):
            myBoundingBoxes.addBoundingBox(box)
        for dm in array_of_dm:
            myBoundingBoxes.addBoundingBox(dm)

        evaluator = Evaluator()
        f1res = []
        f1resd0 = []
        f1resd10 = []
        f1resd20 = []
        f1resd40 = []
        for conf in tqdm(range(210, 600, 1)):
            metricsPerClass = evaluator.GetPascalVOCMetrics(
                myBoundingBoxes, IOUThreshold=0.5, ConfThreshold=conf / 1000.0)

            totalTP = 0
            totalp = 0
            totalFP = 0
            tp = []
            fp = []
            ta = []
            # print('-------')
            for mc in metricsPerClass:
                tp.append(mc['total TP'])
                fp.append(mc['total FP'])
                ta.append(mc['total positives'])

                totalFP = totalFP + mc['total FP']
                totalTP = totalTP + mc['total TP']
                totalp = totalp + (mc['total positives'])

            # print(totalTP," ",totalFP," ",totalp)
            if totalTP + totalFP == 0:
                p = -1
            else:
                p = totalTP / (totalTP + totalFP)
            if totalp == 0:
                r = -1
            else:
                r = totalTP / (totalp)
            f1_dict = dict(tp=totalTP,
                           fp=totalFP,
                           totalp=totalp,
                           conf=conf / 1000.0,
                           prec=p,
                           rec=r,
                           f1score=((2 * p * r) / (p + r)
                                    if p + r else 0.0))
            f1res.append(f1_dict)
            # per-class precision / recall / F1 (classes indexed 0..3)
            for cls_idx, dest in enumerate(
                    (f1resd0, f1resd10, f1resd20, f1resd40)):
                cls_tp, cls_fp, cls_ta = tp[cls_idx], fp[cls_idx], ta[cls_idx]
                prec_c = cls_tp / (cls_tp + cls_fp) if cls_tp + cls_fp else 0.0
                rec_c = cls_tp / cls_ta if cls_ta else 0.0
                dest.append(
                    dict(tp=cls_tp,
                         fp=cls_fp,
                         totalp=cls_ta,
                         conf=conf / 1000.0,
                         prec=prec_c,
                         rec=rec_c,
                         f1score=((2 * prec_c * rec_c) / (prec_c + rec_c)
                                  if prec_c + rec_c else 0.0)))

        sortedf1 = sorted(f1res, key=lambda k: k['f1score'], reverse=True)

        f1resd0 = sorted(f1resd0, key=lambda k: k['f1score'], reverse=True)
        f1resd10 = sorted(f1resd10, key=lambda k: k['f1score'], reverse=True)
        f1resd20 = sorted(f1resd20, key=lambda k: k['f1score'], reverse=True)
        f1resd40 = sorted(f1resd40, key=lambda k: k['f1score'], reverse=True)

        print(sortedf1[0])
        print("\n\n")
        print(f1resd0[0])
        print(f1resd10[0])
        print(f1resd20[0])
        print(f1resd40[0])
        # coco_results = dataset.coco.loadRes(args.results)
        # coco_eval = COCOeval(dataset.coco, coco_results, 'bbox')
        # coco_eval.params.imgIds = img_ids  # score only ids we've used
        # coco_eval.evaluate()
        # coco_eval.accumulate()
        # coco_eval.summarize()
        # print(coco_eval.eval['params'])

    json.dump(results, open(args.results, 'w'), indent=4)

    return results
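The confidence sweep in the validation branch above amounts to recomputing precision, recall, and F1 at every threshold and keeping the best entry. A condensed sketch of that idea, reusing the metric keys seen above and the ConfThreshold keyword (which appears to come from a modified copy of the metrics library, so treat it as an assumption):

def best_f1_over_confidences(evaluator, boxes, confs):
    # confs: iterable of thresholds, e.g. (c / 1000.0 for c in range(210, 600))
    best = None
    for conf in confs:
        metrics = evaluator.GetPascalVOCMetrics(boxes, IOUThreshold=0.5,
                                                ConfThreshold=conf)
        tp = sum(mc['total TP'] for mc in metrics)
        fp = sum(mc['total FP'] for mc in metrics)
        pos = sum(mc['total positives'] for mc in metrics)
        prec = tp / (tp + fp) if tp + fp else 0.0
        rec = tp / pos if pos else 0.0
        f1 = (2 * prec * rec) / (prec + rec) if prec + rec else 0.0
        if best is None or f1 > best['f1score']:
            best = dict(conf=conf, prec=prec, rec=rec, f1score=f1,
                        tp=tp, fp=fp, totalp=pos)
    return best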