Example no. 1
0
def calculate_map(gt, det):
    """Compute the mean Average Precision (mAP) over all classes.

    Parameters
    ----------
    gt : ground-truth source passed straight to ``getBoundingBoxes``.
    det : detections source passed straight to ``getBoundingBoxes``.

    Returns
    -------
    float
        mAP at IoU 0.5 (every-point interpolation), averaged over classes
        that have at least one ground-truth positive; 0.0 when no class
        has positives (previously this raised ZeroDivisionError).

    NOTE(review): ``gtFormat``, ``gtCoordType``, ``detFormat``,
    ``detCoordType`` and ``imgSize`` are read from enclosing/module scope —
    confirm they are defined wherever this function is used.
    """
    # Load ground-truth boxes first, then merge detections into the same
    # container so the evaluator sees both.
    allBoundingBoxes, allClasses = getBoundingBoxes(
        gt, True, gtFormat, gtCoordType, imgSize=imgSize)
    allBoundingBoxes, allClasses = getBoundingBoxes(
        det, False, detFormat, detCoordType, allBoundingBoxes, allClasses,
        imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()

    # One metrics dict per class.
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,
        IOUThreshold=0.5,  # IOU threshold
        method=MethodAveragePrecision.EveryPointInterpolation,
    )

    acc_AP = 0
    validClasses = 0
    for metricsPerClass in detections:
        # Only classes that actually have ground-truth boxes contribute.
        if metricsPerClass['total positives'] > 0:
            validClasses += 1
            acc_AP += metricsPerClass['AP']

    # BUG FIX: guard the division — the original crashed when no class
    # had ground-truth positives.
    if validClasses == 0:
        return 0.0
    return acc_AP / validClasses
Example no. 2
0
                                                detCoordType,
                                                allBoundingBoxes,
                                                allClasses,
                                                imgSize=imgSize)
# Sort classes so the per-class output below is deterministic.
allClasses.sort()

evaluator = Evaluator()
acc_AP = 0  # running sum of per-class AP values
validClasses = 0  # count of classes with ground-truth positives

# Plot Precision x Recall curve; returns one metrics dict per class.
detections = evaluator.PlotPrecisionRecallCurve(
    allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=iouThreshold,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation,
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=
    False,  # Don't plot the interpolated precision curve
    savePath=savePath,
    showGraphic=showPlot)

# Results file: header lines first; per-class metrics are appended below.
# NOTE(review): the handle is never closed in the visible span — confirm a
# close happens later, or switch to a `with` block.
f = open(os.path.join(savePath, 'results.txt'), 'w')
f.write('Object Detection Metrics\n')
f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
f.write('Average Precision (AP), Precision and Recall per class:')

# each detection is a class
for metricsPerClass in detections:

    # Get metric values per each class
    cl = metricsPerClass['class']
Example no. 3
0
# Results file: header lines first; per-class metrics are appended below.
# NOTE(review): the handle is never closed in the visible span — confirm a
# close happens later, or switch to a `with` block.
f = open(os.path.join(savePath, 'results.txt'), 'w')
f.write('Object Detection Metrics\n')
f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
f.write('Average Precision (AP), Precision and Recall per class:')

evaluator = Evaluator()
acc_AP = 0  # running sum of per-class AP values
validClasses = 0  # count of classes with ground-truth positives
# for each class
for c in allClasses:
    # Plot Precision x Recall curve
    metricsPerClass = evaluator.PlotPrecisionRecallCurve(
        c,  # Class to show
        allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
        IOUThreshold=iouThreshold,  # IOU threshold
        showAP=True,  # Show Average Precision in the title of the plot
        showInterpolatedPrecision=
        False,  # Don't plot the interpolated precision curve
        savePath=os.path.join(savePath, c + '.png'),
        showGraphic=showPlot)
    # Get metric values per each class
    cl = metricsPerClass['class']
    ap = metricsPerClass['AP']
    precision = metricsPerClass['precision']
    recall = metricsPerClass['recall']
    totalPositives = metricsPerClass['total positives']
    total_TP = metricsPerClass['total TP']
    total_FP = metricsPerClass['total FP']

    if totalPositives > 0:
        validClasses = validClasses + 1
Example no. 4
0
def evaluation(gtFolder,
               detFolder,
               iouThreshold,
               gtFormat,
               detFormat,
               savePath,
               confidence_TH,
               range=None):
    """Run Pascal-VOC style object-detection evaluation.

    Parameters
    ----------
    gtFolder : folder with ground-truth bounding-box files.
    detFolder : folder with detection bounding-box files.
    iouThreshold : IoU threshold; 0 triggers COCO-style averaging over
        thresholds 0.50:0.05:0.95 (console output only in that mode).
    gtFormat / detFormat : coordinate format strings ('xywh' or 'xyrb').
    savePath : output folder for plots and ``results.txt``; created if
        missing, then cleared and recreated. Defaults to
        '<this file's dir>/results' when None.
    confidence_TH : detection confidence threshold forwarded to
        ``getBoundingBoxes``.
    range : optional subset selector forwarded to ``getBoundingBoxes``
        (shadows the builtin; name kept for interface compatibility).

    Returns
    -------
    str
        For a single threshold: '<AP>,<fscore>' of the LAST class with
        positives (original behavior preserved). For iouThreshold == 0:
        the overall averaged-mAP string (the original always returned '').
    """
    currentPath = os.path.dirname(os.path.abspath(__file__))
    gtCoordType = 'abs'
    detCoordType = 'abs'
    showPlot = False

    # ---- argument validation -------------------------------------------
    errors = []
    gtFormat = ValidateFormats(gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(detFormat, '-detformat', errors)
    gtCoordType = ValidateCoordinatesTypes(gtCoordType, '-gtCoordinates',
                                           errors)
    # BUG FIX: the detection coordinate type was previously validated from
    # gtCoordType instead of detCoordType.
    detCoordType = ValidateCoordinatesTypes(detCoordType, '-detCoordinates',
                                            errors)
    imgSize = (0, 0)

    if savePath is not None:
        print(savePath)
        if not os.path.exists(savePath):
            os.makedirs(savePath)
    else:
        savePath = os.path.join(currentPath, 'results')

    # Recreate the results directory from scratch.
    shutil.rmtree(savePath, ignore_errors=True)  # Clear folder
    os.makedirs(savePath)

    # ---- load boxes -----------------------------------------------------
    # Ground truth first, then detections merged into the same container.
    allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder,
                                                    True,
                                                    gtFormat,
                                                    gtCoordType,
                                                    imgSize=imgSize,
                                                    range=range)
    allBoundingBoxes, allClasses = getBoundingBoxes(
        detFolder,
        False,
        detFormat,
        detCoordType,
        allBoundingBoxes,
        allClasses,
        imgSize=imgSize,
        confidence_TH=confidence_TH,
        range=range)
    allClasses.sort()

    evaluator = Evaluator()
    output_str = ''

    if iouThreshold == 0:
        # COCO-style protocol: average the per-threshold mAP over IoU
        # thresholds 0.50, 0.55, ..., 0.95 (ten values).
        iouRange = np.arange(0.5, 0.96, 0.05)
        AP_sum = 0
        for iouTH in iouRange:
            detections = evaluator.PlotPrecisionRecallCurve(
                allBoundingBoxes,  # all GT + detection boxes
                IOUThreshold=iouTH,
                method=MethodAveragePrecision.EveryPointInterpolation,
                showAP=True,
                showInterpolatedPrecision=False,
                savePath=savePath,
                showGraphic=showPlot)
            # BUG FIX: accumulators are now reset per threshold; previously
            # they carried over, skewing every mAP after the first.
            acc_AP = 0
            validClasses = 0
            for metricsPerClass in detections:
                # Only classes with ground-truth positives contribute.
                if metricsPerClass['total positives'] > 0:
                    validClasses += 1
                    acc_AP += metricsPerClass['AP']
            # Guard the division when no class has positives.
            mAP = acc_AP / validClasses if validClasses else 0.0
            print('mAP: {0:.2f}%'.format(mAP * 100))
            AP_sum += mAP
        mAP_str = "{0:.2f}%".format(AP_sum / len(iouRange) * 100)
        print('Total mAP: %s' % mAP_str)
        # BUG FIX: this branch previously fell through and returned ''.
        output_str = mAP_str
    else:
        # Single-threshold evaluation; one metrics dict per class.
        detections = evaluator.PlotPrecisionRecallCurve(
            allBoundingBoxes,  # all GT + detection boxes
            IOUThreshold=iouThreshold,
            method=MethodAveragePrecision.EveryPointInterpolation,
            showAP=True,
            showInterpolatedPrecision=False,
            savePath=savePath,
            showGraphic=showPlot)

        acc_AP = 0
        validClasses = 0
        # BUG FIX: the results file is now closed deterministically.
        with open(os.path.join(savePath, 'results.txt'), 'w') as f:
            f.write('Object Detection Metrics\n')
            f.write(
                'https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
            f.write('Average Precision (AP), Precision and Recall per class:')

            for metricsPerClass in detections:
                cl = metricsPerClass['class']
                ap = metricsPerClass['AP']
                precision = metricsPerClass['precision']
                recall = metricsPerClass['recall']
                totalPositives = metricsPerClass['total positives']

                if totalPositives > 0:
                    validClasses += 1
                    acc_AP += ap
                    prec = ['%.2f' % p for p in precision]
                    rec = ['%.2f' % r for r in recall]
                    ap_str = "{0:.2f}".format(ap * 100)

                    # F-score at the final operating point of the PR curve.
                    prec_1 = precision[-1] if len(precision) > 0 else 0
                    rec_1 = recall[-1] if len(recall) > 0 else 0
                    fscore = 0
                    if prec_1 != 0 and rec_1 != 0:
                        fscore = (2 * prec_1 * rec_1) / (prec_1 + rec_1)
                    fscore_str = '{0:.2f}'.format(fscore * 100)

                    # Intentionally overwritten every iteration: the return
                    # value reflects the LAST class processed (original
                    # behavior preserved).
                    output_str = ap_str + ',' + fscore_str

                    f.write('\n\nClass: %s' % cl)
                    f.write('\nAP: %s' % ap_str)
                    f.write('\nPrecision: %s' % prec)
                    f.write('\nRecall: %s' % rec)

            # Guard the division when no class has positives.
            mAP = acc_AP / validClasses if validClasses else 0.0
            mAP_str = "{0:.2f}%".format(mAP * 100)
            f.write('\n\n\nmAP: %s' % mAP_str)

    return output_str

# Read txt files containing bounding boxes (ground truth and detections)
boundingboxes = getBoundingBoxes()
# Uncomment the line below to generate images based on the bounding boxes
# createImages(dictGroundTruth, dictDetected)
# Create an evaluator object in order to obtain the metrics
evaluator = Evaluator()
##############################################################
# VOC PASCAL Metrics
##############################################################
# Plot Precision x Recall curve (return value is intentionally discarded
# here; metrics are re-computed below via GetPascalVOCMetrics).
evaluator.PlotPrecisionRecallCurve(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    method=MethodAveragePrecision.
    EveryPointInterpolation,  # As the official matlab code
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=True)  # Plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics (one dict per class)
metricsPerClass = evaluator.GetPascalVOCMetrics(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation
)  # As the official matlab code
print("Average precision values per class:\n")
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
    # Get metric values per each class
    c = mc['class']
    precision = mc['precision']
Example no. 6
0
def get_mAP(basedir, gt_dir, det_dir, sp, threshold=0.5):
    """Compute the Pascal-VOC mean Average Precision for a detection run.

    Parameters
    ----------
    basedir : base directory; stored in the module-level ``currentPath``
        and used to build default groundtruth/detection/result folders.
    gt_dir : default folder containing ground-truth bounding-box files.
    det_dir : default folder containing detection bounding-box files.
    sp : default folder where plots and ``results.txt`` are written
        (cleared and recreated on every call).
    threshold : default IoU threshold (overridable on argv via ``-t``).

    Returns
    -------
    float
        mAP averaged over classes that have ground-truth positives.
    """
    VERSION = '0.1 (beta)'
    global currentPath
    currentPath = basedir

    # Command-line flags can override every default passed in above.
    parser = argparse.ArgumentParser(
        prog='Object Detection Metrics - Pascal VOC',
        description='This project applies the most popular metrics used to evaluate object detection '
        'algorithms.\nThe current implemention runs the Pascal VOC metrics.\nFor further references, '
        'please check:\nhttps://github.com/rafaelpadilla/Object-Detection-Metrics',
        epilog="Developed by: Rafael Padilla ([email protected])")
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + VERSION)
    parser.add_argument(
        '-gt', '--gtfolder', dest='gtFolder', default=gt_dir, metavar='',
        help='folder containing your ground truth bounding boxes')
    parser.add_argument(
        '-det', '--detfolder', dest='detFolder', default=det_dir, metavar='',
        help='folder containing your detected bounding boxes')
    parser.add_argument(
        '-t', '--threshold', dest='iouThreshold', type=float,
        default=threshold, metavar='', help='IOU threshold. Default 0.5')
    parser.add_argument(
        '-gtformat', dest='gtFormat', metavar='', default='xyrb',
        help='format of the coordinates of the ground truth bounding boxes: '
        '(\'xywh\': <left> <top> <width> <height>)'
        ' or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-detformat', dest='detFormat', metavar='', default='xyrb',
        help='format of the coordinates of the detected bounding boxes '
        '(\'xywh\': <left> <top> <width> <height>) '
        'or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-gtcoords', dest='gtCoordinates', default='abs', metavar='',
        help='reference of the ground truth bounding box coordinates: absolute '
        'values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-detcoords', default='abs', dest='detCoordinates', metavar='',
        help='reference of the ground truth bounding box coordinates: '
        'absolute values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-imgsize', dest='imgSize', default=300, metavar='',
        help='image size. Required if -gtcoords or -detcoords are \'rel\'')
    parser.add_argument(
        '-sp', '--savepath', dest='savePath', metavar='', default=sp,
        help='folder where the plots are saved')
    parser.add_argument(
        '-np', '--noplot', dest='showPlot', action='store_false',
        help='no plot is shown during execution')
    # parse_known_args so unrelated argv entries from a host program do not
    # abort the run.
    args, unknown = parser.parse_known_args()

    iouThreshold = args.iouThreshold

    # ---- argument validation -------------------------------------------
    errors = []
    gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
    # Groundtruth folder (falls back to '<currentPath>/groundtruths').
    if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
        gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
    else:
        gtFolder = os.path.join(currentPath, 'groundtruths')
        if os.path.isdir(gtFolder) is False:
            errors.append('folder %s not found' % gtFolder)
    # Coordinate types; image size only needed for relative coordinates.
    gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates, '-gtCoordinates', errors)
    detCoordType = ValidateCoordinatesTypes(args.detCoordinates, '-detCoordinates', errors)
    imgSize = (0, 0)
    if gtCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates', errors)
    if detCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-detCoordinates', errors)
    # Detection folder (falls back to '<currentPath>/detections').
    if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
        detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
    else:
        detFolder = os.path.join(currentPath, 'detections')
        if os.path.isdir(detFolder) is False:
            errors.append('folder %s not found' % detFolder)
    if args.savePath is not None:
        savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
    else:
        savePath = os.path.join(currentPath, 'results')

    # BUG FIX: was `len(errors) is not 0` — identity comparison with an int
    # literal (SyntaxWarning on modern CPython, not guaranteed correct).
    if len(errors) != 0:
        print("""usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                        [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        for e in errors:
            print(e)
        sys.exit()

    # Recreate the results directory from scratch.
    shutil.rmtree(savePath, ignore_errors=True)  # Clear folder
    os.makedirs(savePath)
    # Show plot during execution
    showPlot = args.showPlot

    # Ground-truth boxes first, then detections merged into the same store.
    allBoundingBoxes, allClasses = getBoundingBoxes(
        gtFolder, True, gtFormat, gtCoordType, imgSize=imgSize)
    allBoundingBoxes, allClasses = getBoundingBoxes(
        detFolder, False, detFormat, detCoordType, allBoundingBoxes, allClasses, imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()
    acc_AP = 0
    validClasses = 0

    # One metrics dict per class.
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,  # all GT + detection boxes
        IOUThreshold=iouThreshold,
        method=MethodAveragePrecision.EveryPointInterpolation,
        showAP=True,
        showInterpolatedPrecision=False,
        savePath=savePath,
        showGraphic=showPlot)

    # BUG FIX: results file is now closed deterministically via `with`.
    with open(os.path.join(savePath, 'results.txt'), 'w') as f:
        f.write('Object Detection Metrics\n')
        f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
        f.write('Average Precision (AP), Precision and Recall per class:')

        for metricsPerClass in detections:
            cl = metricsPerClass['class']
            ap = metricsPerClass['AP']
            precision = metricsPerClass['precision']
            recall = metricsPerClass['recall']
            totalPositives = metricsPerClass['total positives']

            # Only classes with ground-truth positives contribute.
            if totalPositives > 0:
                validClasses += 1
                acc_AP += ap
                prec = ['%.2f' % p for p in precision]
                rec = ['%.2f' % r for r in recall]
                ap_str = "{0:.2f}%".format(ap * 100)
                print('AP: %s (%s)' % (ap_str, cl))
                f.write('\n\nClass: %s' % cl)
                f.write('\nAP: %s' % ap_str)
                f.write('\nPrecision: %s' % prec)
                f.write('\nRecall: %s' % rec)

        # Guard the division when no class has positives.
        mAP = acc_AP / validClasses if validClasses else 0.0
        mAP_str = "{0:.2f}%".format(mAP * 100)
        print('mAP: %s' % mAP_str)
        f.write('\n\n\nmAP: %s' % mAP_str)
    return mAP
Example no. 7
0

# Read txt files containing bounding boxes (ground truth and detections)
boundingboxes = getBoundingBoxes()
# Uncomment the line below to generate images based on the bounding boxes
#createImages(dictGroundTruth, dictDetected)
# Create an evaluator object in order to obtain the metrics
evaluator = Evaluator()
##############################################################
# VOC PASCAL Metrics
##############################################################
# Plot Precision x Recall curve for the single class 'object'
# (return value intentionally discarded; metrics re-computed below).
evaluator.PlotPrecisionRecallCurve(
    'object',  # Class to show
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=False
)  # Don't plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics (one dict per class)
metricsPerClass = evaluator.GetPascalVOCMetrics(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3)  # IOU threshold
print("Average precision values per class:\n")
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
    # Get metric values per each class
    c = mc['class']
    precision = mc['precision']
    recall = mc['recall']
    average_precision = mc['AP']
Example no. 8
0
def compute_metrics(gtFolder,
                    detFolder,
                    iouThreshold=0.5,
                    gtFormat='xyrb',
                    detFormat='xyrb',
                    gtCoordinates='abs',
                    detCoordinates='abs',
                    showPlot=False,
                    savePath=None):
    """Compute Pascal-VOC detection metrics and write ``results.txt``.

    Parameters
    ----------
    gtFolder : folder with ground-truth bounding-box files.
    detFolder : folder with detection bounding-box files.
    iouThreshold : IoU threshold for matching detections to ground truth.
    gtFormat / detFormat : coordinate formats ('xywh' or 'xyrb').
    gtCoordinates / detCoordinates : 'abs' or 'rel' coordinate reference.
    showPlot : whether PR-curve plots are displayed during execution.
    savePath : output folder (validated; cleared and recreated). Defaults
        to '<this file's dir>/results' when None.

    Returns
    -------
    float
        mAP over classes with ground-truth positives (0.0 if none).
    """
    currentPath = os.path.dirname(os.path.abspath(__file__))

    # ---- argument validation -------------------------------------------
    errors = []
    gtFormat = ValidateFormats(gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(detFormat, '-detformat', errors)
    gtCoordType = ValidateCoordinatesTypes(gtCoordinates, '-gtCoordinates',
                                           errors)
    detCoordType = ValidateCoordinatesTypes(detCoordinates, '-detCoordinates',
                                            errors)

    imgSize = (0, 0)
    # NOTE(review): ValidateImageSize receives the (0, 0) tuple here, while
    # other call sites pass a 'width,height' string — confirm intended.
    if gtCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(imgSize, '-imgsize', '-gtCoordinates',
                                    errors)
    if detCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(imgSize, '-imgsize', '-detCoordinates',
                                    errors)

    if savePath is not None:
        savePath = ValidatePaths(savePath, '-sp/--savepath', errors)
    else:
        savePath = os.path.join(currentPath, 'results')

    # Abort with usage text on any validation error.
    if len(errors) != 0:
        print(
            """usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                            [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        for e in errors:
            print(e)
        sys.exit()

    # Recreate the results directory from scratch.
    shutil.rmtree(savePath, ignore_errors=True)  # Clear folder
    os.makedirs(savePath)

    # Ground-truth boxes first, then detections merged into the same store.
    allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder,
                                                    True,
                                                    gtFormat,
                                                    gtCoordType,
                                                    imgSize=imgSize)
    allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                    False,
                                                    detFormat,
                                                    detCoordType,
                                                    allBoundingBoxes,
                                                    allClasses,
                                                    imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()
    acc_AP = 0
    validClasses = 0

    # One metrics dict per class.
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,  # all GT + detection boxes
        IOUThreshold=iouThreshold,
        method=MethodAveragePrecision.EveryPointInterpolation,
        showAP=True,
        showInterpolatedPrecision=False,
        savePath=savePath,
        showGraphic=showPlot)

    # BUG FIX: pre-initialize so the post-loop F1 code cannot hit a
    # NameError when `detections` is empty.
    precision = np.array([])
    recall = np.array([])

    # BUG FIX: results file is now closed deterministically via `with`.
    with open(os.path.join(savePath, 'results.txt'), 'w') as f:
        f.write('Object Detection Metrics\n')
        f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
        f.write('Average Precision (AP), Precision and Recall per class:')

        for metricsPerClass in detections:
            cl = metricsPerClass['class']
            ap = metricsPerClass['AP']
            precision = np.array(metricsPerClass['precision'])
            recall = np.array(metricsPerClass['recall'])
            totalPositives = metricsPerClass['total positives']

            # Only classes with ground-truth positives contribute.
            if totalPositives > 0:
                validClasses += 1
                acc_AP += ap
                prec = ['%.2f' % p for p in precision]
                rec = ['%.2f' % r for r in recall]
                ap_str = "{0:.2f}%".format(ap * 100)
                print('AP: %s (%s)' % (ap_str, cl))
                f.write('\n\nClass: %s' % cl)
                f.write('\nAP: %s' % ap_str)
                f.write('\nPrecision: %s' % prec)
                f.write('\nRecall: %s' % rec)

        # F1 at the final PR operating point of the LAST class processed
        # (original behavior preserved), guarded against empty curves and
        # a zero denominator.
        last_p = precision[-1] if precision.size else 0.0
        last_r = recall[-1] if recall.size else 0.0
        print(last_r)
        print(last_p)
        f1 = (2 * last_p * last_r / (last_p + last_r)
              if (last_p + last_r) else 0.0)
        print('mean f1 ', f1)

        # Guard the division when no class has positives.
        mAP = acc_AP / validClasses if validClasses else 0.0
        mAP_str = "{0:.2f}%".format(mAP * 100)
        print('mAP: %s' % mAP_str)
        f.write('\n\n\nmAP: %s' % mAP_str)

    return mAP
Example no. 9
0
def Pascal():
    """Command-line entry point computing Pascal VOC object-detection metrics.

    Parses CLI arguments describing the ground-truth and detection folders,
    validates them, computes per-class Average Precision (AP) and the mean
    AP (mAP) at the requested IoU threshold, writes ``results.txt`` plus
    precision/recall plots into the save folder, and returns the formatted
    mAP string (e.g. ``'85.32'``).
    """

    # ------------------------------------------------------------------
    # CLI argument validators (collect messages in `errors` instead of
    # raising, so all problems can be reported together at the end).
    # ------------------------------------------------------------------

    def ValidateFormats(argFormat, argName, errors):
        """Map a box-format string to a BBFormat value; 'xywh' is the default."""
        if argFormat == 'xywh':
            return BBFormat.XYWH
        elif argFormat == 'xyrb':
            return BBFormat.XYX2Y2
        elif argFormat is None:
            return BBFormat.XYWH  # default when nothing is passed
        else:
            errors.append(
                'argument %s: invalid value. It must be either \'xywh\' or \'xyrb\'' % argName)

    def ValidateMandatoryArgs(arg, argName, errors):
        """Return True if `arg` is present; otherwise record an error."""
        if arg is None:
            errors.append('argument %s: required argument' % argName)
        else:
            return True

    def ValidateImageSize(arg, argName, argInformed, errors):
        """Parse a 'width,height' string into an int tuple; None on failure."""
        errorMsg = 'argument %s: required argument if %s is relative' % (argName, argInformed)
        ret = None
        if arg is None:
            errors.append(errorMsg)
        else:
            arg = arg.replace('(', '').replace(')', '')
            args = arg.split(',')
            if len(args) != 2:
                errors.append(
                    '%s. It must be in the format \'width,height\' (e.g. \'600,400\')' % errorMsg)
            else:
                if not args[0].isdigit() or not args[1].isdigit():
                    # Bug fix: message previously read 'INdiaTEGER' (garbled).
                    errors.append(
                        '%s. It must be in INTEGER format \'width,height\' (e.g. \'600,400\')' %
                        errorMsg)
                else:
                    ret = (int(args[0]), int(args[1]))
        return ret

    def ValidateCoordinatesTypes(arg, argName, errors):
        """Map 'abs'/'rel' to a CoordinatesType; absolute is the default."""
        if arg == 'abs':
            return CoordinatesType.Absolute
        elif arg == 'rel':
            return CoordinatesType.Relative
        elif arg is None:
            return CoordinatesType.Absolute  # default when nothing is passed
        errors.append('argument %s: invalid value. It must be either \'rel\' or \'abs\'' % argName)

    def ValidatePaths(arg, nameArg, errors):
        """Validate a directory argument, resolving it relative to this script."""
        if arg is None:
            errors.append('argument %s: invalid directory' % nameArg)
        elif os.path.isdir(arg) is False and os.path.isdir(os.path.join(currentPath, arg)) is False:
            errors.append('argument %s: directory does not exist \'%s\'' % (nameArg, arg))
        else:
            # NOTE(review): joins unconditionally, so an absolute existing path
            # also gets prefixed (os.path.join then keeps the absolute part).
            arg = os.path.join(currentPath, arg)
        return arg

    def getBoundingBoxes(directory,
                         isGT,
                         bbFormat,
                         coordType,
                         allBoundingBoxes=None,
                         allClasses=None,
                         imgSize=(0, 0)):
        """Read txt files containing bounding boxes (ground truth and detections).

        Each line of a ground-truth file is "class_id x y w h"; each line of a
        detection file is "class_id confidence x y w h" (coordinates follow
        `bbFormat`/`coordType`).  Boxes are appended to `allBoundingBoxes`
        and any new class label to `allClasses`; both are returned.
        """
        if allBoundingBoxes is None:
            allBoundingBoxes = BoundingBoxes()
        if allClasses is None:
            allClasses = []
        # NOTE(review): chdir is a process-wide side effect kept from the
        # original code; glob is evaluated relative to `directory` because of it.
        os.chdir(directory)
        files = glob.glob("*.txt")
        files.sort()
        for f in files:
            # File name (minus extension) identifies the image.
            nameOfImage = f.replace(".txt", "")
            fh1 = open(f, "r")
            for line in fh1:
                line = line.replace("\n", "")
                if line.replace(' ', '') == '':
                    continue  # skip blank lines
                splitLine = line.split(" ")
                if isGT:
                    idClass = (splitLine[0])  # class label kept as string
                    x = float(splitLine[1])
                    y = float(splitLine[2])
                    w = float(splitLine[3])
                    h = float(splitLine[4])
                    bb = BoundingBox(
                        nameOfImage,
                        idClass,
                        x,
                        y,
                        w,
                        h,
                        coordType,
                        imgSize,
                        BBType.GroundTruth,
                        format=bbFormat)
                else:
                    idClass = (splitLine[0])  # class label kept as string
                    confidence = float(splitLine[1])
                    x = float(splitLine[2])
                    y = float(splitLine[3])
                    w = float(splitLine[4])
                    h = float(splitLine[5])
                    bb = BoundingBox(
                        nameOfImage,
                        idClass,
                        x,
                        y,
                        w,
                        h,
                        coordType,
                        imgSize,
                        BBType.Detected,
                        confidence,
                        format=bbFormat)
                allBoundingBoxes.addBoundingBox(bb)
                if idClass not in allClasses:
                    allClasses.append(idClass)
            fh1.close()
        return allBoundingBoxes, allClasses

    # Get current path to set default folders
    currentPath = os.path.dirname(os.path.abspath(__file__))

    VERSION = '0.1 (beta)'

    parser = argparse.ArgumentParser(
        prog='Object Detection Metrics - Pascal VOC',
        description='This project applies the most popular metrics used to evaluate object detection '
        'algorithms.\nThe current implemention runs the Pascal VOC metrics.\nFor further references, '
        'please check:\nhttps://github.com/rafaelpadilla/Object-Detection-Metrics',
        epilog="Developed by: Rafael Padilla ([email protected])")
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + VERSION)
    # Mandatory (defaulted to ./groundtruths and ./detections next to the script)
    parser.add_argument(
        '-gt',
        '--gtfolder',
        dest='gtFolder',
        default=os.path.join(currentPath, 'groundtruths'),
        metavar='',
        help='folder containing your ground truth bounding boxes')
    parser.add_argument(
        '-det',
        '--detfolder',
        dest='detFolder',
        default=os.path.join(currentPath, 'detections'),
        metavar='',
        help='folder containing your detected bounding boxes')
    # Optional
    parser.add_argument(
        '-t',
        '--threshold',
        dest='iouThreshold',
        type=float,
        default=0.5,
        metavar='',
        # Bug fix: help text previously said 'Default 0.3' while default=0.5.
        help='IOU threshold. Default 0.5')
    parser.add_argument(
        '-gtformat',
        dest='gtFormat',
        metavar='',
        default='xyrb',
        help='format of the coordinates of the ground truth bounding boxes: '
        '(\'xywh\': <left> <top> <width> <height>)'
        ' or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-detformat',
        dest='detFormat',
        metavar='',
        default='xyrb',
        help='format of the coordinates of the detected bounding boxes '
        '(\'xywh\': <left> <top> <width> <height>) '
        'or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-gtcoords',
        dest='gtCoordinates',
        default='abs',
        metavar='',
        help='reference of the ground truth bounding box coordinates: absolute '
        'values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-detcoords',
        default='abs',
        dest='detCoordinates',
        metavar='',
        help='reference of the ground truth bounding box coordinates: '
        'absolute values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-imgsize',
        dest='imgSize',
        metavar='',
        help='image size. Required if -gtcoords or -detcoords are \'rel\'')
    parser.add_argument(
        '-sp', '--savepath', dest='savePath', metavar='', help='folder where the plots are saved')
    parser.add_argument(
        '-np',
        '--noplot',
        dest='showPlot',
        action='store_false',
        help='no plot is shown during execution')
    args = parser.parse_args()

    iouThreshold = args.iouThreshold

    # ------------------------------------------------------------------
    # Arguments validation
    # ------------------------------------------------------------------
    errors = []
    gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
    # Groundtruth folder
    if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
        gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
    else:
        gtFolder = os.path.join(currentPath, 'groundtruths')
        if not os.path.isdir(gtFolder):
            errors.append('folder %s not found' % gtFolder)
    # Coordinate types; image size is only required for relative coordinates
    gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates, '-gtCoordinates', errors)
    detCoordType = ValidateCoordinatesTypes(args.detCoordinates, '-detCoordinates', errors)
    imgSize = (0, 0)
    if gtCoordType == CoordinatesType.Relative:
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates', errors)
    if detCoordType == CoordinatesType.Relative:
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-detCoordinates', errors)
    # Detection folder
    if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
        detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
    else:
        detFolder = os.path.join(currentPath, 'detections')
        if not os.path.isdir(detFolder):
            errors.append('folder %s not found' % detFolder)
    if args.savePath is not None:
        savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
    else:
        savePath = os.path.join(currentPath, 'results')
    # If any error was collected, show all messages and abort
    if len(errors) != 0:
        print("""usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                    [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        [print(e) for e in errors]
        sys.exit()

    # Create (a clean) directory to save results
    shutil.rmtree(savePath, ignore_errors=True)  # Clear folder
    os.makedirs(savePath)
    # Show plot during execution
    showPlot = args.showPlot

    # Get groundtruth boxes, then merge in the detected boxes
    allBoundingBoxes, allClasses = getBoundingBoxes(
        gtFolder, True, gtFormat, gtCoordType, imgSize=imgSize)
    allBoundingBoxes, allClasses = getBoundingBoxes(
        detFolder, False, detFormat, detCoordType, allBoundingBoxes, allClasses, imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()
    acc_AP = 0       # running sum of per-class AP
    validClasses = 0  # classes with at least one ground-truth positive

    # Plot Precision x Recall curve; returns one metrics dict per class
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
        IOUThreshold=iouThreshold,  # IOU threshold
        method=MethodAveragePrecision.EveryPointInterpolation,
        showAP=True,  # Show Average Precision in the title of the plot
        showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
        savePath=savePath,
        showGraphic=showPlot)

    # Bug fix: the file handle was previously never closed; use a context
    # manager so the report is flushed even if the loop below raises.
    with open(os.path.join(savePath, 'results.txt'), 'w') as f:
        f.write('Object Detection Metrics\n')
        f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
        f.write('Average Precision (AP), Precision and Recall per class:')

        # each detection entry is a class
        for metricsPerClass in detections:
            cl = metricsPerClass['class']
            ap = metricsPerClass['AP']
            precision = metricsPerClass['precision']
            recall = metricsPerClass['recall']
            totalPositives = metricsPerClass['total positives']
            total_TP = metricsPerClass['total TP']
            total_FP = metricsPerClass['total FP']

            # Only classes with ground-truth positives contribute to mAP
            if totalPositives > 0:
                validClasses = validClasses + 1
                acc_AP = acc_AP + ap
                prec = ['%.2f' % p for p in precision]
                rec = ['%.2f' % r for r in recall]
                ap_str = "{0:.2f}%".format(ap * 100)
                print('AP: %s (%s)' % (ap_str, cl))
                f.write('\n\nClass: %s' % cl)
                f.write('\nAP: %s' % ap_str)
                f.write('\nPrecision: %s' % prec)
                f.write('\nRecall: %s' % rec)

        # Robustness: avoid ZeroDivisionError when no class had positives
        mAP = acc_AP / validClasses if validClasses else 0.0
        # NOTE(review): unlike the per-class AP strings above, this format
        # deliberately omits the '%' sign (kept as in the original return value).
        mAP_str = "{0:.2f}".format(mAP * 100)
        print('mAP: %s' % mAP_str)
        f.write('\n\n\nmAP: %s' % mAP_str)

    return mAP_str
# Script-level (module scope) evaluation flow: merge the detected boxes into
# the collection previously built from the ground truths.  getBoundingBoxes
# extends allBoundingBoxes/allClasses in place and returns them.
allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                False,
                                                detFormat,
                                                detCoordType,
                                                allBoundingBoxes,
                                                allClasses,
                                                imgSize=imgSize)
allClasses.sort()

evaluator = Evaluator()
acc_AP = 0        # running sum of per-class AP (accumulated later)
validClasses = 0  # number of classes with ground-truth positives

# Plot Precision x Recall curve; returns one metrics dict per class.
detections = evaluator.PlotPrecisionRecallCurve(
    allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=iouThreshold,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation)

# NOTE(review): this handle is not closed in the visible portion of the
# script — confirm a later f.close() exists.
f = open(os.path.join(currentPath, 'results.txt'), 'w')
f.write('Object Detection Metrics\n')
f.write('Average Precision (AP), Precision and Recall per class:')

# each detection is a class
for metricsPerClass in detections:

    # Get metric values per each class
    cl = metricsPerClass['class']
    ap = metricsPerClass['AP']
    precision = metricsPerClass['precision']
    recall = metricsPerClass['recall']
    totalPositives = metricsPerClass['total positives']
Ejemplo n.º 11
0
        ap = metricsPerClass['AP']
        precision = metricsPerClass['precision']
        recall = metricsPerClass['recall']
        totalPositives = metricsPerClass['total positives']
        total_TP = metricsPerClass['total TP']
        total_FP = metricsPerClass['total FP']
        if totalPositives > 0:
            ap_str = "{0:.2f}%".format(ap * 100)
            print('AP: %s (%s)' % (ap_str, cl))
    mAP_str = "{0:.2f}%".format(mAP * 100)
    print('\nmAP: %s' % mAP_str)

    save_dir = os.path.join(root, r'model_predict\plot')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    evaluator.PlotPrecisionRecallCurve(
        gt_lst,
        det_lst,
        method='EveryPointInterpolation',
        showAP=True,
        showInterpolatedPrecision=False,
        savePath=save_dir,
        showGraphic=False
    )
    # x = list(range(2000))
    # y = [IOU_THRESHOLD(i) for i in x]
    # plt.plot(x, y)
    # plt.show()


Ejemplo n.º 12
0
def evaluate_agent(experiment_path, n_samples=100, agent_dir='best',
    visualize_episodes=True
):
    """Run a trained detection agent over a test set and write an evaluation report.

    Loads the agent stored under ``experiment_path/agent_dir``, replays it on
    (up to ``n_samples``) test images, dumps prediction/ground-truth bbox txt
    files, computes Pascal VOC metrics over a sweep of IoU thresholds, saves
    CSV metrics, plots, and annotated images under
    ``experiment_path/evaluation``.

    Args:
        experiment_path: root folder of the experiment (contains the agent and
            receives the ``evaluation`` output folder).
        n_samples: max number of dataset samples to evaluate; ``None`` or a
            negative value means the full dataset.
        agent_dir: subfolder of ``experiment_path`` holding the agent to load.
        visualize_episodes: when True, additionally renders episode GIFs.
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')
    print_config()

    dataset = load_dataset(CONFIG['dataset'], CONFIG['dataset_path'])

    # Always playout full episodes during testing
    CONFIG['playout_episode'] = True
    CONFIG['premasking'] = False
    env = create_env(dataset, CONFIG, mode='test')

    # Load agent from given path
    agent_path = os.path.join(experiment_path, agent_dir)
    agent = create_agent(env, CONFIG, from_path=agent_path)

    # Plot training summary (if it doesn't exist yet)
    if not os.path.exists(os.path.join(experiment_path, 'training')):
        plot_training_summary(experiment_path)

    # Create new evaluation folder
    eval_dirname = 'evaluation'
    eval_path = os.path.join(experiment_path, eval_dirname)
    ensure_folder(eval_path)

    # Use sampling to speed up evaluation if needed
    sample_size = len(dataset)
    if n_samples is not None and n_samples > -1:
        sample_size = min(sample_size, n_samples)

    # Hooks observe the rollout: collector gathers detection metrics,
    # EpisodeRenderer (optional) saves per-episode GIFs.
    collector = DetectionMetrics(eval_path)
    hooks = []
    hooks.append(collector)
    if visualize_episodes:
        gif_path = os.path.join(eval_path, 'episodes')
        hooks.append(EpisodeRenderer(gif_path))

    run_agent(agent, env, sample_size, hooks=hooks)

    print("Write bbox files")
    def _write_bbox_file(name, bbox_map):
        # Dump one txt file per image in Object-Detection-Metrics format:
        # "<class> <x1> <y1> <x2> <y2>" per line (class is always 'text').
        dir_path = os.path.join(eval_path, name)
        ensure_folder(dir_path)
        for image_idx, bboxes in bbox_map.items():
            image_name = dataset.get_image_name(image_idx)
            print(image_name)
            image_txt = ''
            for bbox in bboxes:
                image_txt += 'text ' # object class name
                # Ensure bounding boxes are saved as integers
                image_txt += str(int(bbox[0])) + ' '
                image_txt += str(int(bbox[1])) + ' '
                image_txt += str(int(bbox[2])) + ' '
                image_txt += str(int(bbox[3])) + ' '
                image_txt += '\n'
            print(image_txt)
            txt_fpath = os.path.join(dir_path, f'{image_name}.txt')
            with open(txt_fpath, 'w+') as f:
                f.write(image_txt)
    _write_bbox_file('predictions', collector.image_pred_bboxes)
    _write_bbox_file('groundtruths', collector.image_true_bboxes)

    print("Evaluating predictions against ground truth")

    def _generate_lib_bboxes(bb_type, bbox_map, confidence=None):
        # Convert collector bbox maps into the metrics library's BoundingBox
        # objects (absolute XYX2Y2 coordinates).
        boxes = []
        for image_idx, bboxes in bbox_map.items():
            image_name = dataset.get_image_name(image_idx)
            for bbox in bboxes:
                box = BoundingBox(
                    image_name,
                    'text',  # object class name
                    int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]),
                    typeCoordinates=CoordinatesType.Absolute,
                    classConfidence=confidence,
                    bbType=bb_type,
                    format=BBFormat.XYX2Y2
                )
                boxes.append(box)
        return boxes

    true_boxes = _generate_lib_bboxes(BBType.GroundTruth, collector.image_true_bboxes)
    # Set default confidence as .01 for now (since agent doesn't score regions)
    pred_boxes = _generate_lib_bboxes(
        BBType.Detected, collector.image_pred_bboxes, confidence=.01
    )

    all_boxes = BoundingBoxes()
    for bbox in pred_boxes:
        all_boxes.addBoundingBox(bbox)
    for bbox in true_boxes:
        all_boxes.addBoundingBox(bbox)

    evaluator = Evaluator()
    # Mapping from IoU treshold to metrics calculated at this threshold
    iou_metrics = {}
    # IoU sweep 0.00, 0.05, ..., 0.95 (rounded to dodge float artifacts)
    iou_thresholds = [round(x, 2) for x in np.arange(0, 1, .05)]

    # Flatten the per-image action lists to build a global action histogram
    all_actions = list(itertools.chain(*collector.image_actions.values()))
    action_counter = Counter(all_actions)
    n_actions = len(action_counter.keys())

    for iou_threshold in iou_thresholds:
        metrics_per_class = evaluator.GetPascalVOCMetrics(
            all_boxes,
            IOUThreshold=iou_threshold,
            method=MethodAveragePrecision.EveryPointInterpolation
        )
        text_metrics = metrics_per_class[0]  # class = 'text'
        # Final precision/recall point of the PR curve plus aggregate counts
        metrics = {
            'precision': text_metrics['precision'][-1],
            'recall': text_metrics['recall'][-1],
            'ap': text_metrics['AP'],
            'num_p_total': text_metrics['total positives'],
            'num_tp': text_metrics['total TP'],
            'num_fp': text_metrics['total FP'],
        }
        metrics['f1'] = f1(metrics['precision'], metrics['recall'])
        if len(collector.image_avg_iou) > 0:
            metrics['avg_iou'] = sum(list(collector.image_avg_iou.values())) / len(collector.image_avg_iou)
        else:
            metrics['avg_iou'] = 0

        metrics['total_actions'] = sum(list(collector.image_num_actions.values()))
        if len(collector.image_num_actions) > 0:
            metrics['avg_actions'] = sum(list(collector.image_num_actions.values())) / len(collector.image_num_actions)
        else:
            metrics['avg_actions'] = 0
        print(collector.image_num_actions_per_subepisode)
        # Per-image average actions per sub-episode (0 for empty lists)
        avg_actions_subepisode = [sum(x) / len(x) if len(x) else 0 for x in collector.image_num_actions_per_subepisode.values()]
        print(avg_actions_subepisode)
        # NOTE(review): raises ZeroDivisionError if the collector saw no
        # images at all — confirm run_agent always populates this map.
        metrics['mean_avg_actions_subepisode'] = sum(avg_actions_subepisode) / len(avg_actions_subepisode)
        print(metrics['mean_avg_actions_subepisode'])

        for action, count in action_counter.items():
            action_name = str(action)
            metrics[f'total_action_{action_name}'] = count

        iou_metrics[iou_threshold] = metrics

    # Save metrics as CSV (one row per IoU threshold)
    iou_metrics_df = pd.DataFrame.from_dict(iou_metrics, orient='index')
    iou_metrics_df.index.name = 'iou_threshold'
    iou_metrics_df.to_csv(os.path.join(eval_path, 'metrics.csv'))

    print("Generating plots")

    plots_path = os.path.join(eval_path, 'plots')
    ensure_folder(plots_path)

    # Histogram of agent's actions
    fig, ax = plt.subplots()
    ax.hist(all_actions, bins=n_actions, orientation='horizontal', color='#0504aa')
    ax.set(xlabel='Frequency (Total)', ylabel='Action', title='Agent Actions')
    fig.savefig(os.path.join(plots_path, 'action_hist.png'))

    # Precision-Recall curves at different IoU thresholds
    for iou_threshold in [0.5, 0.75]:
        iou_fname_str = str(iou_threshold).replace('.', '')
        plot_path = os.path.join(plots_path, f'ap_{iou_fname_str}')
        ensure_folder(plot_path)
        evaluator.PlotPrecisionRecallCurve(
            all_boxes,
            IOUThreshold=iou_threshold,
            method=MethodAveragePrecision.EveryPointInterpolation,
            showAP=True,
            showInterpolatedPrecision=True,
            savePath=plot_path,
            showGraphic=False
        )

    # Recall-IoU curve
    x = iou_metrics_df.index.values
    y = iou_metrics_df['recall'].values
    fig, ax = plt.subplots()
    ax.plot(x, y, '-o')
    ax.set(xlabel='Intersection over Union (IoU)', ylabel='Recall', title='Recall-IoU')
    ax.grid()
    fig.savefig(os.path.join(plots_path, 'recall_iou.png'))

    # Precision-IoU curve
    x = iou_metrics_df.index.values
    y = iou_metrics_df['precision'].values
    fig, ax = plt.subplots()
    ax.plot(x, y, '-o')
    ax.set(xlabel='Intersection over Union (IoU)', ylabel='Precision', title='Precision-IoU')
    ax.grid()
    fig.savefig(os.path.join(plots_path, 'precision_iou.png'))

    print("Drawing images with predictions and ground truths")

    images_path = os.path.join(eval_path, 'images')
    ensure_folder(images_path)

    # Overlay all predicted and ground-truth boxes on each evaluated image
    for image_idx in range(sample_size):
        image_path, _ = dataset.get(image_idx, as_image=False)
        image_name = dataset.get_image_name(image_idx)
        image = cv2.imread(image_path)
        image = all_boxes.drawAllBoundingBoxes(image, image_name)
        image_fname = Path(image_path).name
        cv2.imwrite(os.path.join(images_path, image_fname), image)
        print('Image %s created successfully!' % image_name)
def pascalvoc_eval():
    """Evaluate detections against ground truths with the Pascal VOC metrics.

    Reads folders, formats and the IoU threshold from the module-level
    ``args`` namespace, validates them (aborting via ``sys.exit`` on any
    validation error), computes per-class AP, and prints the results.

    Returns:
        tuple: ``(AP_eval, mAP)`` where ``AP_eval`` is a list of
        ``[class, 'xx.xx%']`` pairs for classes with at least one
        ground-truth positive and ``mAP`` is their mean AP (float in [0, 1]).
    """
    iouThreshold = args.iouThreshold

    # ------------------------------------------------------------------
    # Arguments validation (errors are collected, then reported together)
    # ------------------------------------------------------------------
    errors = []
    gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
    # Groundtruth folder (fall back to ./groundtruths next to the script)
    if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
        gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
    else:
        gtFolder = os.path.join(currentPath, 'groundtruths')
        if not os.path.isdir(gtFolder):
            errors.append('folder %s not found' % gtFolder)
    # Coordinate types; image size is only required for relative coordinates
    gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates,
                                           '-gtCoordinates', errors)
    detCoordType = ValidateCoordinatesTypes(args.detCoordinates,
                                            '-detCoordinates', errors)
    imgSize = (0, 0)
    if gtCoordType == CoordinatesType.Relative:
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates',
                                    errors)
    if detCoordType == CoordinatesType.Relative:
        imgSize = ValidateImageSize(args.imgSize, '-imgsize',
                                    '-detCoordinates', errors)
    # Detection folder (fall back to ./detections next to the script)
    if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
        detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
    else:
        detFolder = os.path.join(currentPath, 'detections')
        if not os.path.isdir(detFolder):
            errors.append('folder %s not found' % detFolder)
    if args.savePath is not None:
        savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
    else:
        savePath = os.path.join(currentPath, 'results')
    # Bug fix: was `len(errors) is not 0` — identity comparison with an int
    # literal is implementation-dependent and raises a SyntaxWarning.
    if errors:
        print(
            """usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        [print(e) for e in errors]
        sys.exit()

    # Get groundtruth boxes, then merge in the detected boxes
    allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder,
                                                    True,
                                                    gtFormat,
                                                    gtCoordType,
                                                    imgSize=imgSize)
    allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                    False,
                                                    detFormat,
                                                    detCoordType,
                                                    allBoundingBoxes,
                                                    allClasses,
                                                    imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()
    acc_AP = 0        # running sum of per-class AP
    validClasses = 0  # classes with at least one ground-truth positive

    # Plot Precision x Recall curve; returns one metrics dict per class
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
        IOUThreshold=iouThreshold,  # IOU threshold
        method=MethodAveragePrecision.EveryPointInterpolation,
        showAP=True,  # Show Average Precision in the title of the plot
        showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
        savePath=savePath,
        # NOTE(review): None (falsy) kept from the original to suppress the
        # interactive plot window — confirm the library treats it as False.
        showGraphic=None)

    AP_eval = []
    # each detection entry is a class
    for metricsPerClass in detections:
        cl = metricsPerClass['class']
        ap = metricsPerClass['AP']
        precision = metricsPerClass['precision']
        recall = metricsPerClass['recall']
        totalPositives = metricsPerClass['total positives']
        total_TP = metricsPerClass['total TP']
        total_FP = metricsPerClass['total FP']

        # Only classes with ground-truth positives contribute to mAP
        if totalPositives > 0:
            validClasses = validClasses + 1
            acc_AP = acc_AP + ap
            prec = ['%.2f' % p for p in precision]
            rec = ['%.2f' % r for r in recall]
            ap_str = "{0:.2f}%".format(ap * 100)
            print('AP: %s (%s)' % (ap_str, cl))
            AP_eval.append([cl, ap_str])

    # Robustness: avoid ZeroDivisionError when no class had positives
    mAP = acc_AP / validClasses if validClasses else 0.0
    mAP_str = "{0:.2f}%".format(mAP * 100)
    print('mAP: %s' % mAP_str)
    return AP_eval, mAP
Ejemplo n.º 14
0
def com_mAP_own_folder(dict_mAP, detections_foldername='detections'):
    """Compute per-class AP and overall mAP for one detections folder.

    Validates the module-level ``args`` namespace (formats, folders and
    coordinate types), evaluates every class found in the groundtruth and
    detection folders, saves one Precision x Recall plot per class plus a
    ``results.txt`` report under the results directory, and fills
    ``dict_mAP`` with per-class AP values.

    Args:
        dict_mAP: dict updated in place with ``{class_name: AP_percent}``
            entries (rounded to 3 decimals) and a final ``'mAP'`` entry.
        detections_foldername: fallback folder name (joined onto
            ``currentPath``) used when ``args.detFolder`` is not given.

    Returns:
        The same ``dict_mAP`` dict, updated in place.

    Raises:
        SystemExit: if argument validation fails.
        ZeroDivisionError: if no class has any groundtruth positives.
    """
    ##### Arguments validation #####
    errors = []
    # Validate annotation file formats
    gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
    # Groundtruth folder: explicit argument, or default 'groundtruths'
    if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
        gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
    else:
        errors.pop()  # discard the "missing mandatory arg" error; fall back to default
        gtFolder = os.path.join(currentPath, 'groundtruths')
        if not os.path.isdir(gtFolder):
            errors.append('folder %s not found' % gtFolder)
    # Coordinate types; relative coordinates additionally require an image size
    gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates, '-gtCoordinates', errors)
    detCoordType = ValidateCoordinatesTypes(args.detCoordinates, '-detCoordinates', errors)
    if gtCoordType == CoordinatesType.Relative:
        ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates', errors)
    if detCoordType == CoordinatesType.Relative:
        ValidateImageSize(args.imgSize, '-imgsize', '-detCoordinates', errors)
    # Detection folder: explicit argument, or the supplied fallback name
    if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
        detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
    else:
        errors.pop()
        detFolder = os.path.join(currentPath, detections_foldername)
        if not os.path.isdir(detFolder):
            errors.append('folder %s not found' % detFolder)
    # Results directory
    if args.savePath is not None:
        savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
        savePath = os.path.join(args.savePath, 'results')
    else:
        savePath = os.path.join(currentPath, 'results')
    # Abort with a usage message if any validation error accumulated
    if errors:
        print("""usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                    [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        for e in errors:
            print(e)
        sys.exit()

    # Start from a clean results folder
    shutil.rmtree(savePath, ignore_errors=True)
    # Plots are saved to disk only; never shown interactively in this variant
    # (the original intentionally overrode args.showPlot with False).
    showPlot = False

    allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder, True, gtFormat)
    allBoundingBoxes, allClasses = getBoundingBoxes(detFolder, False, detFormat,
                                                    allBoundingBoxes, allClasses)
    allClasses.sort()

    if not os.path.exists(savePath):
        os.mkdir(savePath)

    evaluator = Evaluator()
    acc_AP = 0
    validClasses = 0
    # 'with' guarantees the report file is closed even if evaluation raises
    with open(os.path.join(savePath, 'results.txt'), 'w') as f:
        for c in allClasses:
            # Plot (and save) the Precision x Recall curve for this class
            metricsPerClass = evaluator.PlotPrecisionRecallCurve(
                c,  # Class to show
                allBoundingBoxes,  # All ground truths and detections
                IOUThreshold=iouThreshold,  # IOU threshold
                showAP=True,  # Show Average Precision in the title of the plot
                showInterpolatedPrecision=False,  # Don't plot interpolated precision
                savePath=os.path.join(savePath, c + '.png'),
                showGraphic=showPlot)
            # Metric values for this class
            cl = metricsPerClass['class']
            ap = metricsPerClass['AP']
            precision = metricsPerClass['precision']
            recall = metricsPerClass['recall']
            totalPositives = metricsPerClass['total positives']

            # Only classes with groundtruth instances count towards mAP
            if totalPositives > 0:
                validClasses += 1
                acc_AP += ap
                prec = ['%.2f' % p for p in precision]
                rec = ['%.2f' % r for r in recall]
                ap_str = "{0:.2f}".format(ap * 100)
                dict_mAP[cl] = round(ap * 100, 3)
                f.write('\n\nClass: %s' % cl)
                f.write('\nAP: %s' % ap_str)
                f.write('\nPrecision: %s' % prec)
                f.write('\nRecall: %s' % rec)

        # NOTE(review): raises ZeroDivisionError when no class has positives,
        # matching the original behavior.
        mAP = acc_AP / validClasses
        mAP_str = "{0:.2f}%".format(mAP * 100)
        f.write('\n\n\nmAP: %s' % mAP_str)

    dict_mAP['mAP'] = round(mAP * 100, 3)
    return dict_mAP