Example #1
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that every file in the results folder is correct (has the expected name and contents)
                            and that no files are missing from the folder.
                            If an error is detected, the method raises it.
    """
    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])

    subm = rrc_evaluation_funcs.load_zip_file(
        submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    #Validate format of GroundTruth
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(k, gt[k],
                                                    evaluationParams['CRLF'],
                                                    evaluationParams['LTRB'],
                                                    True)

    #Validate format of results
    for k in subm:
        if k not in gt:
            raise Exception("The sample %s is not present in GT" % k)

        rrc_evaluation_funcs.validate_lines_in_file(
            k, subm[k], evaluationParams['CRLF'], evaluationParams['LTRB'],
            False, evaluationParams['CONFIDENCES'])
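The `evaluationParams` dictionary is supplied by the surrounding evaluation script; the keys read above (`GT_SAMPLE_NAME_2_ID`, `DET_SAMPLE_NAME_2_ID`, `CRLF`, `LTRB`, `CONFIDENCES`) suggest a minimal sketch of how such a call might look. The file-name regexes and flag values below are illustrative placeholders, not values taken from this snippet.

# Hypothetical driver for the snippet above; the regexes and flags are
# assumptions, not values defined by this example.
evaluationParams = {
    'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',    # assumed GT naming scheme
    'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',  # assumed result naming scheme
    'CRLF': False,         # lines terminated by \n rather than \r\n
    'LTRB': True,          # boxes given as xmin,ymin,xmax,ymax
    'CONFIDENCES': False,  # detections carry no confidence score
}

# Raises an Exception if either ZIP is malformed or a sample is missing.
validate_data('gt.zip', 'submit.zip', evaluationParams)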
Example #2
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that every file in the results folder is correct (has the expected name and contents)
                            and that no files are missing from the folder.
                            If an error is detected, the method raises it.
    """
    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams["GT_SAMPLE_NAME_2_ID"]
    )

    subm = rrc_evaluation_funcs.load_zip_file(
        submFilePath, evaluationParams["DET_SAMPLE_NAME_2_ID"], True
    )

    if len(subm) != len(gt):
        raise Exception(
            "The Det file is not valid (invalid number of files in ZIP. Expected: "
            + str(len(gt))
            + " Found: "
            + str(len(subm))
            + ")"
        )

    # Validate format of GroundTruth
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(
            k, gt[k], evaluationParams["CRLF"], False, True
        )

    # Validate format of results
    for k in subm:
        if k not in gt:
            raise Exception("The sample %s is not present in GT" % k)
Example #3
def gt_zip_to_list(gtFilePath='./gt.zip'):
    """Convert the gt.zip from IDCAR into our format."""
    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])

    points = []
    for resFile in gt:
        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True,
            False)
        points.append(pointsList)

    return points
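Note that this snippet reads `evaluationParams` without defining it, so it only runs if that dictionary already exists at module level. A minimal sketch of a self-contained variant, with the parameters passed in explicitly (the default values here are assumptions):

# Sketch of a self-contained variant; the parameter defaults are assumptions.
def gt_zip_to_list_explicit(gtFilePath='./gt.zip',
                            sample_name_regex='gt_img_([0-9]+).txt',
                            crlf=False, ltrb=True):
    """Convert the gt.zip from ICDAR into a list of per-file point lists."""
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, sample_name_regex)

    points = []
    for resFile in gt:
        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        pointsList, _, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, crlf, ltrb, True, False)
        points.append(pointsList)

    return points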
Example #4
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results.
        Results: dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8, 'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: { 'sample1' : { 'Precision':0.8, 'Recall':0.9 }, 'sample2' : { 'Precision':0.8, 'Recall':0.9 } }
    """

    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(points[0])
        resBoxes[0, 4] = int(points[1])
        resBoxes[0, 1] = int(points[2])
        resBoxes[0, 5] = int(points[3])
        resBoxes[0, 2] = int(points[4])
        resBoxes[0, 6] = int(points[5])
        resBoxes[0, 3] = int(points[6])
        resBoxes[0, 7] = int(points[7])
        pointMat = resBoxes[0].reshape([2, 4]).T
        return plg.Polygon(pointMat)

    def rectangle_to_polygon(rect):
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(rect.xmin)
        resBoxes[0, 4] = int(rect.ymax)
        resBoxes[0, 1] = int(rect.xmin)
        resBoxes[0, 5] = int(rect.ymin)
        resBoxes[0, 2] = int(rect.xmax)
        resBoxes[0, 6] = int(rect.ymin)
        resBoxes[0, 3] = int(rect.xmax)
        resBoxes[0, 7] = int(rect.ymax)

        pointMat = resBoxes[0].reshape([2, 4]).T

        return plg.Polygon(pointMat)

    def rectangle_to_points(rect):
        points = [
            int(rect.xmin),
            int(rect.ymax),
            int(rect.xmax),
            int(rect.ymax),
            int(rect.xmax),
            int(rect.ymin),
            int(rect.xmin),
            int(rect.ymin)
        ]
        return points

    def get_union(pD, pG):
        areaA = pD.area()
        areaB = pG.area()
        return areaA + areaB - get_intersection(pD, pG)

    def get_intersection_over_union(pD, pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except ZeroDivisionError:
            return 0

    def get_intersection(pD, pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList, numGtCare):
        correct = 0
        AP = 0
        if len(confList) > 0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct) / (n + 1)

            if numGtCare > 0:
                AP /= numGtCare

        return AP

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(
        submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    numGlobalCareGt = 0
    numGlobalCareDet = 0

    arrGlobalConfidences = []
    arrGlobalMatches = []

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        recall = 0
        precision = 0
        hmean = 0

        detMatched = 0

        iouMat = np.empty([1, 1])

        gtPols = []
        detPols = []

        gtPolPoints = []
        detPolPoints = []

        #Indices of Ground Truth polygons marked as don't care
        gtDontCarePolsNum = []
        #Indices of Detected polygons matched with a don't-care GT
        detDontCarePolsNum = []

        pairs = []
        detMatchedNums = []

        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0

        evaluationLog = ""

        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True,
            False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols) - 1)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (
            " (" + str(len(gtDontCarePolsNum)) +
            " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            pointsList, confidencesList, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
                detFile, evaluationParams['CRLF'], evaluationParams['LTRB'],
                False, evaluationParams['CONFIDENCES'])
            for n in range(len(pointsList)):
                points = pointsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                if len(gtDontCarePolsNum) > 0:
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(
                            dontCarePol, detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision >
                                evaluationParams['AREA_PRECISION_CONSTRAINT']):
                            detDontCarePolsNum.append(len(detPols) - 1)
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (
                " (" + str(len(detDontCarePolsNum)) +
                " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")

            if len(gtPols) > 0 and len(detPols) > 0:
                #Calculate the IoU matrix
                outputShape = [len(gtPols), len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols), np.int8)
                detRectMat = np.zeros(len(detPols), np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum,
                               detNum] = get_intersection_over_union(pD, pG)

                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[
                                detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
                            if iouMat[gtNum, detNum] > evaluationParams[
                                    'IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                detMatched += 1
                                pairs.append({'gt': gtNum, 'det': detNum})
                                detMatchedNums.append(detNum)
                                evaluationLog += "Match GT #" + str(
                                    gtNum) + " with Det #" + str(detNum) + "\n"

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        #we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
            sampleAP = precision
        else:
            recall = float(detMatched) / numGtCare
            precision = 0 if numDetCare == 0 else float(
                detMatched) / numDetCare
            if evaluationParams['CONFIDENCES'] and evaluationParams[
                    'PER_SAMPLE_RESULTS']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch,
                                      numGtCare)

        hmean = 0 if (
            precision +
            recall) == 0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detMatched
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        if evaluationParams['PER_SAMPLE_RESULTS']:
            perSampleMetrics[resFile] = {
                'precision': precision,
                'recall': recall,
                'hmean': hmean,
                'pairs': pairs,
                'AP': sampleAP,
                'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
                'gtPolPoints': gtPolPoints,
                'detPolPoints': detPolPoints,
                'gtDontCare': gtDontCarePolsNum,
                'detDontCare': detDontCarePolsNum,
                'evaluationParams': evaluationParams,
                'evaluationLog': evaluationLog
            }

    # Compute the overall AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches,
                        numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(
        matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(
        matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (
        methodRecall + methodPrecision)

    methodMetrics = {
        'precision': methodPrecision,
        'recall': methodRecall,
        'hmean': methodHmean,
        'AP': AP
    }

    resDict = {
        'calculated': True,
        'Message': '',
        'method': methodMetrics,
        'per_sample': perSampleMetrics
    }

    return resDict
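As a sanity check of the `compute_ap` helper above, here is a standalone restatement with a tiny hand-computed example; the numbers are made up purely for illustration.

import numpy as np

def compute_ap(confList, matchList, numGtCare):
    """Average precision, accumulated exactly as in the helper above."""
    correct = 0
    AP = 0
    if len(confList) > 0:
        confList = np.array(confList)
        matchList = np.array(matchList)
        sorted_ind = np.argsort(-confList)      # rank detections by descending confidence
        matchList = matchList[sorted_ind]
        for n in range(len(confList)):
            if matchList[n]:
                correct += 1
                AP += float(correct) / (n + 1)  # precision at this rank
        if numGtCare > 0:
            AP /= numGtCare
    return AP

# Three detections, two of them correct, two ground-truth boxes that matter.
# Ranked by confidence the matches become [True, False, True],
# so AP = (1/1 + 2/3) / 2 = 0.833...
print(compute_ap([0.9, 0.5, 0.8], [True, True, False], 2))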
Example #5
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results.
        Results: dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8, 'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: { 'sample1' : { 'Precision':0.8, 'Recall':0.9 }, 'sample2' : { 'Precision':0.8, 'Recall':0.9 } }
    """

    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def one_to_one_match(row, col):
        cont = 0
        for j in range(len(recallMat[0])):
            if recallMat[row, j] >= evaluationParams[
                    'AREA_RECALL_CONSTRAINT'] and precisionMat[
                        row,
                        j] >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
                cont = cont + 1
        if (cont != 1):
            return False
        cont = 0
        for i in range(len(recallMat)):
            if recallMat[i, col] >= evaluationParams[
                    'AREA_RECALL_CONSTRAINT'] and precisionMat[
                        i,
                        col] >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
                cont = cont + 1
        if (cont != 1):
            return False

        if recallMat[row, col] >= evaluationParams[
                'AREA_RECALL_CONSTRAINT'] and precisionMat[
                    row, col] >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
            return True
        return False

    def num_overlaps_gt(gtNum):
        cont = 0
        for detNum in range(len(detRects)):
            if detNum not in detDontCareRectsNum:
                if recallMat[gtNum, detNum] > 0:
                    cont = cont + 1
        return cont

    def num_overlaps_det(detNum):
        cont = 0
        for gtNum in range(len(recallMat)):
            if gtNum not in gtDontCareRectsNum:
                if recallMat[gtNum, detNum] > 0:
                    cont = cont + 1
        return cont

    def is_single_overlap(row, col):
        if num_overlaps_gt(row) == 1 and num_overlaps_det(col) == 1:
            return True
        else:
            return False

    def one_to_many_match(gtNum):
        many_sum = 0
        detRects = []
        for detNum in range(len(recallMat[0])):
            if gtRectMat[gtNum] == 0 and detRectMat[
                    detNum] == 0 and detNum not in detDontCareRectsNum:
                if precisionMat[gtNum, detNum] >= evaluationParams[
                        'AREA_PRECISION_CONSTRAINT']:
                    many_sum += recallMat[gtNum, detNum]
                    detRects.append(detNum)
        if round(many_sum, 4) >= evaluationParams['AREA_RECALL_CONSTRAINT']:
            return True, detRects
        else:
            return False, []

    def many_to_one_match(detNum):
        many_sum = 0
        gtRects = []
        for gtNum in range(len(recallMat)):
            if gtRectMat[gtNum] == 0 and detRectMat[
                    detNum] == 0 and gtNum not in gtDontCareRectsNum:
                if recallMat[
                        gtNum,
                        detNum] >= evaluationParams['AREA_RECALL_CONSTRAINT']:
                    many_sum += precisionMat[gtNum, detNum]
                    gtRects.append(gtNum)
        if round(many_sum, 4) >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
            return True, gtRects
        else:
            return False, []

    def area(a, b):
        dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin) + 1
        dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin) + 1
        if (dx >= 0) and (dy >= 0):
            return dx * dy
        else:
            return 0.

    def center(r):
        x = float(r.xmin) + float(r.xmax - r.xmin + 1) / 2.
        y = float(r.ymin) + float(r.ymax - r.ymin + 1) / 2.
        return Point(x, y)

    def point_distance(r1, r2):
        distx = math.fabs(r1.x - r2.x)
        disty = math.fabs(r1.y - r2.y)
        return math.sqrt(distx * distx + disty * disty)

    def center_distance(r1, r2):
        return point_distance(center(r1), center(r2))

    def diag(r):
        w = (r.xmax - r.xmin + 1)
        h = (r.ymax - r.ymin + 1)
        return math.sqrt(h * h + w * w)

    def rectangle_to_points(rect):
        points = [
            int(rect.xmin),
            int(rect.ymax),
            int(rect.xmax),
            int(rect.ymax),
            int(rect.xmax),
            int(rect.ymin),
            int(rect.xmin),
            int(rect.ymin)
        ]
        return points

    perSampleMetrics = {}

    methodRecallSum = 0
    methodPrecisionSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
    Point = namedtuple('Point', 'x y')

    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(
        submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    numGt = 0
    numDet = 0

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        recall = 0
        precision = 0
        hmean = 0
        recallAccum = 0.
        precisionAccum = 0.
        gtRects = []
        detRects = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCareRectsNum = []  #Indices of Ground Truth rectangles marked as don't care
        detDontCareRectsNum = []  #Indices of Detected rectangles matched with a don't-care GT
        pairs = []
        evaluationLog = ""

        recallMat = np.empty([1, 1])
        precisionMat = np.empty([1, 1])

        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams['CRLF'], True, True, False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            gtRect = Rectangle(*points)
            gtRects.append(gtRect)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCareRectsNum.append(len(gtRects) - 1)

        evaluationLog += "GT rectangles: " + str(len(gtRects)) + (
            " (" + str(len(gtDontCareRectsNum)) +
            " don't care)\n" if len(gtDontCareRectsNum) > 0 else "\n")

        if resFile in subm:
            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
            pointsList, _, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
                detFile, evaluationParams['CRLF'], True, False, False)
            for n in range(len(pointsList)):
                points = pointsList[n]
                detRect = Rectangle(*points)
                detRects.append(detRect)
                detPolPoints.append(points)
                if len(gtDontCareRectsNum) > 0:
                    for dontCareRectNum in gtDontCareRectsNum:
                        dontCareRect = gtRects[dontCareRectNum]
                        intersected_area = area(dontCareRect, detRect)
                        rdDimensions = ((detRect.xmax - detRect.xmin + 1) *
                                        (detRect.ymax - detRect.ymin + 1))
                        if (rdDimensions == 0):
                            precision = 0
                        else:
                            precision = intersected_area / rdDimensions
                        if (precision >
                                evaluationParams['AREA_PRECISION_CONSTRAINT']):
                            detDontCareRectsNum.append(len(detRects) - 1)
                            break

            evaluationLog += "DET rectangles: " + str(len(detRects)) + (
                " (" + str(len(detDontCareRectsNum)) +
                " don't care)\n" if len(detDontCareRectsNum) > 0 else "\n")

            if len(gtRects) == 0:
                recall = 1
                precision = 0 if len(detRects) > 0 else 1

            if len(detRects) > 0:
                #Calculate recall and precision matrices
                outputShape = [len(gtRects), len(detRects)]
                recallMat = np.empty(outputShape)
                precisionMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtRects), np.int8)
                detRectMat = np.zeros(len(detRects), np.int8)
                for gtNum in range(len(gtRects)):
                    for detNum in range(len(detRects)):
                        rG = gtRects[gtNum]
                        rD = detRects[detNum]
                        intersected_area = area(rG, rD)
                        rgDimensions = ((rG.xmax - rG.xmin + 1) *
                                        (rG.ymax - rG.ymin + 1))
                        rdDimensions = ((rD.xmax - rD.xmin + 1) *
                                        (rD.ymax - rD.ymin + 1))
                        recallMat[
                            gtNum,
                            detNum] = 0 if rgDimensions == 0 else intersected_area / rgDimensions
                        precisionMat[
                            gtNum,
                            detNum] = 0 if rdDimensions == 0 else intersected_area / rdDimensions

                # Find one-to-one matches
                evaluationLog += "Find one-to-one matches\n"
                for gtNum in range(len(gtRects)):
                    for detNum in range(len(detRects)):
                        if gtRectMat[gtNum] == 0 and detRectMat[
                                detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum:
                            match = one_to_one_match(gtNum, detNum)
                            if match is True:
                                #in deteval we have to perform additional validation before marking as one-to-one
                                if is_single_overlap(gtNum, detNum) is True:
                                    rG = gtRects[gtNum]
                                    rD = detRects[detNum]
                                    normDist = center_distance(rG, rD)
                                    normDist /= diag(rG) + diag(rD)
                                    normDist *= 2.0
                                    if normDist < evaluationParams[
                                            'EV_PARAM_IND_CENTER_DIFF_THR']:
                                        gtRectMat[gtNum] = 1
                                        detRectMat[detNum] = 1
                                        recallAccum += evaluationParams[
                                            'MTYPE_OO_O']
                                        precisionAccum += evaluationParams[
                                            'MTYPE_OO_O']
                                        pairs.append({
                                            'gt': gtNum,
                                            'det': detNum,
                                            'type': 'OO'
                                        })
                                        evaluationLog += "Match GT #" + str(
                                            gtNum) + " with Det #" + str(
                                                detNum) + "\n"
                                    else:
                                        evaluationLog += "Match Discarded GT #" + str(
                                            gtNum) + " with Det #" + str(
                                                detNum) + " normDist: " + str(
                                                    normDist) + " \n"
                                else:
                                    evaluationLog += "Match Discarded GT #" + str(
                                        gtNum) + " with Det #" + str(
                                            detNum) + " not single overlap\n"
                # Find one-to-many matches
                evaluationLog += "Find one-to-many matches\n"
                for gtNum in range(len(gtRects)):
                    if gtNum not in gtDontCareRectsNum:
                        match, matchesDet = one_to_many_match(gtNum)
                        if match is True:
                            evaluationLog += "num_overlaps_gt=" + str(
                                num_overlaps_gt(gtNum))
                            #in deteval we have to perform additional validation before marking as one-to-many
                            if num_overlaps_gt(gtNum) >= 2:
                                gtRectMat[gtNum] = 1
                                recallAccum += (evaluationParams['MTYPE_OO_O']
                                                if len(matchesDet) == 1 else
                                                evaluationParams['MTYPE_OM_O'])
                                precisionAccum += (
                                    evaluationParams['MTYPE_OO_O']
                                    if len(matchesDet) == 1 else
                                    evaluationParams['MTYPE_OM_O'] *
                                    len(matchesDet))
                                pairs.append({
                                    'gt':
                                    gtNum,
                                    'det':
                                    matchesDet,
                                    'type':
                                    'OO' if len(matchesDet) == 1 else 'OM'
                                })
                                for detNum in matchesDet:
                                    detRectMat[detNum] = 1
                                evaluationLog += "Match GT #" + str(
                                    gtNum) + " with Det #" + str(
                                        matchesDet) + "\n"
                            else:
                                evaluationLog += "Match Discarded GT #" + str(
                                    gtNum) + " with Det #" + str(
                                        matchesDet) + " not single overlap\n"

                # Find many-to-one matches
                evaluationLog += "Find many-to-one matches\n"
                for detNum in range(len(detRects)):
                    if detNum not in detDontCareRectsNum:
                        match, matchesGt = many_to_one_match(detNum)
                        if match is True:
                            #in deteval we have to perform additional validation before marking as many-to-one
                            if num_overlaps_det(detNum) >= 2:
                                detRectMat[detNum] = 1
                                recallAccum += (
                                    evaluationParams['MTYPE_OO_O']
                                    if len(matchesGt) == 1 else
                                    evaluationParams['MTYPE_OM_M'] *
                                    len(matchesGt))
                                precisionAccum += (
                                    evaluationParams['MTYPE_OO_O']
                                    if len(matchesGt) == 1 else
                                    evaluationParams['MTYPE_OM_M'])
                                pairs.append({
                                    'gt':
                                    matchesGt,
                                    'det':
                                    detNum,
                                    'type':
                                    'OO' if len(matchesGt) == 1 else 'MO'
                                })
                                for gtNum in matchesGt:
                                    gtRectMat[gtNum] = 1
                                evaluationLog += "Match GT #" + str(
                                    matchesGt) + " with Det #" + str(
                                        detNum) + "\n"
                            else:
                                evaluationLog += "Match Discarded GT #" + str(
                                    matchesGt) + " with Det #" + str(
                                        detNum) + " not single overlap\n"

                numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
                if numGtCare == 0:
                    recall = float(1)
                    precision = float(0) if len(detRects) > 0 else float(1)
                else:
                    recall = float(recallAccum) / numGtCare
                    precision = float(0) if (
                        len(detRects) - len(detDontCareRectsNum)
                    ) == 0 else float(precisionAccum) / (
                        len(detRects) - len(detDontCareRectsNum))
                hmean = 0 if (precision +
                              recall) == 0 else 2.0 * precision * recall / (
                                  precision + recall)

        methodRecallSum += recallAccum
        methodPrecisionSum += precisionAccum
        numGt += len(gtRects) - len(gtDontCareRectsNum)
        numDet += len(detRects) - len(detDontCareRectsNum)

        perSampleMetrics[resFile] = {
            'precision': precision,
            'recall': recall,
            'hmean': hmean,
            'pairs': pairs,
            'recallMat': [] if len(detRects) > 100 else recallMat.tolist(),
            'precisionMat':
            [] if len(detRects) > 100 else precisionMat.tolist(),
            'gtPolPoints': gtPolPoints,
            'detPolPoints': detPolPoints,
            'gtDontCare': gtDontCareRectsNum,
            'detDontCare': detDontCareRectsNum,
            'evaluationParams': evaluationParams,
            'evaluationLog': evaluationLog
        }

    methodRecall = 0 if numGt == 0 else methodRecallSum / numGt
    methodPrecision = 0 if numDet == 0 else methodPrecisionSum / numDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (
        methodRecall + methodPrecision)

    methodMetrics = {
        'precision': methodPrecision,
        'recall': methodRecall,
        'hmean': methodHmean
    }

    resDict = {
        'calculated': True,
        'Message': '',
        'method': methodMetrics,
        'per_sample': perSampleMetrics
    }

    return resDict
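This DetEval-style variant reads several additional keys from `evaluationParams`. A sketch of what that dictionary might contain is shown below; every constraint and score value is a placeholder, not a value taken from this snippet.

# Hypothetical parameter set for the DetEval-style evaluation above;
# all values are assumptions used only for illustration.
evaluationParams = {
    'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
    'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
    'CRLF': False,
    'AREA_RECALL_CONSTRAINT': 0.8,        # min. area recall for a match
    'AREA_PRECISION_CONSTRAINT': 0.4,     # min. area precision for a match
    'EV_PARAM_IND_CENTER_DIFF_THR': 1.0,  # normalised centre-distance threshold
    'MTYPE_OO_O': 1.0,                    # score for a one-to-one match
    'MTYPE_OM_O': 0.8,                    # score for a one-to-many match
    'MTYPE_OM_M': 1.0,                    # score for a many-to-one match
}

results = evaluate_method('gt.zip', 'submit.zip', evaluationParams)
print(results['method'])  # {'precision': ..., 'recall': ..., 'hmean': ...}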
Example #6
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results.
        Results: dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8, 'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: { 'sample1' : { 'Precision':0.8, 'Recall':0.9 }, 'sample2' : { 'Precision':0.8, 'Recall':0.9 } }
    """  
    for module,alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points,correctOffset=False):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """        
        
        if correctOffset: #this will subtract 1 from the coordinates that correspond to the xmax and ymax
            points[2] -= 1
            points[4] -= 1
            points[5] -= 1
            points[7] -= 1
            
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(points[0])
        resBoxes[0,4]=int(points[1])
        resBoxes[0,1]=int(points[2])
        resBoxes[0,5]=int(points[3])
        resBoxes[0,2]=int(points[4])
        resBoxes[0,6]=int(points[5])
        resBoxes[0,3]=int(points[6])
        resBoxes[0,7]=int(points[7])
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_polygon(rect):
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(rect.xmin)
        resBoxes[0,4]=int(rect.ymax)
        resBoxes[0,1]=int(rect.xmin)
        resBoxes[0,5]=int(rect.ymin)
        resBoxes[0,2]=int(rect.xmax)
        resBoxes[0,6]=int(rect.ymin)
        resBoxes[0,3]=int(rect.xmax)
        resBoxes[0,7]=int(rect.ymax)

        pointMat = resBoxes[0].reshape([2,4]).T
        
        return plg.Polygon( pointMat)
    
    def rectangle_to_points(rect):
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points
        
    def get_union(pD,pG):
        areaA = pD.area()
        areaB = pG.area()
        return areaA + areaB - get_intersection(pD, pG)
        
    def get_intersection_over_union(pD,pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except ZeroDivisionError:
            return 0
        
    def get_intersection(pD,pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()
    
    def compute_ap(confList, matchList,numGtCare):
        correct = 0
        AP = 0
        if len(confList)>0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct)/(n + 1)

            if numGtCare>0:
                AP /= numGtCare
            
        return AP  
    
    def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True):
        
        if onlyRemoveFirstLastCharacterGT:
            #special characters in GT are allowed only at initial or final position
            if (transGt==transDet):
                return True        

            if specialCharacters.find(transGt[0])>-1:
                if transGt[1:]==transDet:
                    return True

            if specialCharacters.find(transGt[-1])>-1:
                if transGt[0:len(transGt)-1]==transDet:
                    return True

            if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1:
                if transGt[1:len(transGt)-1]==transDet:
                    return True
            return False
        else:
            #Special characters are removed from the beginning and the end of both Detection and GroundTruth
            while len(transGt)>0 and specialCharacters.find(transGt[0])>-1:
                transGt = transGt[1:]
				
            while len(transDet)>0 and specialCharacters.find(transDet[0])>-1:
                transDet = transDet[1:]
                
            while len(transGt)>0 and specialCharacters.find(transGt[-1])>-1 :
                transGt = transGt[0:len(transGt)-1]
                
            while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1:
                transDet = transDet[0:len(transDet)-1]
                
            return transGt == transDet
                    
    
    def include_in_dictionary(transcription):
        """
        Function used in Word Spotting that determines whether the Ground Truth transcription meets the rules to enter the dictionary. If not, the transcription is treated as don't care
        """        
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]
        
        #hyphens at the start or end of the word
        transcription = transcription.strip('-')
        
        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')
        
        transcription = transcription.strip()
        
        if len(transcription) != len(transcription.replace(" ", "")):
            return False
        
        if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']:
            return False
        
        notAllowed = "×÷·"
        
        range1 = [ ord(u'a'), ord(u'z') ]
        range2 = [ ord(u'A'), ord(u'Z') ]
        range3 = [ ord(u'À'), ord(u'ƿ') ]
        range4 = [ ord(u'DŽ'), ord(u'ɿ') ]
        range5 = [ ord(u'Ά'), ord(u'Ͽ') ]
        range6 = [ ord(u'-'), ord(u'-') ]
        
        for char in transcription :
            charCode = ord(char)
            if(notAllowed.find(char) != -1):
                return False
            
            valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] )
            if not valid:
                return False
        
        return True
    
    def include_in_dictionary_transcription(transcription):
        """
        Function applied to the Ground Truth transcriptions used in Word Spotting. It removes special characters or terminations
        """
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]
        
        #hyphens at the start or end of the word
        transcription = transcription.strip('-')
        
        specialCharacters = "'!?.:,*\"()·[]/"
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')
        
        transcription = transcription.strip()
        
        return transcription
    
    perSampleMetrics = {}
    
    matchedSum = 0
    
    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
    
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)
   
    numGlobalCareGt = 0
    numGlobalCareDet = 0
   
    arrGlobalConfidences = []
    arrGlobalMatches = []

    for resFile in gt:
        
        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        if gtFile is None:
            raise Exception("The file %s is not UTF-8" % resFile)

        recall = 0
        precision = 0
        hmean = 0    
        detCorrect = 0
        iouMat = np.empty([1,1])
        gtPols = []
        detPols = []
        gtTrans = []
        detTrans = []
        gtPolPoints = []
        detPolPoints = []  
        gtDontCarePolsNum = [] #Indices of Ground Truth polygons marked as don't care
        detDontCarePolsNum = [] #Indices of Detected polygons matched with a don't-care GT
        detMatchedNums = []
        pairs = []
        
        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0
        
        evaluationLog = ""

        pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)

            #In word spotting we filter out some transcriptions with special characters
            if evaluationParams['WORD_SPOTTING']:
                if not dontCare:
                    if not include_in_dictionary(transcription):
                        dontCare = True
                    else:
                        transcription = include_in_dictionary_transcription(transcription)

            gtTrans.append(transcription)
            if dontCare:
                gtDontCarePolsNum.append( len(gtPols)-1 ) 

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n")
        
        if resFile in subm:
            
            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile]) 
                    
            pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES'])
            
            for n in range(len(pointsList)):
                points = pointsList[n]
                transcription = transcriptionsList[n]
                
                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:                    
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                detTrans.append(transcription)

                if len(gtDontCarePolsNum)>0 :
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol,detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ):
                            detDontCarePolsNum.append( len(detPols)-1 )
                            break
                            
            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n")
            
            if len(gtPols)>0 and len(detPols)>0:
                #Calculate the IoU matrix
                outputShape=[len(gtPols),len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols),np.int8)
                detRectMat = np.zeros(len(detPols),np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG)

                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum :
                            if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                #detection matched only if transcription is equal
                                if evaluationParams['WORD_SPOTTING']:
                                    correct = gtTrans[gtNum].upper() == detTrans[detNum].upper()
                                else:
                                    correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True
                                detCorrect += (1 if correct else 0)
                                if correct:
                                    detMatchedNums.append(detNum)
                                pairs.append({'gt':gtNum,'det':detNum,'correct':correct})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n"
                                
            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum :
                        #we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)
                
        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare >0 else float(1)
            sampleAP = precision
        else:
            recall = float(detCorrect) / numGtCare
            precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare
            if evaluationParams['CONFIDENCES']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare )                    

        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)
            
        matchedSum += detCorrect
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics[resFile] = {
                                        'precision':precision,
                                        'recall':recall,
                                        'hmean':hmean,
                                        'pairs':pairs,
                                        'AP':sampleAP,
                                        'iouMat':[] if len(detPols)>100 else iouMat.tolist(),
                                        'gtPolPoints':gtPolPoints,
                                        'detPolPoints':detPolPoints,
                                        'gtTrans':gtTrans,
                                        'detTrans':detTrans,
                                        'gtDontCare':gtDontCarePolsNum,
                                        'detDontCare':detDontCarePolsNum,
                                        'evaluationParams': evaluationParams,
                                        'evaluationLog': evaluationLog     
                                    }
        
    # Compute AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)
    
    methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP  }

    resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics}
    
    return resDict
Example #7
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluates the method and returns the results.
        Results: dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8, 'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: { 'sample1' : { 'Precision':0.8, 'Recall':0.9 }, 'sample2' : { 'Precision':0.8, 'Recall':0.9 } }
    """

    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def is_latin(s):
        try:
            s.encode(encoding="utf-8").decode("ascii")
        except UnicodeDecodeError:
            return False
        else:
            return True

    perSampleMetrics = {}

    methodRecallSum = 0
    methodPrecisionSum = 0

    gt = rrc_evaluation_funcs.load_zip_file(
        gtFilePath, evaluationParams["GT_SAMPLE_NAME_2_ID"]
    )
    subm = rrc_evaluation_funcs.load_zip_file(
        submFilePath, evaluationParams["DET_SAMPLE_NAME_2_ID"], True
    )

    numGt = 0
    numDet = 0

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])

        _, _, gtWordsLine = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams["CRLF"], False, True, False
        )

        gtWords = (" ").join(gtWordsLine).split(" ")

        gtNumWords = len(gtWords)
        detWords = []
        detNumWords = 0

        recall = 0
        precision = 0
        hmean = 0
        recallAccum = 0.0
        precisionAccum = 0.0

        log = ""

        if resFile in subm:
            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
            detLines = detFile.split("\n")
            for line in detLines:
                line = line.replace("\r", "").replace("\n", "")
                if line != "":
                    detWords.append(line)

            detNumWords = len(detWords)
            for word in detWords:
                log += "<br>det word = " + word + " "
                if word in gtWords:
                    log += "found"
                    recallAccum += 1
                    precisionAccum += 1
                    gtWords.remove(word)
                else:
                    log += "not found"

        precision = (
            float(0) if detNumWords == 0 else float(precisionAccum) / detNumWords
        )
        recall = float(1) if gtNumWords == 0 else float(recallAccum) / gtNumWords
        hmean = (
            0
            if (precision + recall) == 0
            else 2.0 * precision * recall / (precision + recall)
        )

        methodRecallSum += recallAccum
        methodPrecisionSum += precisionAccum
        numGt += gtNumWords
        numDet += detNumWords

        perSampleMetrics[resFile] = {
            "precision": precision,
            "recall": recall,
            "hmean": hmean,
            "gtWords": gtNumWords,
            "detWords": detNumWords,
            "correct": recallAccum,
            "log": log,
        }

    methodRecall = 0 if numGt == 0 else methodRecallSum / numGt
    methodPrecision = 0 if numDet == 0 else methodPrecisionSum / numDet
    methodHmean = (
        0
        if methodRecall + methodPrecision == 0
        else 2 * methodRecall * methodPrecision / (methodRecall + methodPrecision)
    )

    methodMetrics = {
        "precision": methodPrecision,
        "recall": methodRecall,
        "hmean": methodHmean,
    }

    resDict = {
        "calculated": True,
        "Message": "",
        "method": methodMetrics,
        "per_sample": perSampleMetrics,
    }

    return resDict
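All of the evaluate_method variants above return the same result structure (a 'method' dictionary with global metrics and a 'per_sample' dictionary keyed by sample). A minimal sketch of driving one of them directly is shown below; the file paths, regexes, and flag values are assumptions, not part of the snippets above.

# Minimal driver sketch; paths and parameter values are assumptions.
evaluationParams = {
    'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
    'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
    'CRLF': False,
}

resDict = evaluate_method('gt.zip', 'submit.zip', evaluationParams)

print('Precision: %.4f' % resDict['method']['precision'])
print('Recall:    %.4f' % resDict['method']['recall'])
print('Hmean:     %.4f' % resDict['method']['hmean'])

# Per-sample metrics, keyed by the sample id extracted from the file names.
for sample, metrics in resDict['per_sample'].items():
    print(sample, metrics['hmean'])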