Code Example #1
File: evaluate.py  Project: rohanKF/UAVidToolKit
def getConfusionMatrixForImageList(classNum,
                                   predfileList,
                                   truefileList,
                                   evalLabels=LABELS):
    assert (len(predfileList) == len(truefileList))
    enc = clr_trans
    cm = np.zeros(shape=[classNum, classNum], dtype=np.uint64)
    print('CSUPPORT:', CSUPPORT)
    if CSUPPORT:
        print('This is a fast C++ way evaluation.')
    else:
        print('This is a slow python way evaluation.')
    for idx in tqdm(range(len(predfileList))):
        predfile = predfileList[idx]
        truefile = truefileList[idx]
        imagePred = np.array(Image.open(predfile))
        imageTrue = np.array(Image.open(truefile))
        if len(imagePred.shape) == 3 and len(imageTrue.shape) == 3:
            imagePred = enc.transform(imagePred, dtype=np.uint8)
            imageTrue = enc.transform(imageTrue, dtype=np.uint8)
        assert len(imagePred.shape) == 2 and len(imageTrue.shape) == 2
        if CSUPPORT:
            cm = addToConfusionMatrix.cEvaluatePair(imagePred, imageTrue, cm,
                                                    evalLabels)
        else:
            #slower python way
            cm = calculateConfusionMatrix(cm, imagePred, imageTrue, evalLabels)
    return cm
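
The slow path above calls a calculateConfusionMatrix helper that is not shown. Below is a minimal sketch of what such a helper could look like, assuming labels are small non-negative integers that fit inside the classNum x classNum matrix; the name and signature are taken from the call site, and the project's actual implementation may differ.

import numpy as np

def calculateConfusionMatrix(cm, imagePred, imageTrue, evalLabels):
    # Flatten both label maps and accumulate (true, pred) pairs in one pass.
    pred = imagePred.ravel().astype(np.int64)
    true = imageTrue.ravel().astype(np.int64)
    if not np.all(np.isin(true, evalLabels)):
        raise ValueError("ground truth contains labels outside evalLabels")
    n = cm.shape[0]
    if pred.max() >= n or true.max() >= n:
        raise ValueError("label id exceeds the confusion-matrix size")
    # Joint index true*n + pred, counted with a single bincount call.
    counts = np.bincount(true * n + pred, minlength=n * n).reshape(n, n)
    cm += counts.astype(cm.dtype)
    return cm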
Code Example #2
def evaluatePair(predictionImg, groundTruthImg, confMatrix, instanceStats,
                 perImageStats, args):

    # Loading all resources for evaluation.

    predictionNp = np.array(predictionImg)
    groundTruthNp = np.array(groundTruthImg)

    # np.shape returns (rows, cols), i.e. (height, width)
    imgHeight = predictionNp.shape[0]
    imgWidth = predictionNp.shape[1]
    nbPixels = imgWidth * imgHeight

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(
            predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way

        for (groundTruthImgPixel,
             predictionImgPixel) in izip(groundTruthNp.flatten(),
                                         predictionNp.flatten()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError(
                    "Unknown label with id {:}".format(groundTruthImgPixel))

            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    return nbPixels
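
For reference, the per-pixel izip loop in the slow path can be expressed as a single NumPy update. This is a sketch of an equivalent update (not the project's code, and with error handling reduced to a single check), assuming both label maps are 2-D integer arrays.

import numpy as np

def accumulatePair(predictionNp, groundTruthNp, confMatrix, evalLabels):
    gt = groundTruthNp.ravel()
    pred = predictionNp.ravel()
    unknown = ~np.isin(gt, evalLabels)
    if unknown.any():
        raise ValueError("Unknown label with id {:}".format(int(gt[unknown][0])))
    # np.add.at accumulates correctly even when (gt, pred) pairs repeat.
    np.add.at(confMatrix, (gt, pred), 1)
    return confMatrix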
Code Example #3
def evaluatePairPytorch(prediction, groundtruth, confMatrix, perImageStats,
                        args):
    # Loading all resources for evaluation.

    predictionImg = prediction
    predictionNp = np.array(predictionImg)

    groundTruthImg = groundtruth
    groundTruthNp = np.array(groundTruthImg)

    # Check for equal image sizes
    if (predictionImg.size[0] != groundTruthImg.size[0]):
        printError("Image widths are not equal.")
    if (predictionImg.size[1] != groundTruthImg.size[1]):
        printError("Image heights  are not equal.")
    if (len(predictionNp.shape) != 2):
        printError("Predicted image has multiple channels.")

    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth * imgHeight

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(
            predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way
        for (groundTruthImgPixel,
             predictionImgPixel) in izip(groundTruthImg.getdata(),
                                         predictionImg.getdata()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError(
                    "Unknown label with id {:}".format(groundTruthImgPixel))

            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    if args.evalPixelAccuracy:
        notIgnoredLabels = [
            l for l in args.evalLabels if not id2label[l].ignoreInEval
        ]
        notIgnoredPixels = np.in1d(groundTruthNp,
                                   notIgnoredLabels,
                                   invert=True).reshape(groundTruthNp.shape)
        erroneousPixels = np.logical_and(notIgnoredPixels,
                                         (predictionNp != groundTruthNp))
        # This variant receives arrays rather than file names, so key the
        # per-image stats by a running index.
        imageKey = len(perImageStats)
        perImageStats[imageKey] = {}
        perImageStats[imageKey][
            "nbNotIgnoredPixels"] = np.count_nonzero(notIgnoredPixels)
        perImageStats[imageKey][
            "nbCorrectPixels"] = np.count_nonzero(erroneousPixels)

    return nbPixels
Code Example #4
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix,
                 instanceStats, perImageStats, args):
    # Loading all resources for evaluation.
    try:
        predictionImg = Image.open(open(predictionImgFileName, 'rb'))
        predictionNp = np.array(predictionImg)
    except:
        printError("Unable to load " + predictionImgFileName)
    try:
        groundTruthImg = Image.open(groundTruthImgFileName)
        groundTruthNp = np.array(groundTruthImg)
    except:
        printError("Unable to load " + groundTruthImgFileName)
    # load ground truth instances, if needed
    if args.evalInstLevelScore:
        groundTruthInstanceImgFileName = groundTruthImgFileName.replace(
            "labelIds", "instanceIds")
        try:
            instanceImg = Image.open(groundTruthInstanceImgFileName)
            instanceNp = np.array(instanceImg)
        except:
            printError("Unable to load " + groundTruthInstanceImgFileName)

    #print("width",predictionImgFileName,predictionImg.size[0],groundTruthImg.size[0])
    # Check for equal image sizes
    if (predictionImg.size[0] != groundTruthImg.size[0]):
        printError("Image widths of " + predictionImgFileName + " and " +
                   groundTruthImgFileName + " are not equal.")
    if (predictionImg.size[1] != groundTruthImg.size[1]):
        printError("Image heights of " + predictionImgFileName + " and " +
                   groundTruthImgFileName + " are not equal.")
    if (len(predictionNp.shape) != 2):
        printError("Predicted image has multiple channels.")

    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth * imgHeight

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(
            predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way
        for (groundTruthImgPixel,
             predictionImgPixel) in izip(groundTruthImg.getdata(),
                                         predictionImg.getdata()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError(
                    "Unknown label with id {:}".format(groundTruthImgPixel))

            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    if args.evalInstLevelScore:
        # Generate category masks
        categoryMasks = {}
        for category in instanceStats["categories"]:
            categoryMasks[category] = np.in1d(
                predictionNp,
                instanceStats["categories"][category]["labelIds"]).reshape(
                    predictionNp.shape)

        instList = np.unique(instanceNp[instanceNp > 1000])
        for instId in instList:
            labelId = int(instId / 1000)
            label = id2label[labelId]
            if label.ignoreInEval:
                continue

            mask = instanceNp == instId
            instSize = np.count_nonzero(mask)

            tp = np.count_nonzero(predictionNp[mask] == labelId)
            fn = instSize - tp

            weight = args.avgClassSize[label.name] / float(instSize)
            tpWeighted = float(tp) * weight
            fnWeighted = float(fn) * weight

            instanceStats["classes"][label.name]["tp"] += tp
            instanceStats["classes"][label.name]["fn"] += fn
            instanceStats["classes"][label.name]["tpWeighted"] += tpWeighted
            instanceStats["classes"][label.name]["fnWeighted"] += fnWeighted

            category = label.category
            if category in instanceStats["categories"]:
                catTp = 0
                catTp = np.count_nonzero(
                    np.logical_and(mask, categoryMasks[category]))
                catFn = instSize - catTp

                catTpWeighted = float(catTp) * weight
                catFnWeighted = float(catFn) * weight

                instanceStats["categories"][category]["tp"] += catTp
                instanceStats["categories"][category]["fn"] += catFn
                instanceStats["categories"][category][
                    "tpWeighted"] += catTpWeighted
                instanceStats["categories"][category][
                    "fnWeighted"] += catFnWeighted

    if args.evalPixelAccuracy:
        notIgnoredLabels = [
            l for l in args.evalLabels if not id2label[l].ignoreInEval
        ]
        notIgnoredPixels = np.in1d(groundTruthNp,
                                   notIgnoredLabels,
                                   invert=True).reshape(groundTruthNp.shape)
        erroneousPixels = np.logical_and(notIgnoredPixels,
                                         (predictionNp != groundTruthNp))
        perImageStats[predictionImgFileName] = {}
        perImageStats[predictionImgFileName][
            "nbNotIgnoredPixels"] = np.count_nonzero(notIgnoredPixels)
        perImageStats[predictionImgFileName][
            "nbCorrectPixels"] = np.count_nonzero(erroneousPixels)

    return nbPixels
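
Once the confusion matrix has been accumulated over all image pairs, per-class scores follow directly from its rows and columns. The sketch below shows the usual IoU computation for one class, assuming ground truth along rows and predictions along columns as the loops above fill it; it ignores the special handling of ignoreInEval labels that a full evaluation script would add.

import numpy as np

def classIoU(confMatrix, labelId):
    tp = np.float64(confMatrix[labelId, labelId])
    fn = np.float64(confMatrix[labelId, :].sum()) - tp  # missed ground-truth pixels
    fp = np.float64(confMatrix[:, labelId].sum()) - tp  # false positives for this label
    denom = tp + fp + fn
    return float(tp / denom) if denom > 0 else float('nan')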
Code Example #5
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix, instanceStats, perImageStats, args):
    # Loading all resources for evaluation.
    try:
        predictionImg = Image.open(predictionImgFileName)
        predictionNp  = np.array(predictionImg)
    except:
        printError("Unable to load " + predictionImgFileName)
    try:
        groundTruthImg = Image.open(groundTruthImgFileName)
        groundTruthNp = np.array(groundTruthImg)
    except:
        printError("Unable to load " + groundTruthImgFileName)
    # load ground truth instances, if needed
    if args.evalInstLevelScore:
        groundTruthInstanceImgFileName = groundTruthImgFileName.replace("labelIds","instanceIds")
        try:
            instanceImg = Image.open(groundTruthInstanceImgFileName)
            instanceNp  = np.array(instanceImg)
        except:
            printError("Unable to load " + groundTruthInstanceImgFileName)

    # Check for equal image sizes
    if (predictionImg.size[0] != groundTruthImg.size[0]):
        printError("Image widths of " + predictionImgFileName + " and " + groundTruthImgFileName + " are not equal.")
    if (predictionImg.size[1] != groundTruthImg.size[1]):
        printError("Image heights of " + predictionImgFileName + " and " + groundTruthImgFileName + " are not equal.")
    if ( len(predictionNp.shape) != 2 ):
        printError("Predicted image has multiple channels.")

    imgWidth  = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels  = imgWidth*imgHeight

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way
        for (groundTruthImgPixel,predictionImgPixel) in itertools.izip(groundTruthImg.getdata(),predictionImg.getdata()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError("Unknown label with id {:}".format(groundTruthImgPixel))

            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    if args.evalInstLevelScore:
        # Generate category masks
        categoryMasks = {}
        for category in instanceStats["categories"]:
            categoryMasks[category] = np.in1d( predictionNp , instanceStats["categories"][category]["labelIds"] ).reshape(predictionNp.shape)

        instList = np.unique(instanceNp[instanceNp > 1000])
        for instId in instList:
            labelId = int(instId/1000)
            label = id2label[ labelId ]
            if label.ignoreInEval:
                continue

            mask = instanceNp==instId
            instSize = np.count_nonzero( mask )

            tp = np.count_nonzero( predictionNp[mask] == labelId )
            fn = instSize - tp

            weight = args.avgClassSize[label.name] / float(instSize)
            tpWeighted = float(tp) * weight
            fnWeighted = float(fn) * weight

            instanceStats["classes"][label.name]["tp"]         += tp
            instanceStats["classes"][label.name]["fn"]         += fn
            instanceStats["classes"][label.name]["tpWeighted"] += tpWeighted
            instanceStats["classes"][label.name]["fnWeighted"] += fnWeighted

            category = label.category
            if category in instanceStats["categories"]:
                catTp = 0
                catTp = np.count_nonzero( np.logical_and( mask , categoryMasks[category] ) )
                catFn = instSize - catTp

                catTpWeighted = float(catTp) * weight
                catFnWeighted = float(catFn) * weight

                instanceStats["categories"][category]["tp"]         += catTp
                instanceStats["categories"][category]["fn"]         += catFn
                instanceStats["categories"][category]["tpWeighted"] += catTpWeighted
                instanceStats["categories"][category]["fnWeighted"] += catFnWeighted

    if args.evalPixelAccuracy:
        notIgnoredLabels = [l for l in args.evalLabels if not id2label[l].ignoreInEval]
        notIgnoredPixels = np.in1d( groundTruthNp , notIgnoredLabels , invert=True ).reshape(groundTruthNp.shape)
        erroneousPixels = np.logical_and( notIgnoredPixels , ( predictionNp != groundTruthNp ) )
        perImageStats[predictionImgFileName] = {}
        perImageStats[predictionImgFileName]["nbNotIgnoredPixels"] = np.count_nonzero(notIgnoredPixels)
        perImageStats[predictionImgFileName]["nbCorrectPixels"]    = np.count_nonzero(erroneousPixels)

    return nbPixels
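
A hedged usage sketch for the evaluatePair variant above. The file paths and the label-id range are placeholders, the args object only carries the fields the snippet reads, and the module-level names used above (id2label, CSUPPORT, printError) are assumed to be available.

import numpy as np
from argparse import Namespace

# Hypothetical prediction / ground-truth file pairs.
predFiles = ["pred/frankfurt_000000_000294_labelIds.png"]
gtFiles = ["gt/frankfurt_000000_000294_gtFine_labelIds.png"]

args = Namespace(evalLabels=list(range(34)),  # assumed label-id range
                 evalInstLevelScore=False,
                 evalPixelAccuracy=True)

nClasses = len(args.evalLabels)
confMatrix = np.zeros((nClasses, nClasses), dtype=np.uint64)
instanceStats, perImageStats = {}, {}

for predFile, gtFile in zip(predFiles, gtFiles):
    evaluatePair(predFile, gtFile, confMatrix, instanceStats, perImageStats, args)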
Code Example #6
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix,
                 instanceStats, perImageStats, args):
    # Loading all resources for evaluation.
    try:
        predictionImg = Image.open(open(predictionImgFileName, 'rb'))
        predictionNp = np.array(predictionImg)
    except:
        printError("Unable to load " + predictionImgFileName)
    try:
        groundTruthImg = Image.open(groundTruthImgFileName)
        groundTruthNp = np.array(groundTruthImg)
    except:
        printError("Unable to load " + groundTruthImgFileName)
    # load ground truth instances, if needed
    if args.evalInstLevelScore:
        groundTruthInstanceImgFileName = groundTruthImgFileName.replace(
            "labelIds", "instanceIds")
        try:
            instanceImg = Image.open(groundTruthInstanceImgFileName)
            instanceNp = np.array(instanceImg)
        except:
            printError("Unable to load " + groundTruthInstanceImgFileName)

    # Check for equal image sizes
    if (predictionImg.size[0] != groundTruthImg.size[0]):
        printError("Image widths of " + predictionImgFileName + " and " +
                   groundTruthImgFileName + " are not equal.")
    if (predictionImg.size[1] != groundTruthImg.size[1]):
        printError("Image heights of " + predictionImgFileName + " and " +
                   groundTruthImgFileName + " are not equal.")
    if (len(predictionNp.shape) != 2):
        printError("Predicted image has multiple channels.")

    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth * imgHeight

    imageSrc = groundTruthImgFileName.replace("_gtFine_labelIds.png",
                                              "_leftImg8bit.png")
    imgSrc = imageSrc.replace("gtFine", "leftImg8bit")

    print(imgSrc)
    srcImage = Image.open(open(imgSrc, 'rb'))

    if (srcImage.size[0] != predictionImg.size[0]
            or srcImage.size[1] != predictionImg.size[1]):
        printError("Src sizes and pred sizes are different")

    size = (groundTruthImg.size[0], groundTruthImg.size[1])
    background = name2label['unlabeled'].color
    newLabelImg = Image.new("RGBA", size, background)

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(
            predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way
        # a drawer to draw into the image
        #drawer = ImageDraw.Draw(newLabelImg)
        #drawer.point()

        for (groundTruthImgPixel,
             predictionImgPixel) in izip(groundTruthImg.getdata(),
                                         predictionImg.getdata()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError(
                    "Unknown label with id {:}".format(groundTruthImgPixel))
            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    drawer = ImageDraw.Draw(newLabelImg)
    groundTruthPixels = groundTruthImg.load()
    predictionImgPixels = predictionImg.load()
    srcImagePixels = srcImage.load()

    for i in range(groundTruthImg.size[0]):
        for j in range(groundTruthImg.size[1]):
            if (groundTruthPixels[i, j] != predictionImgPixels[i, j]):
                groundTruthPixelij = groundTruthPixels[i, j]
                #print(groundTruthPixelij)
                labelId = groundTruthPixelij
                if (groundTruthPixelij > 1000):
                    labelId = int(groundTruthPixelij / 1000)
                #print(labelId)
                label = id2label[labelId]
                #print(label.color)
                drawer.point([i, j], label.color)
            else:
                rgbaVal = srcImagePixels[i, j] + (170, )
                drawer.point([i, j], rgbaVal)

    dst = predictionImgFileName.replace("_labelIds.png", "_diffLabels.png")
    print(dst)
    newLabelImg.save(dst)
    print("image save done", "", "", "")

    if args.evalInstLevelScore:
        # Generate category masks
        categoryMasks = {}
        for category in instanceStats["categories"]:
            categoryMasks[category] = np.in1d(
                predictionNp,
                instanceStats["categories"][category]["labelIds"]).reshape(
                    predictionNp.shape)

        instList = np.unique(instanceNp[instanceNp > 1000])
        for instId in instList:
            labelId = int(instId / 1000)
            label = id2label[labelId]
            if label.ignoreInEval:
                continue

            mask = instanceNp == instId
            instSize = np.count_nonzero(mask)

            tp = np.count_nonzero(predictionNp[mask] == labelId)
            fn = instSize - tp

            weight = args.avgClassSize[label.name] / float(instSize)
            tpWeighted = float(tp) * weight
            fnWeighted = float(fn) * weight

            instanceStats["classes"][label.name]["tp"] += tp
            instanceStats["classes"][label.name]["fn"] += fn
            instanceStats["classes"][label.name]["tpWeighted"] += tpWeighted
            instanceStats["classes"][label.name]["fnWeighted"] += fnWeighted

            category = label.category
            if category in instanceStats["categories"]:
                catTp = 0
                catTp = np.count_nonzero(
                    np.logical_and(mask, categoryMasks[category]))
                catFn = instSize - catTp

                catTpWeighted = float(catTp) * weight
                catFnWeighted = float(catFn) * weight

                instanceStats["categories"][category]["tp"] += catTp
                instanceStats["categories"][category]["fn"] += catFn
                instanceStats["categories"][category][
                    "tpWeighted"] += catTpWeighted
                instanceStats["categories"][category][
                    "fnWeighted"] += catFnWeighted

    if args.evalPixelAccuracy:
        notIgnoredLabels = [
            l for l in args.evalLabels if not id2label[l].ignoreInEval
        ]
        notIgnoredPixels = np.in1d(groundTruthNp,
                                   notIgnoredLabels,
                                   invert=True).reshape(groundTruthNp.shape)
        erroneousPixels = np.logical_and(notIgnoredPixels,
                                         (predictionNp != groundTruthNp))
        perImageStats[predictionImgFileName] = {}
        perImageStats[predictionImgFileName][
            "nbNotIgnoredPixels"] = np.count_nonzero(notIgnoredPixels)
        perImageStats[predictionImgFileName][
            "nbCorrectPixels"] = np.count_nonzero(erroneousPixels)

    return nbPixels
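
The per-pixel ImageDraw loop above can also be done with array operations. The sketch below is an assumed alternative, not the project's code: it writes ground-truth label colors where prediction and ground truth disagree and the source image at alpha 170 elsewhere, mirroring the loop, and it assumes colorLUT is an id-to-RGB lookup table built from id2label.

import numpy as np
from PIL import Image

def buildDiffOverlay(predictionNp, groundTruthNp, srcImage, colorLUT):
    # colorLUT could be built, for example, as:
    #   np.array([id2label[i].color if i in id2label else (0, 0, 0)
    #             for i in range(256)], dtype=np.uint8)
    srcNp = np.array(srcImage.convert("RGB"), dtype=np.uint8)
    overlay = np.empty(groundTruthNp.shape + (4,), dtype=np.uint8)
    overlay[..., :3] = srcNp
    overlay[..., 3] = 170                     # matching pixels: translucent source image
    diff = predictionNp != groundTruthNp      # mismatching pixels
    overlay[diff, :3] = colorLUT[groundTruthNp[diff]]
    overlay[diff, 3] = 255                    # mismatches: opaque label color (assumed)
    return Image.fromarray(overlay, mode="RGBA")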
Code Example #7
def evaluatePair_image(predictionNp, groundTruthNp, confMatrix, instanceStats,
                       perImageStats, i_im, args):
    # Loading all resources for evaluation.
    predictionNp = np.asarray(predictionNp).astype('uint8')
    groundTruthNp = np.asarray(groundTruthNp).astype('uint8')

    predictionImg = Image.fromarray(predictionNp)

    predictionNp = np.array(predictionImg)

    groundTruthImg = Image.fromarray(groundTruthNp)
    groundTruthNp = np.array(groundTruthImg)

    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth * imgHeight

    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(
            predictionNp, groundTruthNp, confMatrix, args.evalLabels)
    else:
        # the slower python way
        for (groundTruthImgPixel,
             predictionImgPixel) in izip(groundTruthImg.getdata(),
                                         predictionImg.getdata()):
            if (not groundTruthImgPixel in args.evalLabels):
                printError(
                    "Unknown label with id {:}".format(groundTruthImgPixel))

            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1

    if args.evalInstLevelScore:
        # Note: this branch reads an instanceNp instance-id array that this
        # variant never loads; keep evalInstLevelScore disabled unless the
        # caller provides it.
        # Generate category masks
        categoryMasks = {}
        for category in instanceStats["categories"]:
            categoryMasks[category] = np.in1d(
                predictionNp,
                instanceStats["categories"][category]["labelIds"]).reshape(
                    predictionNp.shape)

        instList = np.unique(instanceNp[instanceNp > 1000])
        for instId in instList:
            labelId = int(instId / 1000)
            label = id2label[labelId]
            if label.ignoreInEval:
                continue

            mask = instanceNp == instId
            instSize = np.count_nonzero(mask)

            tp = np.count_nonzero(predictionNp[mask] == labelId)
            fn = instSize - tp

            weight = args.avgClassSize[label.name] / float(instSize)
            tpWeighted = float(tp) * weight
            fnWeighted = float(fn) * weight

            instanceStats["classes"][label.name]["tp"] += tp
            instanceStats["classes"][label.name]["fn"] += fn
            instanceStats["classes"][label.name]["tpWeighted"] += tpWeighted
            instanceStats["classes"][label.name]["fnWeighted"] += fnWeighted

            category = label.category
            if category in instanceStats["categories"]:
                catTp = 0
                catTp = np.count_nonzero(
                    np.logical_and(mask, categoryMasks[category]))
                catFn = instSize - catTp

                catTpWeighted = float(catTp) * weight
                catFnWeighted = float(catFn) * weight

                instanceStats["categories"][category]["tp"] += catTp
                instanceStats["categories"][category]["fn"] += catFn
                instanceStats["categories"][category][
                    "tpWeighted"] += catTpWeighted
                instanceStats["categories"][category][
                    "fnWeighted"] += catFnWeighted

    if args.evalPixelAccuracy:
        notIgnoredLabels = [
            l for l in args.evalLabels if not id2label[l].ignoreInEval
        ]
        notIgnoredPixels = np.in1d(groundTruthNp,
                                   notIgnoredLabels,
                                   invert=True).reshape(groundTruthNp.shape)
        erroneousPixels = np.logical_and(notIgnoredPixels,
                                         (predictionNp != groundTruthNp))
        perImageStats[i_im] = {}
        perImageStats[i_im]["nbNotIgnoredPixels"] = np.count_nonzero(
            notIgnoredPixels)
        perImageStats[i_im]["nbCorrectPixels"] = np.count_nonzero(
            erroneousPixels)

    return nbPixels
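
Finally, a small sketch (not part of the snippets above) of the overall pixel accuracy that the accumulated confusion matrix supports.

import numpy as np

def overallPixelAccuracy(confMatrix):
    total = confMatrix.sum()
    # Diagonal entries are pixels whose predicted label equals the ground truth.
    return float(np.trace(confMatrix)) / float(total) if total else float('nan')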