def main():
    global args
    global pargs

    # Parse optional arguments
    pargs = parser.parse_args()

    # Parameters that should be modified by the user
    args.groundTruthSearch = os.path.join(pargs.minicity, 'gtFine', 'val',
                                          '*.png')

    # use the ground truth search string specified above
    groundTruthImgList = glob.glob(args.groundTruthSearch)
    if not groundTruthImgList:
        printError(
            'Cannot find any ground truth images to use for evaluation. Searched for: {}'
            .format(args.groundTruthSearch))

    # get the corresponding prediction for each ground truth image
    predictionImgList = []
    for gt in groundTruthImgList:
        predictionImgList.append(getPrediction(args, gt))

    # evaluate
    evaluateImgLists(predictionImgList, groundTruthImgList, args)

    return
def main():
    # Where to look for Cityscapes
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')

    # how to search for all ground truth
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                "*_gt*_polygons.json")

    # search files
    filesFine = glob.glob(searchFine)
    filesFine.sort()
    filesCoarse = glob.glob(searchCoarse)
    filesCoarse.sort()

    # concatenate fine and coarse
    files = filesFine + filesCoarse
    # files = filesFine  # use this line if fine is enough for now.

    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")

    # a bit verbose
    print("Processing {} annotation files".format(len(files)))

    # iterate through files
    progress = 0
    print("Progress: {:>3} %".format(progress * 100 // len(files)), end=' ')
    for f in files:
        # create the output filename
        dst = f.replace("_polygons.json", "_labelTrainIds.png")

        # do the conversion
        try:
            json2labelImg(f, dst, "trainIds")
        except Exception:
            print("Failed to convert: {}".format(f))
            raise

        # status
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 // len(files)),
              end=' ')
        sys.stdout.flush()
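# A minimal illustration (not part of the original script) of what the
# "trainIds" encoding selected above means: each Cityscapes label id is
# mapped to a compact train id, with classes that are ignored during
# training collapsed to 255. This sketch assumes the standard
# cityscapesscripts labels module is importable.
def _demoTrainIdMapping():
    from cityscapesscripts.helpers.labels import labels

    id2trainId = {label.id: label.trainId for label in labels}
    print(id2trainId[7])   # road   -> 0
    print(id2trainId[26])  # car    -> 13
    print(id2trainId[4])   # static -> 255 (ignored during training)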
def getPrediction(args, groundTruthFile):
    # determine the prediction path, if the method is called for the first time
    if not pargs.results:
        rootPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'results')
        if not os.path.isdir(rootPath):
            printError('Could not find a result root folder.')
        pargs.results = rootPath

    # walk the prediction path, if that has not happened yet
    if not args.predictionWalk:
        walk = []
        for root, dirnames, filenames in os.walk(pargs.results):
            walk.append((root, filenames))
        args.predictionWalk = walk

    csFile = getCsFileInfo(groundTruthFile)
    filePattern = '{}_{}_{}*.png'.format(csFile.city, csFile.sequenceNb,
                                         csFile.frameNb)

    predictionFile = None
    for root, filenames in args.predictionWalk:
        for filename in fnmatch.filter(filenames, filePattern):
            if not predictionFile:
                predictionFile = os.path.join(root, filename)
            else:
                printError(
                    'Found multiple predictions for ground truth {}'.format(
                        groundTruthFile))

    if not predictionFile:
        printError(
            'Found no prediction for ground truth {}'.format(groundTruthFile))

    return predictionFile
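# A minimal sketch (hypothetical file names) of how the search pattern built
# in getPrediction() matches predictions: Cityscapes files are named
# "<city>_<sequenceNb>_<frameNb>_<suffix>", so a wildcard on the suffix finds
# the prediction regardless of how the method named its outputs.
def _demoFilePattern():
    import fnmatch

    filePattern = '{}_{}_{}*.png'.format('frankfurt', '000000', '000294')
    filenames = [
        'frankfurt_000000_000294_pred.png',
        'frankfurt_000000_000576_pred.png',
    ]
    print(fnmatch.filter(filenames, filePattern))
    # -> ['frankfurt_000000_000294_pred.png']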
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix,
                 instanceStats, perImageStats, args):
    # Loading all resources for evaluation.
    try:
        predictionImg = Image.open(predictionImgFileName)
        predictionNp = np.array(predictionImg)
    except Exception:
        printError('Unable to load ' + predictionImgFileName)
    try:
        groundTruthImg = Image.open(groundTruthImgFileName)
        groundTruthNp = np.array(groundTruthImg)
    except Exception:
        printError('Unable to load ' + groundTruthImgFileName)
    # load ground truth instances, if needed
    if args.evalInstLevelScore:
        groundTruthInstanceImgFileName = groundTruthImgFileName.replace(
            'labelIds', 'instanceIds')
        try:
            instanceImg = Image.open(groundTruthInstanceImgFileName)
            instanceNp = np.array(instanceImg)
        except Exception:
            printError('Unable to load ' + groundTruthInstanceImgFileName)

    # Check for equal image sizes
    if predictionImg.size[0] != groundTruthImg.size[0]:
        printError('Image widths of ' + predictionImgFileName + ' and ' +
                   groundTruthImgFileName + ' are not equal.')
    if predictionImg.size[1] != groundTruthImg.size[1]:
        printError('Image heights of ' + predictionImgFileName + ' and ' +
                   groundTruthImgFileName + ' are not equal.')
    if len(predictionNp.shape) != 2:
        printError('Predicted image has multiple channels.')

    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth * imgHeight

    # Evaluate images: encode each (gt, pred) label pair into a single
    # integer so that one np.unique call counts all confusion-matrix entries.
    encoding_value = max(groundTruthNp.max(),
                         predictionNp.max()).astype(np.int32) + 1
    encoded = (groundTruthNp.astype(np.int32) * encoding_value) + predictionNp

    values, cnt = np.unique(encoded, return_counts=True)

    for value, c in zip(values, cnt):
        pred_id = value % encoding_value
        gt_id = int((value - pred_id) / encoding_value)
        if gt_id not in args.evalLabels:
            printError('Unknown label with id {:}'.format(gt_id))
        confMatrix[gt_id][pred_id] += c

    if args.evalInstLevelScore:
        # Generate category masks
        categoryMasks = {}
        for category in instanceStats['categories']:
            categoryMasks[category] = np.in1d(
                predictionNp,
                instanceStats['categories'][category]['labelIds']).reshape(
                    predictionNp.shape)

        instList = np.unique(instanceNp[instanceNp > 1000])
        for instId in instList:
            labelId = int(instId / 1000)
            label = id2label[labelId]
            if label.ignoreInEval:
                continue

            mask = instanceNp == instId
            instSize = np.count_nonzero(mask)

            tp = np.count_nonzero(predictionNp[mask] == labelId)
            fn = instSize - tp

            weight = args.avgClassSize[label.name] / float(instSize)
            tpWeighted = float(tp) * weight
            fnWeighted = float(fn) * weight

            instanceStats['classes'][label.name]['tp'] += tp
            instanceStats['classes'][label.name]['fn'] += fn
            instanceStats['classes'][label.name]['tpWeighted'] += tpWeighted
            instanceStats['classes'][label.name]['fnWeighted'] += fnWeighted

            category = label.category
            if category in instanceStats['categories']:
                catTp = np.count_nonzero(
                    np.logical_and(mask, categoryMasks[category]))
                catFn = instSize - catTp

                catTpWeighted = float(catTp) * weight
                catFnWeighted = float(catFn) * weight

                instanceStats['categories'][category]['tp'] += catTp
                instanceStats['categories'][category]['fn'] += catFn
                instanceStats['categories'][category][
                    'tpWeighted'] += catTpWeighted
                instanceStats['categories'][category][
                    'fnWeighted'] += catFnWeighted

    if args.evalPixelAccuracy:
        notIgnoredLabels = [
            l for l in args.evalLabels if not id2label[l].ignoreInEval
        ]
        # mask of pixels whose ground truth label is evaluated
        notIgnoredPixels = np.in1d(
            groundTruthNp, notIgnoredLabels).reshape(groundTruthNp.shape)
        # correctly predicted pixels among the not-ignored ones
        correctPixels = np.logical_and(notIgnoredPixels,
                                       predictionNp == groundTruthNp)
        perImageStats[predictionImgFileName] = {}
        perImageStats[predictionImgFileName][
            'nbNotIgnoredPixels'] = np.count_nonzero(notIgnoredPixels)
        perImageStats[predictionImgFileName][
            'nbCorrectPixels'] = np.count_nonzero(correctPixels)

    return nbPixels
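# A minimal numpy sketch (toy 2x2 arrays, not from the original script) of
# the pair-encoding trick in evaluatePair(): packing each (gt, pred) label
# pair into one integer lets a single np.unique() call count every
# confusion-matrix cell, instead of looping over pixels in Python.
def _demoPairEncoding():
    import numpy as np

    gt = np.array([[7, 7], [11, 26]], dtype=np.int32)
    pred = np.array([[7, 11], [11, 26]], dtype=np.int32)

    encoding_value = int(max(gt.max(), pred.max())) + 1  # 27
    encoded = gt * encoding_value + pred

    values, counts = np.unique(encoded, return_counts=True)
    for value, count in zip(values, counts):
        pred_id = value % encoding_value
        gt_id = (value - pred_id) // encoding_value
        print(gt_id, pred_id, count)
    # prints: 7 7 1 / 7 11 1 / 11 11 1 / 26 26 1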
def evaluateImgLists(predictionImgList, groundTruthImgList, args):
    if len(predictionImgList) != len(groundTruthImgList):
        printError(
            'List of images for prediction and ground truth are not of equal size.'
        )
    confMatrix = generateMatrix(args)
    instStats = generateInstanceStats(args)
    perImageStats = {}
    nbPixels = 0

    if not args.quiet:
        print('Evaluating {} pairs of images...'.format(
            len(predictionImgList)))

    # Evaluate all pairs of images and accumulate them into the matrix
    for i in range(len(predictionImgList)):
        predictionImgFileName = predictionImgList[i]
        groundTruthImgFileName = groundTruthImgList[i]
        # print('Evaluate', predictionImgFileName, '<>', groundTruthImgFileName)
        nbPixels += evaluatePair(predictionImgFileName,
                                 groundTruthImgFileName, confMatrix,
                                 instStats, perImageStats, args)
        if not args.quiet:
            print('\rImages Processed: {}'.format(i + 1), end=' ')
            sys.stdout.flush()
    if not args.quiet:
        print('\n')

    # sanity check
    if confMatrix.sum() != nbPixels:
        printError(
            'Number of analyzed pixels and entries in confusion matrix disagree: confMatrix {}, pixels {}'
            .format(confMatrix.sum(), nbPixels))

    # print confusion matrix
    if not args.quiet:
        printConfMatrix(confMatrix, args)

    # Calculate IOU scores on class level from matrix
    classScoreList = {}
    for label in args.evalLabels:
        labelName = id2label[label].name
        classScoreList[labelName] = getIouScoreForLabel(
            label, confMatrix, args)

    # Calculate instance IOU scores on class level from matrix
    classInstScoreList = {}
    for label in args.evalLabels:
        labelName = id2label[label].name
        classInstScoreList[labelName] = getInstanceIouScoreForLabel(
            label, confMatrix, instStats, args)

    # Print IOU scores
    if not args.quiet:
        print('')
        print('')
        printClassScores(classScoreList, classInstScoreList, args)
        iouAvgStr = getColorEntry(getScoreAverage(
            classScoreList, args), args) + '{avg:5.3f}'.format(
                avg=getScoreAverage(classScoreList, args)) + args.nocol
        niouAvgStr = getColorEntry(getScoreAverage(
            classInstScoreList, args), args) + '{avg:5.3f}'.format(
                avg=getScoreAverage(classInstScoreList, args)) + args.nocol
        print('--------------------------------')
        print('Score Average : ' + iouAvgStr + ' ' + niouAvgStr)
        print('--------------------------------')
        print('')

    # Calculate IOU scores on category level from matrix
    categoryScoreList = {}
    for category in category2labels.keys():
        categoryScoreList[category] = getIouScoreForCategory(
            category, confMatrix, args)

    # Calculate instance IOU scores on category level from matrix
    categoryInstScoreList = {}
    for category in category2labels.keys():
        categoryInstScoreList[category] = getInstanceIouScoreForCategory(
            category, confMatrix, instStats, args)

    # Print IOU scores
    if not args.quiet:
        print('')
        printCategoryScores(categoryScoreList, categoryInstScoreList, args)
        iouAvgStr = getColorEntry(getScoreAverage(
            categoryScoreList, args), args) + '{avg:5.3f}'.format(
                avg=getScoreAverage(categoryScoreList, args)) + args.nocol
        niouAvgStr = getColorEntry(
            getScoreAverage(categoryInstScoreList, args),
            args) + '{avg:5.3f}'.format(
                avg=getScoreAverage(categoryInstScoreList, args)) + args.nocol
        print('--------------------------------')
        print('Score Average : ' + iouAvgStr + ' ' + niouAvgStr)
        print('--------------------------------')
        print('')

    allResultsDict = createResultDict(confMatrix, classScoreList,
                                      classInstScoreList, categoryScoreList,
                                      categoryInstScoreList, perImageStats,
                                      args)
    # write result file
    if args.JSONOutput:
        writeJSONFile(allResultsDict, args)
    writeDict2Txt(allResultsDict, 'results.txt')

    # return confusion matrix
    return allResultsDict
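# A minimal sketch of the standard per-label IoU formula that
# getIouScoreForLabel() (defined elsewhere in this repo) presumably
# implements; the real helper additionally excludes labels marked
# ignoreInEval from the false-positive count, which this simplified
# version does not.
def _demoIouFromConfMatrix(label, confMatrix):
    import numpy as np

    tp = np.float64(confMatrix[label, label])  # correctly classified pixels
    fn = confMatrix[label, :].sum() - tp       # missed pixels of this label
    fp = confMatrix[:, label].sum() - tp       # pixels wrongly given this label
    denom = tp + fp + fn
    return float('nan') if denom == 0 else float(tp / denom)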