def main(): files = polygonsjson_path_list # quit if we did not find anything if not files: printError("j) Did not find any files~!~!~!") # a bit verbose print("Processing {} annotation files".format(len(files))) # iterate through files progress = 0 print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ') for f in files: # create the output filename dst = f.replace("_polygons.json", "_color.png") # do the conversion try: json2labelImg(f, dst, "color") except: print("Failed to convert: {}".format(f)) raise # status progress += 1 print("\rProgress: {:>3} %".format(progress * 100 / len(files)), end=' ') sys.stdout.flush()
def evaluatePanoptic(gt_json_file, gt_folder, pred_json_file, pred_folder,
                     resultsFile, iou_type="segm", dilation_ratio=0.005):
    """Evaluate panoptic-quality (PQ) metrics of predictions vs. ground truth.

    Args:
        gt_json_file: ground-truth JSON in COCO panoptic format.
        gt_folder: folder with the ground-truth segmentation PNGs.
        pred_json_file: prediction JSON in COCO panoptic format.
        pred_folder: folder with the predicted segmentation PNGs.
        resultsFile: output path for the metrics JSON, or None to skip saving.
        iou_type: matching mode forwarded to pq_compute_multi_core.
        dilation_ratio: boundary dilation forwarded to pq_compute_multi_core.

    Returns:
        The results dict produced by average_pq().
    """
    start_time = time.time()
    with open(gt_json_file, 'r') as f:
        gt_json = json.load(f)
    with open(pred_json_file, 'r') as f:
        pred_json = json.load(f)
    # category id -> category record, taken from the ground-truth JSON
    categories = {el['id']: el for el in gt_json['categories']}
    print("Evaluation panoptic segmentation metrics:")
    print("Ground truth:")
    print("\tSegmentation folder: {}".format(gt_folder))
    print("\tJSON file: {}".format(gt_json_file))
    print("Prediction:")
    print("\tSegmentation folder: {}".format(pred_folder))
    print("\tJSON file: {}".format(pred_json_file))
    # printError is defined elsewhere in this file; presumably it aborts --
    # TODO confirm it does not return and continue with a bad folder.
    if not os.path.isdir(gt_folder):
        printError(
            "Folder {} with ground truth segmentations doesn't exist".format(
                gt_folder))
    if not os.path.isdir(pred_folder):
        printError(
            "Folder {} with predicted segmentations doesn't exist".format(
                pred_folder))
    pred_annotations = {el['image_id']: el for el in pred_json['annotations']}
    # pair each ground-truth annotation with the prediction for the same image
    matched_annotations_list = []
    for gt_ann in gt_json['annotations']:
        image_id = gt_ann['image_id']
        if image_id not in pred_annotations:
            raise Exception(
                'no prediction for the image with id: {}'.format(image_id))
        matched_annotations_list.append((gt_ann, pred_annotations[image_id]))
    pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder,
                                    pred_folder, categories, iou_type,
                                    dilation_ratio)
    results = average_pq(pq_stat, categories)
    # saving is optional: skip entirely when no output path was given
    if resultsFile is not None:
        with open(resultsFile, 'w') as f:
            print("Saving computed results in {}".format(resultsFile))
            json.dump(results, f, sort_keys=True, indent=4)
    print_results(results, categories)
    t_delta = time.time() - start_time
    print("Time elapsed: {:0.2f} seconds".format(t_delta))
    return results
def main():
    """Convert all found *_polygons.json annotations into
    *_labelTrainIds.png label images."""
    # Where to look for Cityscapes
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')
    # MODIFIED: customized data path
    # NOTE(review): this unconditionally overrides the path resolved above,
    # including $CITYSCAPES_DATASET -- confirm that is intended.
    cityscapesPath = os.path.join(
        '/home/kywongaz/All_Models/pytorch-deeplab-xception/data/cityscapes/gtFine_trainvaltest'
    )
    # how to search for all ground truth
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                "*_gt*_polygons.json")
    # search files
    filesFine = glob.glob(searchFine)
    filesFine.sort()
    filesCoarse = glob.glob(searchCoarse)
    filesCoarse.sort()
    # concatenate fine and coarse
    files = filesFine + filesCoarse
    # files = filesFine # use this line if fine is enough for now.
    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")
    # a bit verbose
    print("Processing {} annotation files".format(len(files)))
    # iterate through files
    progress = 0
    print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ')
    for f in files:
        # create the output filename
        dst = f.replace("_polygons.json", "_labelTrainIds.png")
        # do the conversion
        try:
            json2labelImg(f, dst, "trainIds")
        except:
            # name the offending file, then re-raise
            print("Failed to convert: {}".format(f))
            raise
        # status
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
def main():
    """Convert all found *_polygons.json annotations into
    *_instanceTrainIds.png instance images."""
    # Where to look for Cityscapes
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')
    # hard-coded override of the dataset root (kept from the original)
    cityscapesPath = '/home/pengsida/Datasets/cityscape'
    # how to search for all ground truth
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                "*_gt*_polygons.json")
    # search files
    filesFine = glob.glob(searchFine)
    filesFine.sort()
    filesCoarse = glob.glob(searchCoarse)
    filesCoarse.sort()
    # concatenate fine and coarse
    files = filesFine + filesCoarse
    # files = filesFine # use this line if fine is enough for now.
    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")
    # a bit verbose
    print("Processing {} annotation files".format(len(files)))
    # iterate through files
    progress = 0
    print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ')
    for f in files:
        # BUGFIX: removed a debugging leftover that re-assigned `f` to one
        # hard-coded frankfurt json here, which made every iteration convert
        # the same single file instead of the globbed list.
        # create the output filename
        dst = f.replace("_polygons.json", "_instanceTrainIds.png")
        # do the conversion
        try:
            json2instanceImg(f, dst, "trainIds")
        except:
            # name the offending file, then re-raise
            print("Failed to convert: {}".format(f))
            raise
        # status
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
def main():
    """Render every *_polygons.json annotation found under the dataset root
    into a *_labelTrainIds.png label image."""
    # Dataset root: DATASET_PATH env var, or two directories above this file.
    if 'DATASET_PATH' in os.environ:
        root = os.environ['DATASET_PATH']
    else:
        root = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            '..', '..')
    # Glob patterns for the two annotation locations.
    fine_pattern = os.path.join(root, "annotations", "*", "*_polygons.json")
    coarse_pattern = os.path.join(root, "original_images", "*",
                                  "*_polygons.json")
    # Gather both groups, each sorted, fine first.
    files = sorted(glob.glob(fine_pattern)) + sorted(glob.glob(coarse_pattern))
    if not files:
        printError("Did not find any files. Please consult the README.")
    print("Processing {} annotation files".format(len(files)))
    total = len(files)
    print("Progress: {:>3} %".format(0 * 100 / total), end=' ')
    for done, src in enumerate(files, start=1):
        # Output name mirrors the input with the suffix swapped.
        dst = src.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(src, dst, "trainIds")
        except:
            # Name the offending file, then let the exception propagate.
            print("Failed to convert: {}".format(src))
            raise
        print("\rProgress: {:>3} %".format(done * 100 / total), end=' ')
        sys.stdout.flush()
def main():
    """Convert all found *_polygons.json annotations into trainId-encoded
    label images."""
    # Where to look for Cityscapes (hard-coded per machine)
    if platform.system() == 'Windows':
        cityscapesPath = 'D:/data/cityscapes'
    else:
        cityscapesPath = '/home/clova/data/cityscapes'
    # how to search for all ground truth
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                "*_gt*_polygons.json")
    # search files
    filesFine = glob.glob(searchFine)
    filesFine.sort()
    filesCoarse = glob.glob(searchCoarse)
    filesCoarse.sort()
    # concatenate fine and coarse
    files = filesFine + filesCoarse
    # files = filesFine # use this line if fine is enough for now.
    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")
    # a bit verbose
    print("Processing {} annotation files".format(len(files)))
    # iterate through files
    progress = 0
    print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ')
    for f in files:
        # BUGFIX: the output used the "_labelIds.png" suffix while the
        # conversion encodes trainIds; every sibling script in this file
        # pairs "trainIds" with "_labelTrainIds.png", and "_labelIds.png"
        # conventionally holds the full label ids. Renamed so trainId output
        # cannot be mistaken for (or clobber) real labelIds files.
        dst = f.replace("_polygons.json", "_labelTrainIds.png")
        # do the conversion
        try:
            json2labelImg(f, dst, "trainIds")
        except:
            # name the offending file, then re-raise
            print("Failed to convert: {}".format(f))
            raise
        # status
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
def main():
    """Convert the fine *_polygons.json annotations into
    *_labelTrainIds.png label images."""
    # Resolve the dataset root first from the environment...
    if 'CITYSCAPES_DATASET' in os.environ:
        root = os.environ['CITYSCAPES_DATASET']
    else:
        root = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            '..', '..')
    # ...then apply the hard-coded override (kept from the original).
    root = "/home/SENSETIME/parkchanho/Desktop/etc-repo"
    # Only the fine annotations are processed; coarse is intentionally off.
    pattern = os.path.join(root, "gtFine", "*", "*", "*_gt*_polygons.json")
    files = sorted(glob.glob(pattern))
    if not files:
        printError("Did not find any files. Please consult the README.")
    print("Processing {} annotation files".format(len(files)))
    total = len(files)
    print("Progress: {:>3} %".format(0 * 100 / total), end=' ')
    for done, src in enumerate(files, start=1):
        # Output name mirrors the input with the suffix swapped.
        dst = src.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(src, dst, "trainIds")
        except:
            # Name the offending file, then let the exception propagate.
            print("Failed to convert: {}".format(src))
            raise
        print("\rProgress: {:>3} %".format(done * 100 / total), end=' ')
        sys.stdout.flush()
def main():
    """Produce a *_labelTrainIds.png label image for every Cityscapes
    *_polygons.json annotation (fine and coarse)."""
    # Dataset root: $CITYSCAPES_DATASET, or two directories above this file.
    if 'CITYSCAPES_DATASET' in os.environ:
        root = os.environ['CITYSCAPES_DATASET']
    else:
        root = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            '..', '..')
    # Glob patterns for fine and coarse ground truth.
    fine_pattern = os.path.join(root, "gtFine", "*", "*",
                                "*_gt*_polygons.json")
    coarse_pattern = os.path.join(root, "gtCoarse", "*", "*",
                                  "*_gt*_polygons.json")
    # Fine files first, then coarse, each group sorted.
    files = sorted(glob.glob(fine_pattern)) + sorted(glob.glob(coarse_pattern))
    if not files:
        printError("Did not find any files. Please consult the README.")
    print("Processing {} annotation files".format(len(files)))
    total = len(files)
    print("Progress: {:>3} %".format(0 * 100 / total), end=' ')
    for done, src in enumerate(files, start=1):
        # Output path mirrors the input with the suffix swapped.
        dst = src.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(src, dst, "trainIds")
        except:
            # Identify the offending file, then propagate the error.
            print("Failed to convert: {}".format(src))
            raise
        print("\rProgress: {:>3} %".format(done * 100 / total), end=' ')
        sys.stdout.flush()
def createTrainIdInstanceImgs(cityscapesPath, fine_fold_name,
                              coarse_fold_name):
    """Convert every *_polygons.json under the fine and coarse ground-truth
    folders into a *_instanceTrainIds.png instance image.

    Args:
        cityscapesPath: dataset root directory.
        fine_fold_name: name of the fine ground-truth subfolder
            (presumably "gtFine" -- confirm against callers).
        coarse_fold_name: name of the coarse ground-truth subfolder
            (presumably "gtCoarse").
    """
    # how to search for all ground truth
    searchFine = os.path.join(cityscapesPath, fine_fold_name, "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, coarse_fold_name, "*", "*",
                                "*_gt*_polygons.json")
    # search files
    filesFine = glob.glob(searchFine)
    filesFine.sort()
    filesCoarse = glob.glob(searchCoarse)
    filesCoarse.sort()
    # concatenate fine and coarse
    files = filesFine + filesCoarse
    # files = filesFine # use this line if fine is enough for now.
    # quit if we did not find anything
    if not files:
        printError("Did not find any files. Please consult the README.")
    # a bit verbose
    print("Processing {} annotation files".format(len(files)))
    # iterate through files
    progress = 0
    print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ')
    for f in files:
        # create the output filename
        dst = f.replace("_polygons.json", "_instanceTrainIds.png")
        # do the conversion
        try:
            json2instanceImg(f, dst, "trainIds")
        except:
            # name the offending file, then re-raise
            print("Failed to convert: {}".format(f))
            raise
        # status
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
def evaluatePanoptic(gt_json_file, gt_folder, pred_json_file, pred_folder,
                     resultsFile):
    """Evaluate panoptic-quality (PQ) metrics of predictions vs. ground truth.

    Args:
        gt_json_file: ground-truth JSON in COCO panoptic format.
        gt_folder: folder with the ground-truth segmentation PNGs.
        pred_json_file: prediction JSON in COCO panoptic format.
        pred_folder: folder with the predicted segmentation PNGs.
        resultsFile: output path for the metrics JSON, or None to skip saving.

    Returns:
        The results dict produced by average_pq().
    """
    start_time = time.time()
    with open(gt_json_file, 'r') as f:
        gt_json = json.load(f)
    with open(pred_json_file, 'r') as f:
        pred_json = json.load(f)
    # category id -> category record, taken from the ground-truth JSON
    categories = {el['id']: el for el in gt_json['categories']}
    print("Evaluation panoptic segmentation metrics:")
    print("Ground truth:")
    print("\tSegmentation folder: {}".format(gt_folder))
    print("\tJSON file: {}".format(gt_json_file))
    print("Prediction:")
    print("\tSegmentation folder: {}".format(pred_folder))
    print("\tJSON file: {}".format(pred_json_file))
    if not os.path.isdir(gt_folder):
        printError("Folder {} with ground truth segmentations doesn't exist".format(gt_folder))
    if not os.path.isdir(pred_folder):
        printError("Folder {} with predicted segmentations doesn't exist".format(pred_folder))
    pred_annotations = {el['image_id']: el for el in pred_json['annotations']}
    # pair each ground-truth annotation with the prediction for the same image
    matched_annotations_list = []
    for gt_ann in gt_json['annotations']:
        image_id = gt_ann['image_id']
        if image_id not in pred_annotations:
            raise Exception('no prediction for the image with id: {}'.format(image_id))
        matched_annotations_list.append((gt_ann, pred_annotations[image_id]))
    pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories)
    results = average_pq(pq_stat, categories)
    # ROBUSTNESS: tolerate resultsFile=None (skip saving) instead of crashing
    # in open(); consistent with the extended evaluatePanoptic variant
    # elsewhere in this file.
    if resultsFile is not None:
        with open(resultsFile, 'w') as f:
            print("Saving computed results in {}".format(resultsFile))
            json.dump(results, f, sort_keys=True, indent=4)
    print_results(results, categories)
    t_delta = time.time() - start_time
    print("Time elapsed: {:0.2f} seconds".format(t_delta))
    return results
def convert2panoptic(cityscapesPath=None, outputFolder=None, useTrainId=False,
                     setNames=("val", "train", "test")):
    """Convert Cityscapes *_instanceIds.png ground truth to COCO panoptic
    format: one folder of panoptic PNGs plus one JSON file per set.

    Args:
        cityscapesPath: dataset root; defaults to $CITYSCAPES_DATASET (or two
            directories above this script), with "gtFine" appended.
        outputFolder: where PNGs and JSON are written; defaults to
            cityscapesPath.
        useTrainId: if True, category ids are trainIds instead of plain ids.
            NOTE: Detectron2's cityscapes_panoptic.py assumes this was run
            with useTrainId=False (it remaps plain ids to trainIds itself).
        setNames: iterable of set names to convert. (Changed from a mutable
            default list to a tuple; lists are still accepted.)
    """
    # Where to look for Cityscapes
    if cityscapesPath is None:
        if 'CITYSCAPES_DATASET' in os.environ:
            cityscapesPath = os.environ['CITYSCAPES_DATASET']
        else:
            cityscapesPath = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..', '..')
        cityscapesPath = os.path.join(cityscapesPath, "gtFine")
    if outputFolder is None:
        outputFolder = cityscapesPath
    # COCO "categories" section: labels excluded from evaluation are skipped.
    categories = []
    for label in labels:
        if label.ignoreInEval:
            continue
        categories.append({
            'id': int(label.trainId) if useTrainId else int(label.id),
            'name': label.name,
            'color': label.color,
            'supercategory': label.category,
            'isthing': 1 if label.hasInstances else 0
        })
    for setName in setNames:
        # how to search for all ground truth
        searchFine = os.path.join(cityscapesPath, setName, "*",
                                  "*_instanceIds.png")
        # search files
        filesFine = glob.glob(searchFine)
        filesFine.sort()
        files = filesFine
        # quit if we did not find anything
        if not files:
            printError(
                "Did not find any files for {} set using matching pattern {}. Please consult the README."
                .format(setName, searchFine))
        # a bit verbose
        print("Converting {} annotation files for {} set.".format(
            len(files), setName))
        trainIfSuffix = "_trainId" if useTrainId else ""
        outputBaseFile = "cityscapes_panoptic_{}{}".format(
            setName, trainIfSuffix)
        outFile = os.path.join(outputFolder, "{}.json".format(outputBaseFile))
        print(
            "Json file with the annotations in panoptic format will be saved in {}"
            .format(outFile))
        panopticFolder = os.path.join(outputFolder, outputBaseFile)
        if not os.path.isdir(panopticFolder):
            print("Creating folder {} for panoptic segmentation PNGs".format(
                panopticFolder))
            os.mkdir(panopticFolder)
        print("Corresponding segmentations in .png format will be saved in {}".
              format(panopticFolder))
        images = []
        annotations = []
        for progress, f in enumerate(files):
            originalFormat = np.array(Image.open(f))
            fileName = os.path.basename(f)
            imageId = fileName.replace("_gtFine_instanceIds.png", "")
            inputFileName = fileName.replace("_instanceIds.png",
                                             "_leftImg8bit.png")
            outputFileName = fileName.replace("_instanceIds.png",
                                              "_panoptic.png")
            # image entry, id for image is its filename without extension
            images.append({
                "id": imageId,
                "width": int(originalFormat.shape[1]),
                "height": int(originalFormat.shape[0]),
                "file_name": inputFileName
            })
            pan_format = np.zeros(
                (originalFormat.shape[0], originalFormat.shape[1], 3),
                dtype=np.uint8)
            segmentIds = np.unique(originalFormat)
            segmInfo = []
            for segmentId in segmentIds:
                # ids < 1000 are stuff or crowd things; ids >= 1000 encode
                # semanticId * 1000 + instance index
                if segmentId < 1000:
                    semanticId = segmentId
                    isCrowd = 1
                else:
                    semanticId = segmentId // 1000
                    isCrowd = 0
                labelInfo = id2label[semanticId]
                categoryId = labelInfo.trainId if useTrainId else labelInfo.id
                if labelInfo.ignoreInEval:
                    continue
                # stuff segments are never "crowd" in COCO panoptic format
                if not labelInfo.hasInstances:
                    isCrowd = 0
                mask = originalFormat == segmentId
                # BUGFIX: COCO panoptic encodes id = R + G*256 + B*256**2
                # (cf. panopticapi id2rgb), so every channel must be reduced
                # mod 256. The previous G channel (segmentId // 256) was not,
                # which corrupts the encoding for segment ids >= 65536.
                color = [
                    segmentId % 256, segmentId // 256 % 256,
                    segmentId // 256 // 256 % 256
                ]
                pan_format[mask] = color
                area = np.sum(mask)  # segment area computation
                # bbox computation for a segment
                hor = np.sum(mask, axis=0)
                hor_idx = np.nonzero(hor)[0]
                x = hor_idx[0]
                width = hor_idx[-1] - x + 1
                vert = np.sum(mask, axis=1)
                vert_idx = np.nonzero(vert)[0]
                y = vert_idx[0]
                height = vert_idx[-1] - y + 1
                bbox = [int(x), int(y), int(width), int(height)]
                segmInfo.append({
                    "id": int(segmentId),
                    "category_id": int(categoryId),
                    "area": int(area),
                    "bbox": bbox,
                    "iscrowd": isCrowd
                })
            annotations.append({
                'image_id': imageId,
                'file_name': outputFileName,
                "segments_info": segmInfo
            })
            Image.fromarray(pan_format).save(
                os.path.join(panopticFolder, outputFileName))
            print("\rProgress: {:>3.2f} %".format(
                (progress + 1) * 100 / len(files)),
                  end=' ')
            sys.stdout.flush()
        print("\nSaving the json file {}".format(outFile))
        d = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        with open(outFile, 'w') as f:
            json.dump(d, f, sort_keys=True, indent=4)
def convert2panoptic(cityscapesPath=None, outputFolder=None, useTrainId=False,
                     setNames=["val", "train", "test"]):
    """Convert *_instanceIds.png ground truth into COCO panoptic format:
    one annotation PNG per image plus one JSON file per set.

    NOTE(review): Detectron2's cityscapes_panoptic.py assumes this was run
    with useTrainId=False, i.e. segment_info["category_id"] carries the plain
    label id (Detectron2 remaps it to trainId itself) -- confirm callers.
    """
    print(
        f'j) <inputs shoud be> cityscapesPath:None, outputFolder:None, useTrainId:False, setNames:[\'train\', \'val\']'
    )
    print(
        f'j) <actual inputs> cityscapesPath:{cityscapesPath}, outputFolder:{outputFolder}, useTrainId:{useTrainId}, setNames:{setNames}'
    )
    # Default locations come from the project-wide CS_ROOTDIRPATH_J constant.
    if cityscapesPath is None:
        cityscapesPath = CS_ROOTDIRPATH_J
    if outputFolder is None:
        outputFolder = os.path.join(CS_ROOTDIRPATH_J, "gt")
    # COCO "categories" section: labels excluded from evaluation are skipped.
    categories = [{
        'id': int(label.trainId) if useTrainId else int(label.id),
        'name': label.name,
        'color': label.color,
        'supercategory': label.category,
        'isthing': 1 if label.hasInstances else 0
    } for label in labels if not label.ignoreInEval]
    for setName in setNames:
        # Every instance-id PNG belonging to this split.
        pngPattern = os.path.join(cityscapesPath, "gt", setName,
                                  "*_instanceIds.png")
        files = sorted(glob.glob(pngPattern))
        if not files:
            printError(
                "j) Did not find any files for {} set using matching pattern {} !!!"
                .format(setName, pngPattern))
        print(
            "j) Converting {} annotation files(~~instanceIds.png) for {} set.".
            format(len(files), setName))
        suffix = "_trainId" if useTrainId else ""
        baseName = "J_cocoformat_panoptic_{}{}".format(setName, suffix)
        jsonPath = os.path.join(outputFolder, "{}.json".format(baseName))
        print(
            "Json file with the annotations in panoptic format will be saved in {}"
            .format(jsonPath))
        panopticFolder = os.path.join(outputFolder, baseName)
        if not os.path.isdir(panopticFolder):
            print("Creating folder {} for panoptic segmentation PNGs".format(
                panopticFolder))
            os.mkdir(panopticFolder)
        print("Corresponding segmentations in .png format will be saved in {}".
              format(panopticFolder))
        images = []
        annotations = []
        for progress, f in enumerate(files):
            srcArr = np.array(Image.open(f))
            print(f'j) cs_annoPng_arrJ.shape: {srcArr.shape}')
            fileName = os.path.basename(f)
            # Image id is simply the basename without the suffix, e.g. 'imp2_0'.
            imageId = fileName[:-len("_instanceIds.png")]
            rgbName = fileName.replace("_instanceIds.png", ".jpg")
            annoPngName = fileName.replace("_instanceIds.png",
                                           "_panopticAnno.png")
            images.append({
                "id": imageId,
                "width": int(srcArr.shape[1]),
                "height": int(srcArr.shape[0]),
                "file_name": rgbName
            })
            # Output starts all-zero, so pixels never painted below remain
            # [0, 0, 0] (background). A foreground category whose id maps to
            # [0, 0, 0] would therefore collide with background.
            outArr = np.zeros((srcArr.shape[0], srcArr.shape[1], 3),
                              dtype=np.uint8)
            print(f'j) coco_annoPng_arrJ.shape: {outArr.shape}')
            segmentIds = np.unique(srcArr)
            print(
                f'j) segmentIds = np.unique(cs_annoPng_arrJ): {np.unique(srcArr)}'
            )
            segmsInfo = []
            for segmentId in segmentIds:
                # ids < 1000: stuff or crowd things; ids >= 1000 encode
                # semanticId * 1000 + instance index.
                if segmentId < 1000:
                    semanticId = segmentId
                    isCrowd = 1
                else:
                    semanticId = segmentId // 1000
                    isCrowd = 0
                labelInfo = id2label[semanticId]
                categoryId = labelInfo.trainId if useTrainId else labelInfo.id
                if labelInfo.ignoreInEval:
                    continue
                # Stuff segments are never "crowd" in COCO panoptic format.
                if not labelInfo.hasInstances:
                    isCrowd = 0
                mask = srcArr == segmentId
                # COCO panoptic: id = R + G*256 + B*256**2 (panopticapi id2rgb);
                # each channel is reduced mod 256 so large ids stay valid.
                color = [
                    segmentId % 256, segmentId % (256 * 256) // 256,
                    segmentId % (256 * 256 * 256) // (256 * 256)
                ]
                print(f'j) id:{segmentId} -> color(RGB):{color}')
                outArr[mask] = color
                area = np.sum(mask)
                # Tight bbox around the mask; for scattered stuff segments this
                # can span most of the image.
                hor = np.sum(mask, axis=0)
                hor_idx = np.nonzero(hor)[0]
                x = hor_idx[0]
                width = hor_idx[-1] - x + 1
                vert = np.sum(mask, axis=1)
                vert_idx = np.nonzero(vert)[0]
                y = vert_idx[0]
                height = vert_idx[-1] - y + 1
                bbox = [int(x), int(y), int(width), int(height)]
                segmsInfo.append({
                    "id": int(segmentId),
                    "category_id": int(categoryId),
                    "area": int(area),
                    "bbox": bbox,
                    "iscrowd": isCrowd
                })
            annotations.append({
                'image_id': imageId,
                'file_name': annoPngName,
                "segments_info": segmsInfo
            })
            Image.fromarray(outArr).save(
                os.path.join(panopticFolder, annoPngName))
            print("\rProgress: {:>3.2f} %".format(
                (progress + 1) * 100 / len(files)),
                  end=' ')
            sys.stdout.flush()
        print("\nSaving the json file {}".format(jsonPath))
        payload = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        with open(jsonPath, 'w') as f:
            json.dump(payload, f, sort_keys=True, indent=4)
def main():
    """Convert the Cityscapes fine *_polygons.json annotations into
    *_labelTrainIds.png label images."""
    # BraTS root: computed for parity with the rest of the script but
    # currently unused below.
    if 'BraTS_DATASET' in os.environ:
        bratsPath = os.environ['BraTS_DATASET']
    else:
        bratsPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 '..', '..')
    # Cityscapes root: env var, or two directories above this file.
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')
    # Glob patterns; only the fine set is actually globbed below.
    fine_pattern = os.path.join(cityscapesPath, "gtFine", "*", "*",
                                "*_gt*_polygons.json")
    coarse_pattern = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                  "*_gt*_polygons.json")
    files = sorted(glob.glob(fine_pattern))
    if not files:
        printError("Did not find any files. Please consult the README.")
    print("Processing {} annotation files".format(len(files)))
    total = len(files)
    print("Progress: {:>3} %".format(0 * 100 / total), end=' ')
    for done, src in enumerate(files, start=1):
        # Output name mirrors the input with the suffix swapped.
        dst = src.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(src, dst, "trainIds")
        except:
            # Name the offending file, then let the exception propagate.
            print("Failed to convert: {}".format(src))
            raise
        print("\rProgress: {:>3} %".format(done * 100 / total), end=' ')
        sys.stdout.flush()
def main():
    """Parse command-line arguments and run the panoptic evaluation.

    Defaults are derived from $CITYSCAPES_DATASET and $CITYSCAPES_RESULTS;
    the ground-truth / prediction PNG folders default to the corresponding
    JSON path with its extension stripped.
    """
    cityscapesPath = os.environ.get(
        'CITYSCAPES_DATASET',
        os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
    gtJsonFile = os.path.join(cityscapesPath, "gtFine",
                              "cityscapes_panoptic_val.json")
    predictionPath = os.environ.get('CITYSCAPES_RESULTS',
                                    os.path.join(cityscapesPath, "results"))
    predictionJsonFile = os.path.join(predictionPath,
                                      "cityscapes_panoptic_val.json")
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gt-json-file",
        dest="gtJsonFile",
        help='''path to json file that contains ground truth in COCO panoptic format.
    By default it is $CITYSCAPES_DATASET/gtFine/cityscapes_panoptic_val.json.
    ''',
        default=gtJsonFile,
        type=str)
    parser.add_argument(
        "--gt-folder",
        dest="gtFolder",
        help='''path to folder that contains ground truth *.png files. If the
    argument is not provided this script will look for the *.png files in
    'name' if --gt-json-file set to 'name.json'.
    ''',
        default=None,
        type=str)
    parser.add_argument(
        "--prediction-json-file",
        dest="predictionJsonFile",
        help='''path to json file that contains prediction in COCO panoptic format.
    By default is either $CITYSCAPES_RESULTS/cityscapes_panoptic_val.json
    or $CITYSCAPES_DATASET/results/cityscapes_panoptic_val.json if
    $CITYSCAPES_RESULTS is not set.
    ''',
        default=predictionJsonFile,
        type=str)
    parser.add_argument(
        "--prediction-folder",
        dest="predictionFolder",
        help='''path to folder that contains prediction *.png files. If the
    argument is not provided this script will look for the *.png files in
    'name' if --prediction-json-file set to 'name.json'.
    ''',
        default=None,
        type=str)
    resultFile = "resultPanopticSemanticLabeling.json"
    parser.add_argument(
        "--results_file",
        dest="resultsFile",
        help="File to store computed panoptic quality. Default: {}".format(
            resultFile),
        default=resultFile,
        type=str)
    parser.add_argument("--iou-type", default="segm")
    # FIX: default was the string "0.005" alongside type=float; use a real
    # float so the declared type and the default agree (argparse only coerces
    # string defaults as a special case). Parsed value is unchanged (0.005).
    parser.add_argument("--dilation-ratio", default=0.005, type=float)
    args = parser.parse_args()
    if not os.path.isfile(args.gtJsonFile):
        printError(
            "Could not find a ground truth json file in {}. Please run the script with '--help'"
            .format(args.gtJsonFile))
    if args.gtFolder is None:
        # PNG folder defaults to the JSON path without its extension.
        args.gtFolder = os.path.splitext(args.gtJsonFile)[0]
    if not os.path.isfile(args.predictionJsonFile):
        printError(
            "Could not find a prediction json file in {}. Please run the script with '--help'"
            .format(args.predictionJsonFile))
    if args.predictionFolder is None:
        args.predictionFolder = os.path.splitext(args.predictionJsonFile)[0]
    evaluatePanoptic(args.gtJsonFile, args.gtFolder, args.predictionJsonFile,
                     args.predictionFolder, args.resultsFile, args.iou_type,
                     args.dilation_ratio)
    return
def main():
    """Convert every Cityscapes *_polygons.json annotation into a
    *_labelTrainIds.png image via json2labelImg.

    The dataset root comes from $CITYSCAPES_DATASET when set; otherwise
    it is assumed to be two directories above this script.
    """
    # Resolve the dataset root. os.path.realpath(__file__) yields an
    # absolute location regardless of how the script was invoked, so the
    # '../..' hop to the repository root is reliable.
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', '..')

    # Glob patterns covering <root>/gtFine/<split>/<city>/*_gt*_polygons.json
    # and the matching gtCoarse annotations.
    searchFine = os.path.join(cityscapesPath, "gtFine", "*", "*",
                              "*_gt*_polygons.json")
    searchCoarse = os.path.join(cityscapesPath, "gtCoarse", "*", "*",
                                "*_gt*_polygons.json")

    filesFine = sorted(glob.glob(searchFine))
    filesCoarse = sorted(glob.glob(searchCoarse))

    # Fine and coarse annotations are handled in a single pass.
    files = filesFine + filesCoarse
    # files = filesFine  # use this line if fine is enough for now.

    # An empty glob result is falsy; note only the labels are checked
    # here, not whether the corresponding images exist.
    if not files:
        printError("Did not find any files. Please consult the README.")

    print("Processing {} annotation files".format(len(files)))

    progress = 0
    print("Progress: {:>3} %".format(progress * 100 / len(files)), end=' ')
    for f in files:
        # The output PNG sits next to its annotation, with a new suffix
        # selecting the trainIds encoding.
        dst = f.replace("_polygons.json", "_labelTrainIds.png")

        try:
            json2labelImg(f, dst, "trainIds")
        except:
            print("Failed to convert: {}".format(f))
            raise

        # Refresh the in-place progress indicator.
        progress += 1
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
# NOTE(review): no `def` line is visible for this chunk — it appears to be the
# body of a wrapper that first converts predictions to panoptic format and then
# evaluates them. `pred_in_path`, `pred_out_path`, `gt_jsonfile`, `gt_folder`,
# `pred_jsonfile`, `pred_folder` and `result` must come from the enclosing
# (not visible) scope — confirm against the full file.
createPanopticImgs.convert2panoptic(pred_in_path, pred_out_path, True)

# Default Cityscapes locations: dataset root from $CITYSCAPES_DATASET (else
# two levels above this file), predictions from $CITYSCAPES_RESULTS (else
# <root>/results).
# NOTE(review): gtJsonFile / predictionJsonFile computed here are not used
# below — the checks operate on gt_jsonfile / pred_jsonfile instead; verify
# whether these defaults were meant to feed into them.
cityscapesPath = os.environ.get(
    'CITYSCAPES_DATASET',
    os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
gtJsonFile = os.path.join(cityscapesPath, "gtFine",
                          "cityscapes_panoptic_val.json")
predictionPath = os.environ.get('CITYSCAPES_RESULTS',
                                os.path.join(cityscapesPath, "results"))
predictionJsonFile = os.path.join(predictionPath,
                                  "cityscapes_panoptic_val.json")

# Validate the ground-truth JSON; printError is presumably fatal — confirm.
if not os.path.isfile(gt_jsonfile):
    printError(
        "Could not find a ground truth json file in {}. Please run the script with '--help'"
        .format(gt_jsonfile))
# Fall back to the '<name>' folder for '<name>.json'.
if gt_folder is None:
    gt_folder = os.path.splitext(gt_jsonfile)[0]

# Same validation and folder fallback for the prediction side.
if not os.path.isfile(pred_jsonfile):
    printError(
        "Could not find a prediction json file in {}. Please run the script with '--help'"
        .format(pred_jsonfile))
if pred_folder is None:
    pred_folder = os.path.splitext(pred_jsonfile)[0]

# Run the panoptic-quality evaluation; results are written to `result`.
evalPanopticSemanticLabeling.evaluatePanoptic(gt_jsonfile, gt_folder,
                                              pred_jsonfile, pred_folder,
                                              result)
def main():
    """Parse command-line options and run evaluatePanoptic().

    Default file locations follow the Cityscapes layout: ground truth
    under $CITYSCAPES_DATASET/gtFine, predictions under
    $CITYSCAPES_RESULTS (falling back to $CITYSCAPES_DATASET/results).
    """
    # Compute the default paths before declaring the options that use them.
    defaultDatasetRoot = os.environ.get(
        'CITYSCAPES_DATASET',
        os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
    defaultGtJson = os.path.join(defaultDatasetRoot, "gtFine",
                                 "cityscapes_panoptic_val.json")
    defaultResultRoot = os.environ.get(
        'CITYSCAPES_RESULTS', os.path.join(defaultDatasetRoot, "results"))
    defaultPredJson = os.path.join(defaultResultRoot,
                                   "cityscapes_panoptic_val.json")
    defaultResultsFile = "resultPanopticSemanticLabeling.json"

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gt-json-file", dest="gtJsonFile", default=defaultGtJson, type=str,
        help='''path to json file that contains ground truth in COCO panoptic format.
    By default it is $CITYSCAPES_DATASET/gtFine/cityscapes_panoptic_val.json.
    ''')
    parser.add_argument(
        "--gt-folder", dest="gtFolder", default=None, type=str,
        help='''path to folder that contains ground truth *.png files. If the
    argument is not provided this script will look for the *.png files in
    'name' if --gt-json-file set to 'name.json'.
    ''')
    parser.add_argument(
        "--prediction-json-file", dest="predictionJsonFile",
        default=defaultPredJson, type=str,
        help='''path to json file that contains prediction in COCO panoptic format.
    By default is either $CITYSCAPES_RESULTS/cityscapes_panoptic_val.json
    or $CITYSCAPES_DATASET/results/cityscapes_panoptic_val.json if
    $CITYSCAPES_RESULTS is not set.
    ''')
    parser.add_argument(
        "--prediction-folder", dest="predictionFolder", default=None, type=str,
        help='''path to folder that contains prediction *.png files. If the
    argument is not provided this script will look for the *.png files in
    'name' if --prediction-json-file set to 'name.json'.
    ''')
    parser.add_argument(
        "--results_file", dest="resultsFile", default=defaultResultsFile,
        type=str,
        help="File to store computed panoptic quality. Default: {}".format(
            defaultResultsFile))
    args = parser.parse_args()

    # Abort early when the ground-truth JSON is missing, then derive its
    # PNG folder ('<name>' for '<name>.json') when not supplied.
    if not os.path.isfile(args.gtJsonFile):
        printError(
            "Could not find a ground truth json file in {}. Please run the script with '--help'"
            .format(args.gtJsonFile))
    if args.gtFolder is None:
        args.gtFolder = os.path.splitext(args.gtJsonFile)[0]

    # Same validation and folder fallback for the prediction side.
    if not os.path.isfile(args.predictionJsonFile):
        printError(
            "Could not find a prediction json file in {}. Please run the script with '--help'"
            .format(args.predictionJsonFile))
    if args.predictionFolder is None:
        args.predictionFolder = os.path.splitext(args.predictionJsonFile)[0]

    evaluatePanoptic(args.gtJsonFile, args.gtFolder, args.predictionJsonFile,
                     args.predictionFolder, args.resultsFile)

    return
def main():
    """Convert custom *_polygons.json annotations into *_labelTrainIds.png
    images with json2labelImg.

    Unlike the stock Cityscapes script, this variant does not search
    $CITYSCAPES_DATASET: it scans a fixed custom dataset layout
    (<root>/gt/<split>/*_polygons.json) as unpacked on Google Colab
    from a Google Drive archive of the dental-panorama dataset.
    """
    # Custom dataset root on the Colab machine; the archive is extracted
    # here before running (layout: <root>/gt/<split>/*_polygons.json and
    # a sibling inputOriPano folder for the source images).
    MYROOTDIRPATH_J = "/content/datasetsJ/panopticSeg_dentPanoJ"

    # Glob pattern for every annotation file.
    searchPattern = os.path.join(MYROOTDIRPATH_J, "gt", "*",
                                 "*_polygons.json")
    files = glob.glob(searchPattern)

    # Bail out when the glob matched nothing.
    if not files:
        printError("j) Did not find any files(~~polygons.json)!!!")

    print("Processing {} annotation files".format(len(files)))

    # Convert each annotation, refreshing an in-place progress indicator.
    print("Progress: {:>3} %".format(0 * 100 / len(files)), end=' ')
    for progress, f in enumerate(files, start=1):
        # The output PNG sits next to its JSON source.
        dst = f.replace("_polygons.json", "_labelTrainIds.png")
        try:
            json2labelImg(f, dst, "trainIds")
        except:
            print("Failed to convert: {}".format(f))
            raise
        print("\rProgress: {:>3} %".format(progress * 100 / len(files)),
              end=' ')
        sys.stdout.flush()
def convert2panoptic(cityscapesPath=None, outputFolder=None, useTrainId=False):
    """Convert Cityscapes instance-id ground truth to COCO panoptic format.

    For each split ("val", "train", "test"), every *_instanceIds.png is
    turned into a *_panoptic.png (segment ids encoded as RGB) and one JSON
    file with 'images' / 'annotations' / 'categories' entries is written.

    Args:
        cityscapesPath: dataset root; when None, taken from
            $CITYSCAPES_DATASET (or two levels above this script) with
            "gtFine" appended.
        outputFolder: destination for PNGs and the JSON files; defaults to
            the input folder.
        useTrainId: when True, write label.trainId instead of label.id as
            the category id.
    """
    # Where to look for Cityscapes.
    # NOTE(review): the "gtFine" append is placed inside the `is None` branch
    # (a caller-supplied path is used as-is) — the collapsed source is
    # ambiguous here; confirm against the original file.
    if cityscapesPath is None:
        if 'CITYSCAPES_DATASET' in os.environ:
            cityscapesPath = os.environ['CITYSCAPES_DATASET']
        else:
            cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')
        cityscapesPath = os.path.join(cityscapesPath, "gtFine")

    if outputFolder is None:
        outputFolder = cityscapesPath

    # COCO "categories" table built from the Cityscapes label definitions;
    # labels ignored in evaluation are skipped.
    categories = []
    for label in labels:
        if label.ignoreInEval:
            continue
        categories.append({'id': int(label.trainId) if useTrainId else int(label.id),
                           'name': label.name,
                           'color': label.color,
                           'supercategory': label.category,
                           'isthing': 1 if label.hasInstances else 0})

    # if only val set needs the conversion.
    # for setName in ["val"]:
    for setName in ["val", "train", "test"]:
        # how to search for all ground truth
        searchFine = os.path.join(cityscapesPath, setName, "*", "*_instanceIds.png")
        # search files
        filesFine = glob.glob(searchFine)
        filesFine.sort()
        files = filesFine
        # quit if we did not find anything
        if not files:
            printError(
                "Did not find any files for {} set using matching pattern {}. Please consult the README.".format(setName, searchFine)
            )
        # a bit verbose
        print("Converting {} annotation files for {} set.".format(len(files), setName))

        # Output names: cityscapes_panoptic_<set>[_trainId].{json,/}
        trainIfSuffix = "_trainId" if useTrainId else ""
        outputBaseFile = "cityscapes_panoptic_{}{}".format(setName, trainIfSuffix)
        outFile = os.path.join(outputFolder, "{}.json".format(outputBaseFile))
        print("Json file with the annotations in panoptic format will be saved in {}".format(outFile))
        panopticFolder = os.path.join(outputFolder, outputBaseFile)
        if not os.path.isdir(panopticFolder):
            print("Creating folder {} for panoptic segmentation PNGs".format(panopticFolder))
            os.mkdir(panopticFolder)
        print("Corresponding segmentations in .png format will be saved in {}".format(panopticFolder))

        images = []
        annotations = []
        for progress, f in enumerate(files):

            originalFormat = np.array(Image.open(f))

            fileName = os.path.basename(f)
            # Image id is the file stem without the gtFine suffix.
            imageId = fileName.replace("_gtFine_instanceIds.png", "")
            inputFileName = fileName.replace("_instanceIds.png", "_leftImg8bit.png")
            outputFileName = fileName.replace("_instanceIds.png", "_panoptic.png")
            # image entry, id for image is its filename without extension
            images.append({"id": imageId,
                           "width": int(originalFormat.shape[1]),
                           "height": int(originalFormat.shape[0]),
                           "file_name": inputFileName})

            # RGB output image; each segment's id is written as a colour.
            pan_format = np.zeros(
                (originalFormat.shape[0], originalFormat.shape[1], 3), dtype=np.uint8
            )

            segmentIds = np.unique(originalFormat)
            segmInfo = []
            for segmentId in segmentIds:
                # Cityscapes id encoding: a plain label id (< 1000) has no
                # instance annotation (treated as crowd); otherwise the id
                # is labelId * 1000 + instance index.
                if segmentId < 1000:
                    semanticId = segmentId
                    isCrowd = 1
                else:
                    semanticId = segmentId // 1000
                    isCrowd = 0
                labelInfo = id2label[semanticId]
                categoryId = labelInfo.trainId if useTrainId else labelInfo.id
                if labelInfo.ignoreInEval:
                    continue
                # Stuff classes are never marked as crowd.
                if not labelInfo.hasInstances:
                    isCrowd = 0

                mask = originalFormat == segmentId
                # COCO panoptic colour encoding: id = R + G*256 + B*256^2.
                color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]
                pan_format[mask] = color

                area = np.sum(mask) # segment area computation

                # bbox computation for a segment: first/last nonzero column
                # and row of the mask give the tight bounding box.
                hor = np.sum(mask, axis=0)
                hor_idx = np.nonzero(hor)[0]
                x = hor_idx[0]
                width = hor_idx[-1] - x + 1
                vert = np.sum(mask, axis=1)
                vert_idx = np.nonzero(vert)[0]
                y = vert_idx[0]
                height = vert_idx[-1] - y + 1
                bbox = [int(x), int(y), int(width), int(height)]

                segmInfo.append({"id": int(segmentId),
                                 "category_id": int(categoryId),
                                 "area": int(area),
                                 "bbox": bbox,
                                 "iscrowd": isCrowd})

            annotations.append({'image_id': imageId,
                                'file_name': outputFileName,
                                "segments_info": segmInfo})

            Image.fromarray(pan_format).save(os.path.join(panopticFolder, outputFileName))

            print("\rProgress: {:>3.2f} %".format((progress + 1) * 100 / len(files)), end=' ')
            sys.stdout.flush()

        print("\nSaving the json file {}".format(outFile))
        d = {'images': images,
             'annotations': annotations,
             'categories': categories}
        with open(outFile, 'w') as f:
            json.dump(d, f, sort_keys=True, indent=4)