def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--scale', help='select scale factor', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale:
        config.scale = int(args.scale)

    # Tissue-dependent sampling frequency used by createPix2pixDataset
    if args.tissue == 'Ganglioneuroma':
        n_freq = 20
    else:
        n_freq = 30

    print("Scale: " + str(args.scale))
    print(config.diagnosis)
    print(config.outputFolder)

    tools = Tools()
    annotated_nuclei = AnnotatedObjectSet()
    ids_images = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))

    # Create the dataset for training the pix2pix network based on image pairs
    for index, elem in enumerate(ids_images):
        annotated_image = AnnotatedImage()
        annotated_image.readFromPath(ids_images[index], ids_masks[index], type='uint16')
        enhanced_images = tools.enhanceImage(annotated_image, flip_left_right=True, flip_up_down=True, deform=False)
        for img in enhanced_images:
            annotated_nuclei.addObjectImage(img, useBorderObjects=config.useBorderObjects)

    # Create the image pairs
    tools.createPix2pixDataset(annotated_nuclei, config, n_freq=n_freq, tissue=args.tissue)
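# Note: the image/mask pairing above assumes glob.glob lists both folders in the
# same order, which is not guaranteed across platforms. A minimal sketch of a
# defensive pairing by sorted basename (the helper name paired_paths is
# hypothetical, not part of this pipeline):
import glob
import os

def paired_paths(input_folder, diagnosis):
    # Sort both listings so images and masks line up by file name.
    images = sorted(glob.glob(os.path.join(input_folder, diagnosis, 'images', '*.tif')), key=os.path.basename)
    masks = sorted(glob.glob(os.path.join(input_folder, diagnosis, 'masks', '*.tif')), key=os.path.basename)
    assert len(images) == len(masks), "images/masks folders differ in size"
    return list(zip(images, masks))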
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    parser.add_argument('--mode', help='select mode', default='train')
    parser.add_argument('--resultsfile', help='select results file', default=None)
    parser.add_argument('--overlap', help='select overlap', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale == '1':
        config.scale = True
    if args.mode:
        config.mode = args.mode
    if args.resultsfile:
        config.resultsfile = args.resultsfile
    if args.overlap:
        config.overlap = int(args.overlap)

    print(config.diagnosis)
    print(config.outputFolder)
    print(config.scale)

    tools = Tools()
    annotated_nuclei = AnnotatedObjectSet()
    ids_images = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))

    # Read each image/mask pair and collect the annotated objects
    for index, elem in enumerate(ids_images):
        annotated_image = AnnotatedImage()
        annotated_image.readFromPath(ids_images[index], ids_masks[index], type='uint16')
        annotated_nuclei.addObjectImage(annotated_image, useBorderObjects=config.useBorderObjects, path_to_img=ids_images[index])

    # Create and save the image tiles
    tools.createAndSaveTiles(annotated_nuclei, config)
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default="Ganglioneuroma")
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    parser.add_argument('--mode', help='select mode', default='train')
    parser.add_argument('--resultsfile', help='select results file', default=None)
    parser.add_argument('--overlap', help='select overlap', default=None)
    parser.add_argument('--ending', help='select file ending of the input images', default=None)
    parser.add_argument('--scalesize', help='select scale size', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale == '1':
        config.scale = True
    if args.mode:
        config.mode = args.mode
    if args.resultsfile:
        config.resultsfile = args.resultsfile
    if args.overlap:
        config.overlap = int(args.overlap)

    print(config.diagnosis)
    print(config.outputFolder)
    print(config.scale)

    tools = Tools()

    def takeSecond(elem):
        # Sort key: the file basename
        return os.path.basename(elem)

    annotated_nuclei = AnnotatedObjectSet()
    print("Input folder: " + args.inputFolder)
    ids_images = glob.glob(os.path.join(args.inputFolder, '*.' + args.ending))
    ids_images.sort(key=takeSecond)

    # Read raw images only (no masks available for sample segmentation)
    for index, elem in enumerate(ids_images):
        print(ids_images[index])
        annotated_image = AnnotatedImage()
        annotated_image.readFromPathOnlyImage(ids_images[index])
        annotated_nuclei.addObjectImage(annotated_image, useBorderObjects=config.useBorderObjects, path_to_img=ids_images[index])
        cv2.imwrite(r"/root/flo/tmp/test_before_tiling.jpg", annotated_image.getRaw())  # debug output

    # Create and save the image tiles
    tools.createAndSaveTilesForSampleSegmentation(annotated_nuclei, config, float(args.scalesize))
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--samplingrate', help='how fine the contour shall be sampled', default=None)
    parser.add_argument('--ending', help='select file ending of the input images', default=None)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    print(args.ending)
    print(args.inputFolder)

    tools = Tools()
    svg_tools = SVGTools(samplingrate=int(args.samplingrate))

    folder = args.inputFolder
    # Note: this pattern also matches the generated *_mask files if they share the ending
    images = glob.glob(os.path.join(folder, "*." + args.ending))
    masks = [x.replace('.' + os.path.basename(x).split('.')[1], "_mask.TIF") for x in images]
    print(images)
    print(masks)

    for index, elem in enumerate(images):
        img = AnnotatedImage()
        img.readFromPath(images[index], masks[index])
        raw_name = os.path.basename(elem).replace('.' + os.path.basename(elem).split('.')[1], '_raw.jpg')
        cv2.imwrite(os.path.join(folder, raw_name), (img.getRaw() * 255.0).astype(np.uint8))
        svg_tools.openSVG(img.getRaw().shape[0], img.getRaw().shape[1])
        svg_tools.addRawImage(name='Raw image', img_path=raw_name)
        svg_tools.addMaskLayer(img.getMask(), 'Single nuclei', '#00FF00', 0.5)
        svg_tools.closeSVG()
        svg_path = os.path.join(folder, os.path.basename(elem).replace('.' + os.path.basename(elem).split('.')[1], '_svg.svg'))
        print("Path to SVG: " + svg_path)
        svg_tools.writeToPath(svg_path)
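# Note: the mask paths above are derived by substring-replacing the extension,
# which misfires if the extension string occurs elsewhere in the filename.
# A sketch of the same derivation with os.path.splitext (mask_path_for is a
# hypothetical helper, not part of this pipeline):
import os

def mask_path_for(image_path):
    # Derive '<name>_mask.TIF' next to the image without substring replacement.
    root, _ext = os.path.splitext(image_path)
    return root + "_mask.TIF"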
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    args = parser.parse_args()

    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.overlapProbability:
        args.overlapProbability = float(args.overlapProbability)
    else:
        args.overlapProbability = 0.5
    if args.scale == '1':
        config.scale = True
    print(config.diagnosis)

    tools = Tools()
    annotated_nuclei = AnnotatedObjectSet()
    annotated_images = []
    ids_images = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))

    for index, elem in enumerate(ids_images):
        annotated_image = AnnotatedImage()
        annotated_image.readFromPath(ids_images[index], ids_masks[index])
        annotated_images.append(annotated_image)

    # Rescale images and masks to normalized object scales, then collect the objects
    scales = tools.getNormalizedScales(annotated_images)
    for index, img in enumerate(annotated_images):
        annotated_image = AnnotatedImage()
        if config.scale:
            annotated_image.createWithArguments(
                tools.rescale_image(img.getRaw(), (scales[index], scales[index])),
                tools.rescale_mask(img.getMask(), (scales[index], scales[index]), make_labels=True))
        else:
            annotated_image.createWithArguments(img.getRaw(), img.getMask())
        annotated_nuclei.addObjectImage(annotated_image, useBorderObjects=config.useBorderObjects)

    # Number of nuclei per artificial image depends on tissue and scaling
    if config.scale == 0:
        if args.tissue == 'Ganglioneuroma':
            possible_numbers = [9, 16, 25, 36, 49]
        else:
            possible_numbers = [4, 4, 9]
    else:
        possible_numbers = [9, 16, 25, 36, 49]

    # How many images?
    if not args.nr_images:
        args.nr_images = 10
    else:
        args.nr_images = int(args.nr_images)

    for t in tqdm(range(0, args.nr_images)):
        # Create an artificial image from randomly drawn annotated objects
        number_nuclei = random.randint(0, len(possible_numbers) - 1)
        img = ArtificialAnnotatedImage(width=256, height=256,
                                       number_nuclei=possible_numbers[number_nuclei],
                                       probabilityOverlap=args.overlapProbability)
        total_added = 0
        for i in range(0, possible_numbers[number_nuclei]):
            obj = annotated_nuclei.returnArbitraryObject()
            if randint(0, 1):
                obj = tools.arbitraryEnhance(obj)
            total_added += img.addImageAtGridPosition(obj)
        if total_added > 0:
            # Duplicate the grayscale image into a side-by-side RGB pair as expected by pix2pix
            shape_y = img.getRaw().shape[0]
            shape_x = img.getRaw().shape[1]
            img_new = np.zeros((shape_y, shape_x * 2, 3), dtype=np.float32)
            img_new[:, 0:shape_x, :] = img.getRaw()[:, :, np.newaxis]
            img_new[:, shape_x:2 * shape_x, :] = img.getRaw()[:, :, np.newaxis]
            # Note: the output paths assume config.outputFolder ends with a path separator
            scipy.misc.toimage(img_new, cmin=0.0, cmax=1.0).save(
                config.outputFolder + config.diagnosis[0] + '\\images\\Img_' + str(t) + '.jpg')
            tifffile.imsave(config.outputFolder + config.diagnosis[0] + '\\masks\\Mask_' + str(t) + '.tif',
                            img.getMask(), dtype=np.uint8)
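# Note: scipy.misc.toimage was deprecated and removed in SciPy >= 1.2, so the
# save call above fails on current SciPy. A sketch of an equivalent writer built
# on Pillow (save_scaled_image is a hypothetical helper, not part of this
# pipeline); like the original call, it maps [cmin, cmax] to [0, 255]:
import numpy as np
from PIL import Image as PILImage

def save_scaled_image(img, path, cmin=0.0, cmax=1.0):
    # Clip to the [cmin, cmax] range, rescale to 8 bit and save via Pillow.
    arr = np.clip((img - cmin) / (cmax - cmin), 0.0, 1.0)
    PILImage.fromarray((arr * 255.0).astype(np.uint8)).save(path)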
def main():
    tools = Tools()
    structuredEvaluation = StructuredEvaluation()
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--resultfile', help='select result file',
                        default=r"E:\NuclearSegmentationPipeline\Results\results_scaled.csv")
    args = parser.parse_args()

    # Read the tiling csv file to map image names to prediction positions
    mapping_images = dict()
    natureclass = dict()
    abs_index = 0
    mapping_class = dict()
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            if int(row[2]) == 0:
                mapping_images[os.path.basename(row[0])] = abs_index
                abs_index = abs_index + 1

    # Read the image description csv file to get the nature paper class per image
    with open(r"E:\NuclearSegmentationPipeline\DataGenerator\image_description_final_revision.csv") as csv_file2:
        csv_reader2 = csv.reader(csv_file2)
        for row in csv_reader2:
            mapping_class[row[0].split(';')[0]] = row[0].split(';')[1]
            natureclass[row[0].split(';')[0]] = row[0].split(';')[4]

    rawimages = []
    groundtruth = []
    img_pathes = []
    base_path = r"E:\NuclearSegmentationPipeline\DataGenerator\dataset_singlecellgroundtruth"
    type_list = ["Ganglioneuroma", "Neuroblastoma", "normal"]
    abs_index = 0
    target = "masks"

    ids_predictions = glob.glob(os.path.join(r"E:\NuclearSegmentationPipeline\Results\\", '*.pkl'))
    predictions = []
    prediction_parameters = []
    print("Loading predictions...")
    for i in ids_predictions:
        prediction_parameters.append(os.path.basename(i).split('_reconstructed')[0])
        # Workaround for pickles written under Python 2
        with open(i, "rb") as f:
            u = pickle._Unpickler(f)
            u.encoding = "latin1"
            predictions.append(u.load())

    for index, elem in enumerate(predictions):
        structuredEvaluation.addArchitecture(Architecture(name=prediction_parameters[index]))

    img_name_list = []
    abs_index = 0
    print("Reading images ...")
    for type in type_list:
        imgs = glob.glob(os.path.join(base_path, type, "images", "*.tif"))
        for index, img in enumerate(imgs):
            groundtruth.append(imread(img.replace('images', 'masks').replace('.tif', 'singlemask.tif')))
            img_name_list.append(os.path.basename(img))
            structuredEvaluation.addTestImage(TestImage(
                position=abs_index,
                naturepaperclass=natureclass[os.path.basename(img).replace('.tif', '')]))
            abs_index = abs_index + 1

    print("Start evaluation")
    for index in range(0, len(groundtruth)):
        print("Calculating metrics for image number " + str(index) + " of " + str(len(groundtruth)) + " ...")
        for ix, prediction in enumerate(predictions):
            # Calculate single-cell dice and Jaccard index metrics
            pred_index = mapping_images[img_name_list[index]]
            erg = calculateSinglecellDiceJI(groundtruth[index],
                                            label(prediction['masks'][pred_index].astype(np.float32)))
            structuredEvaluation.addMetric(Metric(dice=erg["DICE"], ji=erg["JI"]),
                                           image=index,
                                           architecture=prediction_parameters[ix])

    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_singlecellannotation_GNB.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.GNB_I]))
    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_singlecellannotation__NB.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.NB_IV]))
    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_singlecellannotation__normal.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.NC_I]))
    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_singlecellannotation__alldiagnosis.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.GNB_I,
                                                                      NaturepaperClasses.NB_IV,
                                                                      NaturepaperClasses.NC_I]))
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    args = parser.parse_args()

    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    print(config.diagnosis)
    print(config.outputFolder)

    tools = Tools()
    annotated_nuclei = AnnotatedObjectSet()

    # Create the output folder structure for all folds
    for fold in ['train', 'val', 'test', 'train_and_val']:
        path_train = os.path.join(config.outputFolder, fold)
        if not os.path.exists(path_train):
            os.makedirs(path_train)
        path_output = os.path.join(config.outputFolder, fold, config.diagnosis[0])
        if not os.path.exists(path_output):
            os.makedirs(path_output)
        path_output_images = os.path.join(config.outputFolder, fold, config.diagnosis[0], 'images')
        if not os.path.exists(path_output_images):
            os.makedirs(path_output_images)
        path_output_masks = os.path.join(config.outputFolder, fold, config.diagnosis[0], 'masks')
        if not os.path.exists(path_output_masks):
            os.makedirs(path_output_masks)

    # Split the annotated images into train/val/test folds and copy the image/mask pairs
    running_ind = 0
    ids_paths = tisquant.dbconnector.execute(query=tisquant.getLevel3AnnotatedImagesByDiagnosis_Query(
        diagnosis=config.diagnosis, magnification=config.magnification,
        staining_type=config.staining_type, staining=config.staining,
        segmentation_function=config.segmentation_function,
        annotator=config.annotator, device=config.device))
    random.shuffle(ids_paths)
    nr_trainval = round(len(ids_paths) * 0.8)
    nr_val = round(nr_trainval * 0.8)
    for index, elem in enumerate(ids_paths):
        groundtruth_paths = tisquant.dbconnector.execute(
            tisquant.getLevel3AnnotationByImageIdUsingMaxExperience_Query(elem[0], config.annotator))
        if index < nr_val:
            folds = ['train', 'train_and_val']
        elif index < nr_trainval:
            folds = ['val', 'train_and_val']
        else:
            folds = ['test']
        for fold in folds:
            path_output_images = os.path.join(config.outputFolder, fold, config.diagnosis[0], 'images')
            path_output_masks = os.path.join(config.outputFolder, fold, config.diagnosis[0], 'masks')
            for groundtruth_path in groundtruth_paths:
                copyfile(tools.getLocalDataPath(elem[1], 1),
                         os.path.join(path_output_images, config.diagnosis[0] + '_' + str(running_ind) + '.tif'))
                print('Copying rawimage ' + tools.getLocalDataPath(elem[1], 1) + '...')
                copyfile(tools.getLocalDataPath(groundtruth_path[0], 3),
                         os.path.join(path_output_masks, config.diagnosis[0] + '_' + str(running_ind) + '.tif'))
                print('Copying mask ' + tools.getLocalDataPath(groundtruth_path[0], 3) + '...')
        running_ind = running_ind + 1
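# Note: random.shuffle(ids_paths) above runs without a fixed seed, so the
# train/val/test folds differ between runs. A sketch of a seeded, in-place
# shuffle (reproducible_shuffle is a hypothetical helper; the seed value mirrors
# the commented-out random.seed(13431) in the generator script further below,
# and any fixed value works):
import random

def reproducible_shuffle(items, seed=13431):
    # Use a dedicated RNG so the global random state is left untouched.
    rng = random.Random(seed)
    rng.shuffle(items)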
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--scale', help='select scale factor', default=None)
    args = parser.parse_args()

    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.scale:
        config.scale = int(args.scale)
    print("Scale: " + str(args.scale))
    print(config.diagnosis)
    print(config.outputFolder)

    tools = Tools()
    annotated_nuclei = AnnotatedObjectSet()
    ids_images = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))

    # Create the dataset for training the pix2pix network based on image pairs
    for index, elem in enumerate(ids_images):
        annotated_image = AnnotatedImage()
        annotated_image.readFromPath(ids_images[index], ids_masks[index])
        enhanced_images = tools.enhanceImage(annotated_image, flip_left_right=True, flip_up_down=True, deform=True)
        for img in enhanced_images:
            annotated_nuclei.addObjectImage(img, useBorderObjects=config.useBorderObjects)

    # Create the image pairs
    tools.createPix2pixDataset(annotated_nuclei, config)
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--samplingrate', help='how fine the contour shall be sampled', default=None)
    args = parser.parse_args()

    tisquant = TisQuantExtract()
    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    print(config.diagnosis)

    tools = Tools()
    svg_tools = SVGTools(samplingrate=int(args.samplingrate))

    ids_paths = tisquant.dbconnector.execute(query=tisquant.getLevel3AnnotatedImagesByDiagnosis_Query(
        diagnosis=config.diagnosis, magnification=config.magnification,
        staining_type=config.staining_type, staining=config.staining,
        segmentation_function=config.segmentation_function,
        annotator=config.annotator, device=config.device))

    for index, elem in enumerate(ids_paths):
        groundtruth_path_l3 = tisquant.dbconnector.execute(
            tisquant.getLevel3AnnotationByImageIdUsingMaxExperience_Query(elem[0], config.annotator))[0]
        groundtruth_path_l2 = tisquant.dbconnector.execute(
            tisquant.getLevel2AnnotationByImageIdUsingMaxExperience_Query(elem[0], config.annotator))[0]
        img = AnnotatedImage()
        img.readFromPath(tools.getLocalDataPath(elem[1], 1), tools.getLocalDataPath(groundtruth_path_l2[0], 3))
        cv2.imwrite(os.path.join(config.outputFolder, config.diagnosis[0], str(elem[0]) + '_raw.jpg'),
                    (img.getRaw() * 255.0).astype(np.uint8))
        svg_tools.openSVG(img.getRaw().shape[0], img.getRaw().shape[1])
        svg_tools.addRawImage(name='Raw image', img_path=(str(elem[0]) + '_raw.jpg'))
        # Level-2 annotation: channels mark not-annotated areas and nuclei clumps
        svg_tools.addMaskLayer(img.getMask()[:, :, 0], 'Not annotated', '#0000FF', 0.5)
        svg_tools.addMaskLayer(img.getMask()[:, :, 2], 'Clumps', '#FF0000', 0.5)
        # Level-3 annotation: single-nuclei mask
        img.readFromPath(tools.getLocalDataPath(elem[1], 1), tools.getLocalDataPath(groundtruth_path_l3[0], 3))
        svg_tools.addMaskLayer(img.getMask(), 'Single nuclei', '#00FF00', 0.5)
        svg_tools.closeSVG()
        svg_tools.writeToPath(os.path.join(config.outputFolder, config.diagnosis[0], str(elem[0]) + '_svg.svg'))
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    parser.add_argument('--resultfile', help='select result file', default=None)
    parser.add_argument('--predictionfile', help='select prediction file', default=None)
    parser.add_argument('--net', help='describe net', default=None)
    parser.add_argument('--overlap', help='select overlap', default=None)
    parser.add_argument('--dilate', help='dilate objects (0/1)', default=False)
    args = parser.parse_args()

    config = Config
    if args.scale == '1':
        config.scale = True
    if args.resultfile:
        config.resultfile = args.resultfile
    else:
        print("No result file provided")
        exit()
    if args.net:
        config.net = args.net
    if args.overlap:
        config.overlap = int(args.overlap)

    # Read image paths, scales and tile indices from the tiling csv file
    path_to_img = []
    tiles = []
    images = []
    scales = []
    scales_new = []
    path_to_save = []
    start = 1
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            path_to_img.append(row[0])
            if start == 1:
                path_to_save.append(row[0])
                start = 0
            else:
                if path_to_save[-1] != row[0]:
                    path_to_save.append(row[0])
            scales.append(float(row[1]))
            tiles.append(int(row[2]))
    print(config.scale)

    tools = Tools()
    print("Loading predictions ...")
    predictions = h5py.File(args.predictionfile, 'r')['predictions']
    for i in range(0, len(tiles)):
        if tiles[i] == 0:
            # Read the first tile of each image; use a tif reader for tif images
            if os.path.basename(path_to_img[i]).split('.')[1] in ('tif', 'TIF'):
                img = imread(path_to_img[i])
            else:
                img = cv2.imread(path_to_img[i])
            images.append(Image.pre_process_img(img, color='gray'))
            scales_new.append(scales[i])

    # Create and save the reconstructed images
    print("Reconstruct images ...")
    reconstructed_predictions, reconstructed_masks = tools.reconstruct_images(
        images=images, predictions=predictions, scales=scales_new,
        rescale=config.scale, overlap=config.overlap, config=config,
        label_output=True, dilate_objects=int(args.dilate))
    for index, i in enumerate(reconstructed_masks):
        mask_path = path_to_save[index].replace(
            '.' + os.path.basename(path_to_save[index]).split('.')[1], '_mask.TIF')
        print(mask_path)
        imsave(mask_path, i)
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    parser.add_argument('--resultfile', help='select result file', default=None)
    parser.add_argument('--predictionfile', help='select prediction file', default=None)
    parser.add_argument('--net', help='describe net', default=None)
    parser.add_argument('--overlap', help='select overlap', default=None)
    args = parser.parse_args()

    tisquant = TisQuantExtract()
    config = Config
    if args.scale == '1':
        config.scale = True
    if args.resultfile:
        config.resultfile = args.resultfile
    else:
        print("No result file provided")
        exit()
    if args.net:
        config.net = args.net
    if args.overlap:
        config.overlap = int(args.overlap)

    path_to_img = []
    tiles = []
    images = []
    scales = []
    scales_new = []
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            path_to_img.append(row[0])
            scales.append(float(row[1]))
            tiles.append(int(row[2]))
    print(config.scale)

    tools = Tools()
    predictions = h5py.File(args.predictionfile, 'r')['predictions']
    for i in range(0, len(tiles)):
        if tiles[i] == 0:
            images.append(Image.pre_process_img(imread(path_to_img[i]), color='gray'))
            scales_new.append(scales[i])

    # Create, then pickle, the reconstructed images
    reconstructed_predictions, reconstructed_masks = tools.reconstruct_images(
        images=images, predictions=predictions, scales=scales_new,
        rescale=config.scale, overlap=config.overlap, config=config)
    pickle.dump({"masks": reconstructed_masks, "predictions": reconstructed_predictions},
                open(os.path.join(os.path.dirname(args.predictionfile),
                                  os.path.basename(args.predictionfile).replace('.h5', '_reconstructed.pkl')),
                     "wb"))
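# Note: both reconstruction scripts above keep the h5py file handle open for the
# lifetime of the process ('predictions' is read lazily from disk). A sketch that
# loads the dataset eagerly so the file can be closed; only worthwhile when the
# predictions fit in memory (an assumption, not verified here):
import h5py

def load_predictions(prediction_file):
    # [:] materializes the whole 'predictions' dataset as a numpy array.
    with h5py.File(prediction_file, 'r') as f:
        return f['predictions'][:]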
def main():
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--tissue', help='select tissue to train', default=None)
    parser.add_argument('--inputFolder', help='select input folder', default=None)
    parser.add_argument('--outputFolder', help='select output folder', default=None)
    parser.add_argument('--nr_images', help='select number of images to create', default=None)
    parser.add_argument('--overlapProbability', help='select overlapProbability', default=None)
    parser.add_argument('--scale', help='select scale (1 to enable scaling)', default=None)
    parser.add_argument('--img_prefix', help='select image prefix', default='Img_')
    parser.add_argument('--mask_prefix', help='select mask prefix', default='Mask_')
    # random.seed(13431)
    args = parser.parse_args()

    config = Config
    if args.tissue:
        config.diagnosis = [args.tissue]
    if args.outputFolder:
        config.outputFolder = args.outputFolder
    if args.overlapProbability:
        args.overlapProbability = float(args.overlapProbability)
    else:
        args.overlapProbability = 0.5
    # Tissue-dependent sampling frequency
    if args.tissue == 'Ganglioneuroma':
        n_freq = 20
    else:
        n_freq = 30
    if args.scale == '1':
        config.scale = True
    print(config.diagnosis)

    tools = Tools()
    annotated_nuclei = []
    annotated_images = []
    ids_images = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'images', '*.tif'))
    ids_masks = glob.glob(os.path.join(args.inputFolder, config.diagnosis[0], 'masks', '*.tif'))
    for index, elem in enumerate(ids_images):
        annotated_image = AnnotatedImage()
        annotated_image.readFromPath(ids_images[index], ids_masks[index], type='uint16')
        annotated_images.append(annotated_image)

    # Rescale images/masks to normalized scales; keep one object set per source image
    scales = tools.getNormalizedScales(annotated_images)
    running = 0
    for index, img in enumerate(annotated_images):
        annotated_image = AnnotatedImage()
        annotated_nuclei.append(AnnotatedObjectSet())
        if config.scale:
            annotated_image.createWithArguments(
                tools.rescale_image(img.getRaw(), (scales[index], scales[index])),
                tools.rescale_mask(img.getMask(), (scales[index], scales[index]), make_labels=True))
        else:
            annotated_image.createWithArguments(img.getRaw(), img.getMask())
        annotated_nuclei[running].addObjectImage(annotated_image, useBorderObjects=config.useBorderObjects,
                                                 tissue=args.tissue, scale=Config.scale)
        running += 1
        del annotated_image

    # Number of nuclei per artificial image depends on tissue and scaling
    if config.scale == 0:
        if args.tissue == 'Ganglioneuroma':
            possible_numbers = [9, 16, 25, 36, 49]
        else:
            possible_numbers = [4, 4, 9]
    else:
        possible_numbers = [9, 16, 25, 36, 49]

    # How many images?
    if not args.nr_images:
        args.nr_images = 10
    else:
        args.nr_images = int(args.nr_images)

    for t in tqdm(range(0, args.nr_images)):
        nr_img = random.randint(0, len(annotated_nuclei) - 1)
        # Create an artificial image
        number_nuclei = random.randint(0, len(possible_numbers) - 1)
        # Estimate the background intensity distribution from eroded background pixels
        tmp_image = annotated_nuclei[nr_img].images[0].getRaw()
        tmp_mask = annotated_nuclei[nr_img].images[0].getMask()
        kernel = np.ones((15, 15), np.uint8)
        bg = cv2.erode((tmp_mask == 0).astype(np.uint8), kernel, iterations=1)
        bg = np.sort(tmp_image[np.where(bg > 0)])
        img = ArtificialAnnotatedImage(width=256, height=256,
                                       number_nuclei=possible_numbers[number_nuclei],
                                       probabilityOverlap=args.overlapProbability,
                                       background=bg)
        total_added = 0
        for i in range(0, possible_numbers[number_nuclei]):
            obj = annotated_nuclei[nr_img].returnArbitraryObject()
            if randint(0, 1):
                obj = tools.arbitraryEnhance(obj)
            total_added += img.addImageAtGridPosition(obj)
        if total_added > 0:
            # Duplicate the grayscale image into a side-by-side RGB pair as expected by pix2pix
            shape_y = img.getRaw().shape[0]
            shape_x = img.getRaw().shape[1]
            img_new = np.zeros((shape_y, shape_x * 2, 3), dtype=np.float32)
            img_new[:, 0:shape_x, :] = img.getRaw()[:, :, np.newaxis]
            img_new[:, shape_x:2 * shape_x, :] = img.getRaw()[:, :, np.newaxis]
            # Note: the output paths assume config.outputFolder ends with a path separator
            scipy.misc.toimage(img_new, cmin=0.0, cmax=1.0).save(
                config.outputFolder + config.diagnosis[0] + '\\images\\' + args.img_prefix + str(t) + '.jpg')
            tifffile.imsave(config.outputFolder + config.diagnosis[0] + '\\masks\\' + args.mask_prefix + str(t) + '.tif',
                            img.getMask(), dtype=np.uint8)
def main():
    tools = Tools()
    structuredEvaluation = StructuredEvaluation()
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--resultfile', help='select result file',
                        default=r"E:\NuclearSegmentationPipeline\Results\results_scaled.csv")
    args = parser.parse_args()

    # Random colormap for label visualization
    vals = np.linspace(0.1, 0.9, 256)
    np.random.shuffle(vals)
    vals[0] = 1
    cmap = plt.cm.colors.ListedColormap(plt.cm.cubehelix(vals))

    # Read the image description csv and map image names to their metadata row
    raw_images = []
    groundtruth = []
    basefolder = r"D:\DeepLearning\Results_revision\Dataset_revision"
    reader = csv.reader(open(r"E:\NuclearSegmentationPipeline\DataGenerator\image_description_final_revision.csv", 'r'))
    next(reader)
    abs_index = 0
    mapping = dict()
    try:
        for row in reader:
            entrys = row[0].split(';')
            mapping[entrys[0]] = row[0]
    except:
        print('Unable to open csv file')

    type_list = ["Ganglioneuroblastoma", "Ganglioneuroblastoma_differentconditions", "Neuroblastoma_bmcytospin",
                 "Neuroblastoma_cellline_differentconditions", "Neuroblastoma_cellline_LSM",
                 "Neuroblastoma_touchimprint", "normal_cyto", "normal_differentconditions",
                 "normal_grown", "otherspecimen_tissuesections"]
    base_path = r"D:\DeepLearning\DataGenerator\tisquant_train_val_test_gold_revision\test"
    abs_index = 0
    path_to_img = []
    tiles = []
    scales = []
    count = 0

    # Read images from the tiling file
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            # Note: count is never incremented, so the < 37 guard never triggers
            if int(row[2]) == 0 and count < 37:
                raw_images.append(tifffile.imread(row[0]))
                groundtruth.append(tifffile.imread(row[0].replace('images', 'masks')))
                scales.append(row[1])
                entrys = mapping[os.path.basename(row[0]).split('.')[0]].split(';')
                structuredEvaluation.addTestImage(TMIImage(
                    position=abs_index, diagnosis=entrys[1], preparation=entrys[2],
                    magnification=entrys[5], modality=entrys[7], signal_to_noise=entrys[12],
                    naturepaperclass=entrys[4], challengelevel=entrys[14]))
                abs_index = abs_index + 1

    # Minimal object size per groundtruth image, used to post-process the predictions
    min_objects = []
    min_label = []
    for img_nr in range(0, abs_index):
        minsize, labeli = getminObjectSize(groundtruth[img_nr])
        min_objects.append(minsize)
        min_label.append(labeli)

    target = "masks"
    ids_predictions = glob.glob(os.path.join(r"E:\NuclearSegmentationPipeline\Results\\", '*.pkl'))
    predictions = []
    prediction_parameters = []
    for i in ids_predictions:
        prediction_parameters.append(os.path.basename(i).split('_reconstructed')[0])
        # Workaround for pickles written under Python 2
        with open(i, "rb") as f:
            u = pickle._Unpickler(f)
            u.encoding = "latin1"
            predictions.append(u.load())

    # Remove predicted objects smaller than the smallest groundtruth object
    for index, elem in enumerate(predictions):
        for j in range(0, len(min_objects)):
            predictions[index]["masks"][j] = tools.postprocess_mask(
                label(predictions[index]["masks"][j]), threshold=min_objects[j])
        structuredEvaluation.addArchitecture(Architecture(name=prediction_parameters[index]))

    for img_nr in range(0, abs_index):
        print("Calculating metrics for image " + str(img_nr) + "/" + str(abs_index))
        for index, elem in enumerate(predictions):
            erg = objectBasedMeasures4(groundtruth[img_nr] * 255, predictions[index][target][img_nr])
            [AJI_C, AJI_U] = calculateAggregatedJaccardIndex(groundtruth[img_nr] * 255,
                                                             predictions[index][target][img_nr])
            results = getMetrics(erg["masks"])
            structuredEvaluation.addMetric(
                Metric(FP=results["FP"], TP=results["TP"], FN=results["FN"],
                       dice=erg["DICE"], ji=erg["JI"], AJI_C=AJI_C, AJI_U=AJI_U,
                       US=results["US"], OS=results["OS"]),
                image=img_nr, architecture=prediction_parameters[index])

    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_GNB.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.GNB_I]))
    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_NB.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.NB_I, NaturepaperClasses.NB_IV]))
    structuredEvaluation.printMetrics(
        r"E:\NuclearSegmentationPipeline\Results\test_normal.csv",
        structuredEvaluation.calculateMetricsForDiagnosis(target='naturepaperclass',
                                                          targetlist=[NaturepaperClasses.NC_I, NaturepaperClasses.NC_III]))