def run(self):
    ##### Create identifiers
    myconfig = self.config

    ##### Set network specifics
    # check if the patch size isn't too large; if so, make it smaller
    # patchsize = myconfig["Patch size"]
    patchsize = set_patch_size(myconfig)

    ##### Load model from source step
    loc_model = myconfig["Model path"]

    dataset = SingleInstanceDataset(myconfig['Nifti paths'],
                                    brainmask_path=myconfig['Brainmask path'],
                                    transform=ToTensor())
    dataloader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)
    logDataLoader(dataloader, self.tmpdir)

    evaluator = StandardEvaluator.loadFromCheckpoint(loc_model)

    # Segment the single instance and write the result to the temp dir
    sample_batched = next(iter(dataloader))
    images = sample_batched["data"]
    header_sources = sample_batched["header_source"]
    resultpaths = [os.path.join(self.tmpdir, 'segmentation.nii.gz')]
    evaluator.segmentNifti(images, header_sources, patchsize, resultpaths)
    self.logger.info('Nifti image segmented.')

    self.tearDown()
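# For reference, a minimal sketch of what set_patch_size is assumed to do:
# clamp each configured patch dimension so the patch never exceeds the image.
# NOTE: this is an illustrative assumption; the "Image size" key and the exact
# clamping rule below are hypothetical, not the actual implementation.
def set_patch_size_sketch(config):
    patchsize = list(config["Patch size"])
    imagesize = config.get("Image size", patchsize)  # hypothetical key
    # Shrink any patch dimension that is larger than the image dimension
    return [min(p, s) for p, s in zip(patchsize, imagesize)]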
def run(self):
    ##### Create identifiers
    myconfig = self.config

    ##### Set network specifics
    patchsize = myconfig["Patch size"]
    batchsize = myconfig["Batch size"]
    output_type = myconfig["Output type"]
    only_first = myconfig["Only first"]

    #### Data specifics
    splits = myconfig["Splits"]

    ##### Load datamanager
    loc = myconfig["Nifti Source"]
    if self.config['Dataset'] == 'Brats18':
        dataset = Brats18.fromFile(loc)
    elif self.config['Dataset'] == 'BTD':
        dataset = BTD.fromFile(loc)
    elif self.config['Dataset'] == 'Generic':
        dataset = GenericData.fromFile(loc)

    if "Splits from File" in myconfig:
        dataset.loadSplits(myconfig["Splits from File"])

    ##### Load model from source step
    sourcestep = myconfig["Model Source"]
    loc_model = os.path.join(self.datadir, sourcestep)
    config_model = self.loadConfig(os.path.join(loc_model, 'config.json'))
    sequences = config_model["Sequences"]

    transform = ToTensor()
    testset = dataset.getDataset(splits, sequences, transform=transform)
    dataloader = DataLoader(testset, batch_size=batchsize, num_workers=0, shuffle=True)

    evaluator = StandardEvaluator.loadFromCheckpoint(os.path.join(loc_model, 'model.pt'))
    self.logger.info('Dataloader has {n} images.'.format(n=len(testset)))

    for i_batch, sample_batched in enumerate(dataloader):
        images = sample_batched["data"]
        subjects = sample_batched["subject"]
        header_sources = sample_batched["header_source"]
        t1_sources = sample_batched["t1_source"]

        resultpaths = [os.path.join(self.tmpdir, s + '_segmented.nii.gz') for s in subjects]
        evaluator.segmentNifti(images, header_sources, patchsize, resultpaths)

        for i in range(len(subjects)):
            plotResultImageWithoutGT(t1_sources[i], resultpaths[i], self.tmpdir,
                                     subjects[i], output_type=output_type)
            self.logger.info('Nifti image segmented for {}.'.format(subjects[i]))

        if only_first:
            break

    self.tearDown()
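# The Dataset dispatch above could also be table-driven; a sketch of an
# equivalent registry (same classes, same fromFile constructors). Looking a
# name up in the dict raises a KeyError for unknown dataset names, whereas
# the if/elif chain above silently leaves `dataset` undefined.
DATASET_REGISTRY = {
    'Brats18': Brats18,
    'BTD': BTD,
    'Generic': GenericData,
}

# Usage sketch:
# dataset = DATASET_REGISTRY[myconfig['Dataset']].fromFile(loc)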
def getDataloader(self):
    n_workers = self.config['Num Workers']
    batchsize = self.config['Batch size']
    sequences = self.config["Sequences"]
    loc = os.path.join(self.datadir, self.config["Nifti Source"])

    # Locate, organize and split the dataset
    dataset = Brats18.fromFile(loc)

    # Data specifics
    splits = self.config['Splits']
    testsplits = self.config['Testsplits']
    dataset.saveSplits(self.tmpdir)

    targetsize = tuple(self.config["Patch size"])
    imgsize = targetsize

    # Initialize input transforms
    transforms = [RandomCrop(output_size=imgsize), ToTensor()]
    transform = Compose(transforms)

    # Prepare the training set loader
    trainset = dataset.getDataset(splits, sequences, transform=transform)
    trainset.saveListOfPatients(os.path.join(self.tmpdir, 'trainset.json'))
    self.logger.info('Generating patches with input size ' + str(imgsize) +
                     ' and output size ' + str(targetsize))
    trainloader = DataLoader(trainset, batch_size=batchsize,
                             num_workers=n_workers, shuffle=True)

    # Prepare the test set loader
    if len(testsplits) > 0:
        testset = dataset.getDataset(testsplits, sequences, transform=transform)
        testloader = DataLoader(testset, batch_size=batchsize,
                                num_workers=n_workers, shuffle=True)
    else:
        testloader = None

    # Plot and save samples of a mini-batch
    logDataLoader(trainloader, self.tmpdir)

    return trainloader, testloader
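# Sketch of the RandomCrop transform used above, under the assumption that a
# sample is a dict of numpy arrays with 'data' shaped (C, D, H, W) and 'seg'
# shaped (D, H, W), and that the patch fits inside the volume (see
# set_patch_size). The key names and layout are assumptions, not the actual API.
import random

class RandomCropSketch(object):
    def __init__(self, output_size):
        self.output_size = output_size  # (d, h, w) patch size

    def __call__(self, sample):
        data, seg = sample['data'], sample['seg']
        d, h, w = self.output_size
        # Pick a random corner so the patch stays inside the volume
        z = random.randint(0, data.shape[-3] - d)
        y = random.randint(0, data.shape[-2] - h)
        x = random.randint(0, data.shape[-1] - w)
        sample['data'] = data[..., z:z + d, y:y + h, x:x + w]
        sample['seg'] = seg[..., z:z + d, y:y + h, x:x + w]
        return sample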
def evaluate(self):
    # Create identifiers
    myconfig = self.config

    # Set network specifics
    patchsize = myconfig["Patch size"]
    batchsize = myconfig["Batch size"]
    output_type = myconfig["Output type"]
    only_first = myconfig["Only first"]
    sequences = myconfig["Sequences"]

    # Data specifics
    splits = myconfig["Testsplits"]

    # Load data-manager
    loc = os.path.join(self.datadir, self.config["Nifti Source"])
    dataset = Brats18.fromFile(loc)

    transform = Compose([ToTensor()])
    testset = dataset.getDataset(splits, sequences, transform=transform)
    dataloader = DataLoader(testset, batch_size=batchsize, num_workers=0, shuffle=True)
    self.logger.info('Dataloader has {n} images.'.format(n=len(testset)))

    # Load model from source step
    sourcestep = self.name
    loc_model = os.path.join(self.datadir, sourcestep)
    evaluator = StandardEvaluator.loadFromCheckpoint(os.path.join(loc_model, 'model.pt'))

    all_dice = []
    all_dice_core = []
    all_dice_enhancing = []
    # Collect result rows in a list and build the DataFrame once afterwards;
    # DataFrame.append is deprecated and was removed in pandas 2.0.
    rows = []

    for i_batch, sample_batched in enumerate(dataloader):
        images = sample_batched['data']
        segfiles = sample_batched['seg_file']
        subjects = sample_batched['subject']
        segs = sample_batched['seg']

        resultpaths = [os.path.join(self.tmpdir, s + '_segmented.nii.gz')
                       for s in subjects]
        uncertaintypaths = [os.path.join(self.tmpdir, s + '_epistemic.nii.gz')
                            for s in subjects]
        classifications, epistemicUncertainty = evaluator.segmentNifti(
            images, segfiles, patchsize, resultpaths, uncertaintypaths)

        for i in range(len(subjects)):
            seg = segs[i].numpy()
            plotResultImage(dataset, resultpaths[i], uncertaintypaths[i],
                            self.tmpdir, subjects[i], output_type=output_type)

            # Per-class performance (labels 0-4)
            for c in range(5):
                truth = seg == c
                positive = classifications[i] == c
                (dice, TP, FP, FN, TN) = getPerformanceMeasures(positive, truth)
                rows.append({'sample': i_batch, 'class': c, 'subject': subjects[i],
                             'TP': TP, 'FP': FP, 'FN': FN, 'TN': TN, 'dice': dice})
                if c == 4:
                    all_dice_enhancing.append(dice)

            # Compound regions: whole tumor (any label) and tumor core (labels 1 and 4)
            class_whole = classifications[i] > 0
            result_core = (classifications[i] == 1) | (classifications[i] == 4)
            truth_whole = seg > 0
            truth_core = (seg == 1) | (seg == 4)
            (dice, TP, FP, FN, TN) = getPerformanceMeasures(class_whole, truth_whole)
            (dice_core, TP_core, FP_core, FN_core, TN_core) = \
                getPerformanceMeasures(result_core, truth_core)
            all_dice.append(dice)
            all_dice_core.append(dice_core)
            self.logger.info('Nifti image segmented for ' + subjects[i] +
                             '. Dice: ' + str(dice))
            rows.append({'sample': i_batch, 'class': 'whole', 'subject': subjects[i],
                         'TP': TP, 'FP': FP, 'FN': FN, 'TN': TN, 'dice': dice})
            rows.append({'sample': i_batch, 'class': 'core', 'subject': subjects[i],
                         'TP': TP_core, 'FP': FP_core, 'FN': FN_core, 'TN': TN_core,
                         'dice': dice_core})

        if only_first:
            break

    results = pd.DataFrame(rows, columns=['sample', 'subject', 'class',
                                          'TP', 'FP', 'FN', 'TN', 'dice'])

    dice_mean = sum(all_dice) / len(all_dice)
    dice_core = sum(all_dice_core) / len(all_dice_core)
    dice_enhancing = sum(all_dice_enhancing) / len(all_dice_enhancing)

    plt.boxplot(all_dice)
    plt.savefig(os.path.join(self.tmpdir, 'boxplot_dice.png'))
    plt.close()

    results.to_csv(os.path.join(self.tmpdir, 'results_eval.csv'))
    dataset.saveSplits(self.tmpdir)
    self.logger.info('Evaluation finished. Dice coefficient: whole: {}, core: {}, enhancing: {}'
                     .format(dice_mean, dice_core, dice_enhancing))
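# Sketch of getPerformanceMeasures as used above: given boolean prediction and
# truth masks, it is assumed to return (dice, TP, FP, FN, TN), where
# Dice = 2*TP / (2*TP + FP + FN). The real implementation may differ in detail.
import numpy as np

def get_performance_measures_sketch(positive, truth):
    positive = np.asarray(positive, dtype=bool)
    truth = np.asarray(truth, dtype=bool)
    TP = np.sum(positive & truth)
    FP = np.sum(positive & ~truth)
    FN = np.sum(~positive & truth)
    TN = np.sum(~positive & ~truth)
    denom = 2 * TP + FP + FN
    # Two empty masks count as perfect overlap here; the real helper may differ
    dice = 2 * TP / denom if denom > 0 else 1.0
    return dice, TP, FP, FN, TN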
def getDataloader(self):
    batchsize = self.config['Batch size']
    loc = os.path.join(self.datadir, self.config["Nifti Source"])
    sequences = self.config["Sequences"] if "Sequences" in self.config else None

    if self.config['Dataset'] == 'Brats18':
        dataset = Brats18.fromFile(loc)
    elif self.config['Dataset'] == 'BTD':
        if self.config['Brainmask'] == False:
            dataset = BTD.fromFile(loc, brainmask=False)
        else:
            dataset = BTD.fromFile(loc, brainmask=True)
    elif self.config['Dataset'] == 'Hippocampus':
        dataset = Hippocampus.fromFile(loc)

    if "Splits from File" in self.config:
        dataset.setSplits(self.config["Splits from File"])
    elif "Crossval Splits" in self.config:
        dataset.createCVSplits(self.config["Crossval Splits"])

    #### Data specifics
    splits = self.config['Splits']
    testsplits = self.config['Testsplits']
    dataset.saveSplits(self.tmpdir)

    # check if the patch size isn't too large; if so, make it smaller
    # targetsize = tuple(self.config["Patch size"])
    targetsize = tuple(set_patch_size(self.config))
    imgsize = targetsize

    transforms = [RandomCrop(output_size=imgsize), ToTensor()]
    if 'Whole Tumor' in self.config and self.config["Whole Tumor"]:
        transforms = [BinarySegmentation()] + transforms
    transform = Compose(transforms)

    if 'Target' in self.config and self.config['Target'] == 'Brainmask':
        trainset = dataset.getBrainmaskDataset(splits, sequences, transform=transform)
    else:
        trainset = dataset.getDataset(splits, sequences, transform=transform)
    trainset.saveListOfPatients(os.path.join(self.tmpdir, 'trainset.json'))
    self.logger.info('Generating patches with input size ' + str(imgsize) +
                     ' and output size ' + str(targetsize))

    n_workers = self.config['Num Workers']
    trainloader = DataLoader(trainset, batch_size=batchsize,
                             num_workers=n_workers, shuffle=True)

    if len(testsplits) > 0:
        if 'Target' in self.config and self.config['Target'] == 'Brainmask':
            testset = dataset.getBrainmaskDataset(testsplits, sequences,
                                                  transform=transform)
        else:
            testset = dataset.getDataset(testsplits, sequences, transform=transform)
        testloader = DataLoader(testset, batch_size=batchsize,
                                num_workers=n_workers, shuffle=True)
    else:
        testloader = None

    logDataLoader(trainloader, self.tmpdir)
    return trainloader, testloader
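# Sketch of the BinarySegmentation transform prepended above when "Whole Tumor"
# is set: it is assumed to collapse all positive labels into a single
# foreground class. The 'seg' key and numpy-array layout are assumptions.
class BinarySegmentationSketch(object):
    def __call__(self, sample):
        # Any nonzero label becomes 1 (whole tumor vs background)
        sample['seg'] = (sample['seg'] > 0).astype(sample['seg'].dtype)
        return sample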
def getDataloader(self):
    batchsize = self.config['Batch size']
    loc = os.path.join(self.datadir, self.config["Nifti Source"])
    sequences = self.config["Sequences"] if "Sequences" in self.config else None

    # Log the specialized preprocessing settings from the config
    self.logger.info('Normalization technique: {}'.format(self.config["technique"]))
    self.logger.info('Using Otsu threshold for normalization: {}'.format(
        self.config["using_otsu_ROI"]))
    self.logger.info('Resampling factor: {}'.format(self.config["resampling_factor"]))

    if self.config['Dataset'] == 'Brats18':
        dataset = Brats18.fromFile(loc)
    elif self.config['Dataset'] == 'BTD':
        self.logger.debug('Brainmask config: {}'.format(self.config['Brainmask']))
        if self.config['Brainmask'] == False:
            dataset = BTD.fromFile(loc, brainmask=False)
        else:
            dataset = BTD.fromFile(loc, brainmask=True)
    elif self.config['Dataset'] == 'Hippocampus':
        dataset = Hippocampus.fromFile(loc)
    elif self.config['Dataset'] == 'LipoData':
        dataset = LipoData.fromFile(loc)
    elif self.config['Dataset'] == 'LitsData':
        dataset = LitsData.fromFile(loc)
    elif self.config['Dataset'] == 'ErgoData':
        dataset = ErgoData.fromFile(loc)

    if "Splits from File" in self.config:
        dataset.setSplits(self.config["Splits from File"])
    elif "Crossval Splits" in self.config:
        dataset.createCVSplits(self.config["Crossval Splits"])

    #### Data specifics
    splits = self.config['Splits']
    testsplits = self.config['Testsplits']
    dataset.saveSplits(self.tmpdir)

    # check if the patch size isn't too large; if so, make it smaller
    # targetsize = tuple(self.config["Patch size"])
    targetsize = tuple(set_patch_size(self.config))
    imgsize = targetsize

    transforms = [RandomCrop(output_size=imgsize), ToTensor()]
    if 'Whole Tumor' in self.config and self.config["Whole Tumor"]:
        transforms = [BinarySegmentation()] + transforms
    transform = Compose(transforms)

    if 'Target' in self.config and self.config['Target'] == 'Brainmask':
        trainset = dataset.getBrainmaskDataset(splits, sequences, transform=transform)
    else:
        trainset = dataset.getDataset(splits, sequences, transform=transform,
                                      preprocess_config=self.config)
    trainset.saveListOfPatients(os.path.join(self.tmpdir, 'trainset.json'))
    self.logger.info('Generating patches with input size ' + str(imgsize) +
                     ' and output size ' + str(targetsize))

    n_workers = self.config['Num Workers']
    now = datetime.now()
    self.logger.info('Train loader is initializing at {}'.format(now))
    trainloader = DataLoader(trainset, batch_size=batchsize,
                             num_workers=n_workers, shuffle=True)
    self.logger.info('Train loader initialized in {}'.format(datetime.now() - now))

    if len(testsplits) > 0:
        if 'Target' in self.config and self.config['Target'] == 'Brainmask':
            testset = dataset.getBrainmaskDataset(testsplits, sequences,
                                                  transform=transform)
        else:
            testset = dataset.getDataset(testsplits, sequences, transform=transform,
                                         preprocess_config=self.config)
        testloader = DataLoader(testset, batch_size=batchsize,
                                num_workers=n_workers, shuffle=True)
    else:
        testloader = None

    logDataLoader(trainloader, self.tmpdir)
    return trainloader, testloader
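# An illustrative config covering the keys this getDataloader variant reads.
# All values below are placeholders for illustration, not recommended settings,
# and the sequence names are assumptions.
EXAMPLE_CONFIG = {
    "Batch size": 2,
    "Nifti Source": "preprocess_step",
    "Sequences": ["t1", "t2", "flair"],
    "technique": "zscore",           # normalization technique
    "using_otsu_ROI": True,          # Otsu threshold for normalization
    "resampling_factor": 1,
    "Dataset": "BTD",
    "Brainmask": True,
    "Splits": [0, 1, 2],
    "Testsplits": [3],
    "Patch size": [64, 64, 64],
    "Num Workers": 4,
    # Optional keys: "Splits from File", "Crossval Splits",
    # "Whole Tumor", "Target"
}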
def run(self):
    import pickle  # used below to dump raw outputs per batch

    ##### Create identifiers
    myconfig = self.config

    ##### Set network specifics
    patchsize = myconfig["Patch size"]
    batchsize = myconfig["Batch size"]
    output_type = myconfig["Output type"]
    only_first = myconfig["Only first"]

    #### Data specifics
    splits = myconfig["Splits"]

    ##### Load datamanager
    loc = os.path.join(self.datadir, self.config["Nifti Source"])
    if self.config['Dataset'] == 'Brats18':
        dataset = Brats18.fromFile(loc)
    elif self.config['Dataset'] == 'BTD':
        dataset = BTD.fromFile(loc)
    elif self.config['Dataset'] == 'Hippocampus':
        dataset = Hippocampus.fromFile(loc)

    if "Splits from File" in myconfig:
        dataset.loadSplits(myconfig["Splits from File"])

    ##### Load model from source step
    sourcestep = myconfig["Model Source"]
    loc_model = os.path.join(self.datadir, sourcestep)
    config_model = self.loadConfig(os.path.join(loc_model, 'config.json'))
    if "Sequences" in self.config:
        sequences = self.config["Sequences"]
    else:
        sequences = config_model["Sequences"]

    transforms = [ToTensor()]
    if 'Whole Tumor' in self.config and self.config["Whole Tumor"]:
        transforms = [BinarySegmentation()] + transforms
    transform = Compose(transforms)

    if 'Target' in self.config and self.config['Target'] == 'Brainmask':
        testset = dataset.getBrainmaskDataset(splits, sequences, transform=transform)
    else:
        testset = dataset.getDataset(splits, sequences, transform=transform)
    dataloader = DataLoader(testset, batch_size=batchsize, num_workers=0, shuffle=True)

    evaluator = StandardEvaluator.loadFromCheckpoint(os.path.join(loc_model, 'model.pt'))
    self.logger.info('Dataloader has {n} images.'.format(n=len(testset)))

    all_dice = []
    all_dice_core = []
    all_dice_enhancing = []
    # Collect result rows in a list and build the DataFrame once afterwards;
    # DataFrame.append is deprecated and was removed in pandas 2.0.
    rows = []

    for i_batch, sample_batched in enumerate(dataloader):
        # TODO: work with original dataset
        images = sample_batched['data']
        segfiles = sample_batched['seg_file']
        subjects = sample_batched['subject']
        segs = sample_batched['seg']

        resultpaths = [os.path.join(self.tmpdir, s + '_segmented.nii.gz')
                       for s in subjects]
        uncertaintypaths = [os.path.join(self.tmpdir, s + '_epistemic.nii.gz')
                            for s in subjects]
        classifications, epistemicUncertainty = evaluator.segmentNifti(
            images, segfiles, patchsize, resultpaths, uncertaintypaths)

        # Dump the raw outputs for offline analysis; note the file is
        # overwritten on every batch.
        segmentations = segs.numpy()
        picklepath = myconfig["pickleVar"]
        with open(picklepath, 'wb') as f:
            pickle.dump([segmentations, classifications, epistemicUncertainty,
                         segfiles, subjects], f)

        for i in range(len(subjects)):
            seg = segs[i].numpy()
            plotResultImage(dataset, resultpaths[i], uncertaintypaths[i],
                            self.tmpdir, subjects[i], output_type=output_type)

            # Per-class performance (labels 0-4)
            for c in range(5):
                truth = seg == c
                positive = classifications[i] == c
                (dice, TP, FP, FN, TN) = getPerformanceMeasures(positive, truth)
                rows.append({'sample': i_batch, 'class': c, 'subject': subjects[i],
                             'TP': TP, 'FP': FP, 'FN': FN, 'TN': TN, 'dice': dice})
                if c == 4:
                    all_dice_enhancing.append(dice)

            # Compound regions: whole tumor (any label) and tumor core (labels 1 and 4)
            class_whole = classifications[i] > 0
            result_core = (classifications[i] == 1) | (classifications[i] == 4)
            truth_whole = seg > 0
            truth_core = (seg == 1) | (seg == 4)
            (dice, TP, FP, FN, TN) = getPerformanceMeasures(class_whole, truth_whole)
            (dice_core, TP_core, FP_core, FN_core, TN_core) = \
                getPerformanceMeasures(result_core, truth_core)
            all_dice.append(dice)
            all_dice_core.append(dice_core)
            self.logger.info('Nifti image segmented for ' + subjects[i] +
                             '. Dice: ' + str(dice))
            rows.append({'sample': i_batch, 'class': 'whole', 'subject': subjects[i],
                         'TP': TP, 'FP': FP, 'FN': FN, 'TN': TN, 'dice': dice})
            rows.append({'sample': i_batch, 'class': 'core', 'subject': subjects[i],
                         'TP': TP_core, 'FP': FP_core, 'FN': FN_core, 'TN': TN_core,
                         'dice': dice_core})

        if only_first:
            break

    results = pd.DataFrame(rows, columns=['sample', 'subject', 'class',
                                          'TP', 'FP', 'FN', 'TN', 'dice'])

    dice_mean = sum(all_dice) / len(all_dice)
    dice_core = sum(all_dice_core) / len(all_dice_core)
    dice_enhancing = sum(all_dice_enhancing) / len(all_dice_enhancing)

    plt.boxplot(all_dice)
    plt.savefig(os.path.join(self.tmpdir, 'boxplot_dice.png'))
    plt.close()

    results.to_csv(os.path.join(self.tmpdir, 'results_eval.csv'))
    dataset.saveSplits(self.tmpdir)
    self.logger.info('Evaluation finished. Dice coefficient: whole: {}, core: {}, enhancing: {}'
                     .format(dice_mean, dice_core, dice_enhancing))
    self.tearDown()
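# The compound regions scored in the two evaluation functions above follow the
# label convention used throughout this code (whole = any tumor label,
# core = labels 1 and 4, enhancing = label 4). A small helper capturing that
# mapping for reuse; the function name is illustrative.
def tumor_regions(labelmap):
    # labelmap: integer array of class labels per voxel
    return {
        'whole': labelmap > 0,
        'core': (labelmap == 1) | (labelmap == 4),
        'enhancing': labelmap == 4,
    }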