def segment(self):
    """Run the full GM/WM segmentation pipeline on the target image.

    Copies the input data to a temporary directory, registers the target
    to the model dictionary space, projects it into the model's reduced
    space, fuses the labels of the most similar model slices, warps the
    segmentation back to image space, then saves the resulting GM and WM
    segmentations under ``param_seg.path_results``.
    """
    self.copy_data_to_tmp()
    # go to tmp directory
    os.chdir(self.tmp_dir)
    # load model
    self.model.load_model()
    self.target_im, self.info_preprocessing = pre_processing(self.param_seg.fname_im, self.param_seg.fname_seg, self.param_seg.fname_level, new_res=self.param_data.axial_res, square_size_size_mm=self.param_data.square_size_size_mm, denoising=self.param_data.denoising, verbose=self.param.verbose, rm_tmp=self.param.rm_tmp)

    printv('\nRegister target image to model data...', self.param.verbose, 'normal')
    # register target image to model dictionary space
    path_warp = self.register_target()

    if self.param_data.normalization:
        printv('\nNormalize intensity of target image...', self.param.verbose, 'normal')
        self.normalize_target()

    printv('\nProject target image into the model reduced space...', self.param.verbose, 'normal')
    self.project_target()

    printv('\nCompute similarities between target slices and model slices using model reduced space...', self.param.verbose, 'normal')
    list_dic_indexes_by_slice = self.compute_similarities()

    printv('\nLabel fusion of model slices most similar to target slices...', self.param.verbose, 'normal')
    self.label_fusion(list_dic_indexes_by_slice)

    printv('\nWarp back segmentation into image space...', self.param.verbose, 'normal')
    self.warp_back_seg(path_warp)

    printv('\nPost-processing...', self.param.verbose, 'normal')
    self.im_res_gmseg, self.im_res_wmseg = self.post_processing()

    # NOTE(review): '../' assumes we were launched from the parent of
    # tmp_dir — confirm copy_data_to_tmp() guarantees this.
    if (self.param_seg.path_results != './') and (not os.path.exists('../' + self.param_seg.path_results)):
        # create output folder
        printv('\nCreate output folder ...', self.param.verbose, 'normal')
        os.chdir('..')
        os.mkdir(self.param_seg.path_results)
        os.chdir(self.tmp_dir)

    if self.param_seg.fname_manual_gmseg is not None:
        # compute validation metrics
        printv('\nCompute validation metrics...', self.param.verbose, 'normal')
        self.validation()

    # BUG FIX: was `is not '0'` — an identity comparison with a string
    # literal (SyntaxWarning on Python 3.8+, correct only by accident of
    # string interning). Equality is what is meant here.
    if self.param_seg.ratio != '0':
        printv('\nCompute GM/WM CSA ratio...', self.param.verbose, 'normal')
        self.compute_ratio()

    # go back to original directory
    os.chdir('..')

    printv('\nSave resulting GM and WM segmentations...', self.param.verbose, 'normal')
    self.fname_res_gmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_gmseg')
    self.fname_res_wmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_wmseg')
    self.im_res_gmseg.setFileName(self.fname_res_gmseg)
    self.im_res_wmseg.setFileName(self.fname_res_wmseg)
    self.im_res_gmseg.save()
    self.im_res_wmseg.save()
def segment(self):
    """Segment the grey and white matter of the target image.

    Runs the multi-atlas pipeline inside ``self.tmp_dir`` (preprocess,
    register to model space, project into the reduced space, fuse labels
    of the most similar dictionary slices, warp back, post-process),
    then restores the launch directory and writes the GM/WM
    segmentations under ``param_seg.path_results``.
    """
    self.copy_data_to_tmp()

    # remember the launch directory, then do all the work in tmp
    path_orig = os.getcwd()
    os.chdir(self.tmp_dir)

    # load model
    self.model.load_model()

    self.target_im, self.info_preprocessing = pre_processing(
        self.param_seg.fname_im,
        self.param_seg.fname_seg,
        self.param_seg.fname_level,
        new_res=self.param_data.axial_res,
        square_size_size_mm=self.param_data.square_size_size_mm,
        denoising=self.param_data.denoising,
        verbose=self.param.verbose,
        rm_tmp=self.param.rm_tmp)

    printv('\nRegister target image to model data...', self.param.verbose, 'normal')
    # register target image to model dictionary space
    fname_warp = self.register_target()

    if self.param_data.normalization:
        printv('\nNormalize intensity of target image...', self.param.verbose, 'normal')
        self.normalize_target()

    printv('\nProject target image into the model reduced space...', self.param.verbose, 'normal')
    self.project_target()

    printv('\nCompute similarities between target slices and model slices using model reduced space...', self.param.verbose, 'normal')
    dic_indexes_per_slice = self.compute_similarities()

    printv('\nLabel fusion of model slices most similar to target slices...', self.param.verbose, 'normal')
    self.label_fusion(dic_indexes_per_slice)

    printv('\nWarp back segmentation into image space...', self.param.verbose, 'normal')
    self.warp_back_seg(fname_warp)

    printv('\nPost-processing...', self.param.verbose, 'normal')
    self.im_res_gmseg, self.im_res_wmseg = self.post_processing()

    need_out_dir = (self.param_seg.path_results != './'
                    and not os.path.exists(os.path.join(path_orig, self.param_seg.path_results)))
    if need_out_dir:
        # create output folder
        printv('\nCreate output folder ...', self.param.verbose, 'normal')
        os.chdir(path_orig)
        os.mkdir(self.param_seg.path_results)
        os.chdir(self.tmp_dir)

    if self.param_seg.fname_manual_gmseg is not None:
        # compute validation metrics
        printv('\nCompute validation metrics...', self.param.verbose, 'normal')
        self.validation()

    # go back to original directory
    os.chdir(path_orig)

    printv('\nSave resulting GM and WM segmentations...', self.param.verbose, 'normal')
    base_name = ''.join(extract_fname(self.param_seg.fname_im)[1:])
    self.fname_res_gmseg = os.path.join(self.param_seg.path_results, add_suffix(base_name, '_gmseg'))
    self.fname_res_wmseg = os.path.join(self.param_seg.path_results, add_suffix(base_name, '_wmseg'))
    self.im_res_gmseg.absolutepath = self.fname_res_gmseg
    self.im_res_wmseg.absolutepath = self.fname_res_wmseg
    self.im_res_gmseg.save()
    self.im_res_wmseg.save()
def load_model_data(self):
    """Load and preprocess the per-subject dictionary data for the model.

    Data should be organized with one folder per subject containing:
        - A WM/GM contrasted image containing 'im' in its name
        - a segmentation of the SC containing 'seg' in its name
        - a/several manual segmentation(s) of GM containing 'gm' in its/their name(s)
        - a file containing vertebral level information as a nifti image or as a text file containing 'level' in its name

    Appends one Slice object per axial slice to ``self.slices`` with a
    globally increasing ``slice_id``.
    """
    path_data = slash_at_the_end(self.param_model.path_data, slash=1)

    list_sub = [sub for sub in os.listdir(path_data) if os.path.isdir(os.path.join(path_data, sub))]
    if self.param_model.ind_rm is not None and self.param_model.ind_rm < len(list_sub):
        # leave-one-out: drop the subject being validated
        list_sub.pop(self.param_model.ind_rm)

    # total number of slices: J
    j = 0

    for sub in list_sub:
        # load images of each subject
        fname_data = None
        fname_sc_seg = None
        list_fname_gmseg = []
        fname_level = None

        for file_name in os.listdir(os.path.join(path_data, sub)):
            # FIX: build paths with os.path.join instead of string
            # concatenation (robust regardless of trailing slash)
            fname = os.path.join(path_data, sub, file_name)
            if os.path.isfile(fname):
                if 'gm' in file_name:
                    list_fname_gmseg.append(fname)
                elif 'seg' in file_name:
                    fname_sc_seg = fname
                elif 'im' in file_name:
                    fname_data = fname
                # 'level' is checked independently: a level file may also
                # match one of the patterns above
                if 'level' in file_name:
                    fname_level = fname

        info_data = 'Loaded files: \n'
        info_data += 'Image: ....... ' + str(fname_data) + '\n'
        info_data += 'SC seg: ...... ' + str(fname_sc_seg) + '\n'
        info_data += 'GM seg: ...... ' + str(list_fname_gmseg) + '\n'
        info_data += 'Levels: ...... ' + str(fname_level) + '\n'

        # BUG FIX: compare to None with `is`, not `==`, and test list
        # emptiness by truthiness instead of `== []`
        if fname_data is None or fname_sc_seg is None or not list_fname_gmseg:
            printv(info_data, self.param.verbose, 'error')
        else:
            printv(info_data, self.param.verbose, 'normal')

        # preprocess data
        list_slices_sub, info = pre_processing(
            fname_data,
            fname_sc_seg,
            fname_level=fname_level,
            fname_manual_gmseg=list_fname_gmseg,
            new_res=self.param_data.axial_res,
            square_size_size_mm=self.param_data.square_size_size_mm,
            denoising=self.param_data.denoising,
            for_model=True)
        for i_slice, slice_sub in enumerate(list_slices_sub):
            slice_sub.set(slice_id=i_slice + j)
            self.slices.append(slice_sub)

        j += len(list_slices_sub)
def load_model_data(self):
    """Load and preprocess the per-subject dictionary data for the model.

    Data should be organized with one folder per subject containing:
        - A WM/GM contrasted image containing 'im' in its name
        - a segmentation of the SC containing 'seg' in its name
        - a/several manual segmentation(s) of GM containing 'gm' in its/their name(s)
        - a file containing vertebral level information as a nifti image or as a text file containing 'level' in its name

    Appends one Slice object per axial slice to ``self.slices`` with a
    globally increasing ``slice_id``.
    """
    path_data = self.param_model.path_data

    list_sub = [sub for sub in os.listdir(path_data) if os.path.isdir(os.path.join(path_data, sub))]
    if self.param_model.ind_rm is not None and self.param_model.ind_rm < len(list_sub):
        # leave-one-out: drop the subject being validated
        list_sub.pop(self.param_model.ind_rm)

    # total number of slices: J
    j = 0

    for sub in list_sub:
        # load images of each subject
        fname_data = None
        fname_sc_seg = None
        list_fname_gmseg = []
        fname_level = None

        for file_name in os.listdir(os.path.join(path_data, sub)):
            fname = os.path.join(path_data, sub, file_name)
            if os.path.isfile(fname):
                if 'gm' in file_name:
                    list_fname_gmseg.append(fname)
                elif 'seg' in file_name:
                    fname_sc_seg = fname
                elif 'im' in file_name:
                    fname_data = fname
                # 'level' is checked independently: a level file may also
                # match one of the patterns above
                if 'level' in file_name:
                    fname_level = fname

        info_data = 'Loaded files: \n'
        info_data += 'Image: ....... ' + str(fname_data) + '\n'
        info_data += 'SC seg: ...... ' + str(fname_sc_seg) + '\n'
        info_data += 'GM seg: ...... ' + str(list_fname_gmseg) + '\n'
        info_data += 'Levels: ...... ' + str(fname_level) + '\n'

        # BUG FIX: compare to None with `is`, not `==`, and test list
        # emptiness by truthiness instead of `== []`
        if fname_data is None or fname_sc_seg is None or not list_fname_gmseg:
            printv(info_data, self.param.verbose, 'error')
        else:
            printv(info_data, self.param.verbose, 'normal')

        # preprocess data
        list_slices_sub, info = pre_processing(
            fname_data,
            fname_sc_seg,
            fname_level=fname_level,
            fname_manual_gmseg=list_fname_gmseg,
            new_res=self.param_data.axial_res,
            square_size_size_mm=self.param_data.square_size_size_mm,
            denoising=self.param_data.denoising,
            for_model=True)
        for i_slice, slice_sub in enumerate(list_slices_sub):
            slice_sub.set(slice_id=i_slice + j)
            self.slices.append(slice_sub)

        j += len(list_slices_sub)
def segment(self):
    """Run the GM/WM segmentation pipeline, save results and QC output.

    Same pipeline as the model-based segmentation (preprocess, register,
    project, label fusion, warp back, post-process), plus optional
    quality-control image generation, an fslview command hint, and
    cleanup of the temporary directory.
    """
    self.copy_data_to_tmp()
    # go to tmp directory
    os.chdir(self.tmp_dir)
    # load model
    self.model.load_model()
    self.target_im, self.info_preprocessing = pre_processing(self.param_seg.fname_im, self.param_seg.fname_seg, self.param_seg.fname_level, new_res=self.param_data.axial_res, square_size_size_mm=self.param_data.square_size_size_mm, denoising=self.param_data.denoising, verbose=self.param.verbose, rm_tmp=self.param.rm_tmp)

    printv('\nRegister target image to model data...', self.param.verbose, 'normal')
    # register target image to model dictionary space
    path_warp = self.register_target()

    printv('\nNormalize intensity of target image...', self.param.verbose, 'normal')
    self.normalize_target()

    printv('\nProject target image into the model reduced space...', self.param.verbose, 'normal')
    self.project_target()

    printv('\nCompute similarities between target slices and model slices using model reduced space...', self.param.verbose, 'normal')
    list_dic_indexes_by_slice = self.compute_similarities()

    printv('\nLabel fusion of model slices most similar to target slices...', self.param.verbose, 'normal')
    self.label_fusion(list_dic_indexes_by_slice)

    printv('\nWarp back segmentation into image space...', self.param.verbose, 'normal')
    self.warp_back_seg(path_warp)

    printv('\nPost-processing...', self.param.verbose, 'normal')
    self.im_res_gmseg, self.im_res_wmseg = self.post_processing()

    # NOTE(review): '../' assumes we were launched from the parent of
    # tmp_dir — confirm copy_data_to_tmp() guarantees this.
    if (self.param_seg.path_results != './') and (not os.path.exists('../' + self.param_seg.path_results)):
        # create output folder
        printv('\nCreate output folder ...', self.param.verbose, 'normal')
        os.chdir('..')
        os.mkdir(self.param_seg.path_results)
        os.chdir(self.tmp_dir)

    if self.param_seg.fname_manual_gmseg is not None:
        # compute validation metrics
        printv('\nCompute validation metrics...', self.param.verbose, 'normal')
        self.validation()

    # BUG FIX: was `is not '0'` — an identity comparison with a string
    # literal (SyntaxWarning on Python 3.8+, correct only by accident of
    # string interning). Equality is what is meant here.
    if self.param_seg.ratio != '0':
        printv('\nCompute GM/WM CSA ratio...', self.param.verbose, 'normal')
        self.compute_ratio()

    # go back to original directory
    os.chdir('..')

    printv('\nSave resulting GM and WM segmentations...', self.param.verbose, 'normal')
    fname_res_gmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_gmseg')
    fname_res_wmseg = self.param_seg.path_results + add_suffix(''.join(extract_fname(self.param_seg.fname_im)[1:]), '_wmseg')
    self.im_res_gmseg.setFileName(fname_res_gmseg)
    self.im_res_wmseg.setFileName(fname_res_wmseg)
    self.im_res_gmseg.save()
    self.im_res_wmseg.save()

    # save quality control and print info: colormaps and threshold
    # depend on whether the segmentation is binary or probabilistic
    if self.param_seg.type_seg == 'bin':
        wm_col = 'Red'
        gm_col = 'Blue'
        b = '0,1'
    else:
        wm_col = 'Blue-Lightblue'
        gm_col = 'Red-Yellow'
        b = '0.4,1'

    if self.param_seg.qc:
        # output QC image
        printv('\nSave quality control images...', self.param.verbose, 'normal')
        im = Image(self.tmp_dir + self.param_seg.fname_im)
        im.save_quality_control(plane='axial', n_slices=5, seg=self.im_res_gmseg, thr=float(b.split(',')[0]), cmap_col='red-yellow', path_output=self.param_seg.path_results)

    printv('\nDone! To view results, type:', self.param.verbose)
    printv('fslview ' + self.param_seg.fname_im_original + ' ' + fname_res_gmseg + ' -b ' + b + ' -l ' + gm_col + ' -t 0.7 ' + fname_res_wmseg + ' -b ' + b + ' -l ' + wm_col + ' -t 0.7 & \n', self.param.verbose, 'info')

    if self.param.rm_tmp:
        # remove tmp_dir
        shutil.rmtree(self.tmp_dir)