def test_reorientation_backport():
    """Exercise ``Nifti1Image.as_reoriented`` against ``_as_reoriented_backport``.

    For each voxel-size configuration, build a randomly rotated affine, pick a
    random non-identity reorientation, and check that the native method and the
    backport agree on affine, data array, and dim_info handling.
    """
    voxel_size_cases = ((1, 1, 1), (2, 2, 3))
    payload = np.random.normal(size=(17, 18, 19, 2))
    for zooms in voxel_size_cases:
        # Random rotation (pitch restricted to half range), scaling, and shift.
        euler = np.random.uniform(-np.pi, np.pi, 3) * [1, 0.5, 1]
        rotation = nb.eulerangles.euler2mat(*euler)
        zoom_mat = np.diag(zooms)
        shift = np.array((17, 18, 19)) / 2
        affine = nb.affines.from_matvec(rotation.dot(zoom_mat), shift)
        # Build the test image with known dim_info.
        img = nb.Nifti1Image(payload, affine)
        img.header.set_dim_info(**{"freq": 0, "phase": 1, "slice": 2})
        # Draw random orientations until one differs from the original.
        dst_ornt = src_ornt = nb.io_orientation(affine)
        while np.array_equal(dst_ornt, src_ornt):
            new_code = np.random.choice(_orientations)
            dst_ornt = axcodes2ornt(new_code)
        identity = ornt_transform(src_ornt, src_ornt)
        transform = ornt_transform(src_ornt, dst_ornt)
        # An identity transform must return the very same image object.
        assert img.as_reoriented(identity) is img
        assert _as_reoriented_backport(img, identity) is img
        reor_native = img.as_reoriented(transform)
        reor_backport = _as_reoriented_backport(img, transform)
        flips_only = img.shape == reor_native.shape
        # A real reorientation changes the affine; unless it is flips-only,
        # the data array changes too.
        assert not np.allclose(img.affine, reor_native.affine)
        assert not (flips_only and np.allclose(img.get_fdata(), reor_native.get_fdata()))
        # dim_info is preserved exactly when no axes were reordered.
        assert flips_only == np.array_equal(img.header.get_dim_info(),
                                            reor_native.header.get_dim_info())
        # Native implementation and backport must produce equivalent images.
        assert np.allclose(reor_native.affine, reor_backport.affine)
        assert np.array_equal(reor_native.get_fdata(), reor_backport.get_fdata())
        assert np.array_equal(reor_native.header.get_dim_info(),
                              reor_backport.header.get_dim_info())
def test_reorientation_backport():
    """Check that ``Nifti1Image.as_reoriented`` and ``_as_reoriented_backport``
    produce equivalent results for random rotations and reorientations.

    Fix: use ``get_fdata()`` instead of ``get_data()`` — the latter is
    deprecated since nibabel 3.0 and removed in nibabel 5.0.
    """
    pixdims = ((1, 1, 1), (2, 2, 3))
    data = np.random.normal(size=(17, 18, 19, 2))
    for pixdim in pixdims:
        # Generate a randomly rotated affine
        angles = np.random.uniform(-np.pi, np.pi, 3) * [1, 0.5, 1]
        rot = nb.eulerangles.euler2mat(*angles)
        scale = np.diag(pixdim)
        translation = np.array((17, 18, 19)) / 2
        affine = nb.affines.from_matvec(rot.dot(scale), translation)
        # Create image with known dim_info
        img = nb.Nifti1Image(data, affine)
        dim_info = {'freq': 0, 'phase': 1, 'slice': 2}
        img.header.set_dim_info(**dim_info)
        # Find a random, non-identity transform
        targ_ornt = orig_ornt = nb.io_orientation(affine)
        while np.array_equal(targ_ornt, orig_ornt):
            new_code = np.random.choice(_orientations)
            targ_ornt = axcodes2ornt(new_code)
        identity = ornt_transform(orig_ornt, orig_ornt)
        transform = ornt_transform(orig_ornt, targ_ornt)
        # Identity transform returns exact image
        assert img.as_reoriented(identity) is img
        assert _as_reoriented_backport(img, identity) is img
        reoriented_a = img.as_reoriented(transform)
        reoriented_b = _as_reoriented_backport(img, transform)
        flips_only = img.shape == reoriented_a.shape
        # Reorientation changes affine and data array
        assert not np.allclose(img.affine, reoriented_a.affine)
        # get_fdata() is the supported replacement for the removed get_data()
        assert not (flips_only and np.allclose(img.get_fdata(), reoriented_a.get_fdata()))
        # Dimension info changes iff axes are reordered
        assert flips_only == np.array_equal(img.header.get_dim_info(),
                                            reoriented_a.header.get_dim_info())
        # Both approaches produce equivalent images
        assert np.allclose(reoriented_a.affine, reoriented_b.affine)
        assert np.array_equal(reoriented_a.get_fdata(), reoriented_b.get_fdata())
        assert np.array_equal(reoriented_a.header.get_dim_info(),
                              reoriented_b.header.get_dim_info())
def reorient_to(img, axcodes_to=('P', 'I', 'R'), verb=False):
    """Return a copy of ``img`` reoriented to the axis codes ``axcodes_to``.

    Parameters
    ----------
    img : nibabel image
    axcodes_to : tuple of 3 characters specifying the desired orientation
    verb : bool
        Print a message describing the reorientation when True.

    Returns
    -------
    nibabel.Nifti1Image
        The reoriented image.
    """
    source_ornt = nio.io_orientation(img.affine)
    target_ornt = nio.axcodes2ornt(axcodes_to)
    mapping = nio.ornt_transform(source_ornt, target_ornt)
    # Pull the raw voxel data (preserving the on-disk dtype) and reorder it.
    voxels = np.asanyarray(img.dataobj, dtype=img.dataobj.dtype)
    voxels = nio.apply_orientation(voxels, mapping)
    # Compose the original affine with the inverse of the voxel reordering so
    # that world coordinates are unchanged.
    adjusted_affine = np.matmul(img.affine, nio.inv_ornt_aff(mapping, voxels.shape))
    reoriented = nib.Nifti1Image(voxels, adjusted_affine)
    if verb:
        print("[*] Image reoriented from", nio.ornt2axcodes(source_ornt), "to", axcodes_to)
    return reoriented
def rescale_centroids(ctd_list, img, voxel_spacing=(1, 1, 1)):
    """Rescale centroid coordinates to a new voxel spacing, keeping the
    current x-y-z orientation.

    Parameters
    ----------
    ctd_list : list
        First entry is the orientation axcodes; remaining entries are
        ``[label, x, y, z]`` centroids.
    img : nibabel image
    voxel_spacing : tuple
        Desired spacing.

    Returns
    -------
    list
        Rescaled centroid list in the same layout as ``ctd_list``.
    """
    img_ornt = nio.io_orientation(img.affine)
    ctd_ornt = nio.axcodes2ornt(ctd_list[0])
    if np.array_equal(img_ornt, ctd_ornt):
        # Centroids already share the image orientation: use its zooms directly.
        zooms = img.header.get_zooms()
    else:
        # Otherwise derive the zooms of the image as seen from the
        # centroid orientation.
        reorder = nio.ornt_transform(img_ornt, ctd_ornt)
        reorder_aff = nio.inv_ornt_aff(reorder, img.dataobj.shape)
        zooms = nib.affines.voxel_sizes(np.matmul(img.affine, reorder_aff))
    # Row 0 of the transposed array holds labels; rows 1..3 hold x/y/z.
    coords = np.transpose(np.asarray(ctd_list[1:]))
    labels = coords[0].astype(int).tolist()  # vertebral labels
    coords = coords[1:]
    for axis in range(3):
        coords[axis] = np.around(coords[axis] * zooms[axis] / voxel_spacing[axis],
                                 decimals=1)
    out_list = [ctd_list[0]]
    for label, xyz in zip(labels, np.transpose(coords).tolist()):
        out_list.append([label] + xyz)
    print("[*] Rescaled centroid coordinates to spacing (x, y, z) =", voxel_spacing, "mm")
    return out_list
def saveROIFunc(self):
    """Save the current segmentation volume (``self.segImg``) to a NIfTI file
    chosen by the user via a save dialog.

    The in-memory segmentation is held in a fixed display orientation; before
    saving, it is mapped back to the orientation recorded at load time
    (``self.Orx``/``self.Ory``/``self.Orz``) so the saved volume matches the
    original affine (``self.affine2``).
    """
    #save out segmentation
    aff = self.affine2
    # Work on a copy so the on-screen segmentation stays untouched.
    outImage = deepcopy(self.segImg)#np.rot90(np.fliplr(self.segImg),-1)
    [x_si,y_si,z_si] = np.shape(outImage)
    #This method works (for fastsegs)... but need more robust
    #for i in range(0,z_si):
    #    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)
    #try new method (more robust to header and affine mix-ups)
    # Build the transform from the fixed display orientation ('R','S','A')
    # back to the image's native orientation.
    ornt = orx.axcodes2ornt((self.Orx,self.Ory,self.Orz))
    refOrnt = orx.axcodes2ornt(('R','S','A'))
    newOrnt = orx.ornt_transform(refOrnt,ornt) #reversed these
    # The fliplr/rot90 undoes the in-plane flip/rotation applied at load time.
    outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)
    #outImage = orx.apply_orientation(outImage,newOrnt)
    #outImage = np.rot90(np.fliplr(outImage),-1)
    new_image = nib.Nifti1Image(outImage,aff)
    # NOTE(review): chdir is a process-wide side effect; presumably done so
    # the save dialog opens next to the source file — confirm.
    os.chdir(os.path.dirname(str(self.getFileNAME)))
    self.roiSaveName=QFileDialog.getSaveFileName()
    #nib.save(new_image,fileNAME[:-7]+'_FASTseg_TK_edit.nii.gz')
    nib.save(new_image,str(self.roiSaveName))
def do_nibabel_transform_to_ras(img):
    """Reorient a nibabel image into the project default orientation
    (``DEFAULT_ORIENTATION``) and return the reoriented image."""
    print(f'Transforming Images to {DEFAULT_ORIENTATION}.....')
    current_ornt = nb.io_orientation(img.affine)
    desired_ornt = axcodes2ornt(DEFAULT_ORIENTATION)
    mapping = ornt_transform(current_ornt, desired_ornt)
    return img.as_reoriented(mapping)
def _run_interface(self, runtime):
    """Reorient ``self.inputs.in_file`` to ``self.inputs.orientation``.

    Writes the reoriented image (or reuses the input path when the image is
    already in the target orientation) plus a text-format affine (.mat)
    encoding the transform, and records both paths in ``self._results``.
    """
    import numpy as np
    import nibabel as nb
    from nibabel.orientations import (axcodes2ornt, ornt_transform, inv_ornt_aff)
    fname = self.inputs.in_file
    orig_img = nb.load(fname)
    # Find transform from current (approximate) orientation to
    # target, in nibabel orientation matrix and affine forms
    orig_ornt = nb.io_orientation(orig_img.affine)
    targ_ornt = axcodes2ornt(self.inputs.orientation)
    transform = ornt_transform(orig_ornt, targ_ornt)
    affine_xfm = inv_ornt_aff(transform, orig_img.shape)
    # Check can be eliminated when minimum nibabel version >= 2.2
    if hasattr(orig_img, 'as_reoriented'):
        reoriented = orig_img.as_reoriented(transform)
    else:
        reoriented = _as_reoriented_backport(orig_img, transform)
    # Image may be reoriented: `as_reoriented` returns the same object for an
    # identity transform, so an `is` check distinguishes the two cases.
    if reoriented is not orig_img:
        # New image: write it under a suffixed name in the working directory.
        suffix = '_' + self.inputs.orientation.lower()
        out_name = fname_presuffix(fname, suffix=suffix, newpath=runtime.cwd)
        reoriented.to_filename(out_name)
    else:
        # Already in the target orientation: reuse the original file path.
        out_name = fname
    # Always save the (possibly identity) affine transform as text.
    mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
    np.savetxt(mat_name, affine_xfm, fmt='%.08f')
    self._results['out_file'] = out_name
    self._results['transform'] = mat_name
    return runtime
def load3DSegFunc(self):
    """Load a 3D segmentation NIfTI (either the MICOS output or a file picked
    by the user), reorient it into the fixed display orientation
    ('R','S','A'), and show it as an overlay.
    """
    if self.micos_flag == 1:
        # A MICOS run just finished: load its output and reset the flags.
        getFileNAME = self.micos_name
        self.micos_flag = 0
        self.roiload_flag = 0
    else:
        # Otherwise let the user pick a segmentation file.
        getFileNAME = QFileDialog.getOpenFileName()
        self.roiload_flag = 1
    segImgObj = nib.load(str(getFileNAME))
    segImgData = segImgObj.get_data()
    # Map from the orientation recorded at image load time to the fixed
    # display orientation used throughout the GUI.
    ornt = orx.axcodes2ornt((self.Orx,self.Ory,self.Orz))
    refOrnt = orx.axcodes2ornt(('R','S','A'))
    newOrnt = orx.ornt_transform(ornt,refOrnt)
    segImgData = orx.apply_orientation(segImgData,newOrnt)
    # In-plane rotate/flip to match the display convention.
    segImgData = np.fliplr(np.rot90(segImgData,1))
    self.segImg = deepcopy(segImgData)
    self.ui.overlaySeg.setChecked(True)
    self.imshowFunc()
def _run_interface(self, runtime):
    """Reorient ``self.inputs.in_file`` to ``self.inputs.orientation``.

    Writes the reoriented image (or reuses the input path when the image is
    already in the target orientation) plus a text-format affine (.mat)
    encoding the transform, and records both paths in ``self._results``.
    """
    import numpy as np
    import nibabel as nb
    from nibabel.orientations import (
        axcodes2ornt, ornt_transform, inv_ornt_aff)
    fname = self.inputs.in_file
    orig_img = nb.load(fname)
    # Find transform from current (approximate) orientation to
    # target, in nibabel orientation matrix and affine forms
    orig_ornt = nb.io_orientation(orig_img.affine)
    targ_ornt = axcodes2ornt(self.inputs.orientation)
    transform = ornt_transform(orig_ornt, targ_ornt)
    affine_xfm = inv_ornt_aff(transform, orig_img.shape)
    # Check can be eliminated when minimum nibabel version >= 2.4
    if LooseVersion(nb.__version__) >= LooseVersion('2.4.0'):
        reoriented = orig_img.as_reoriented(transform)
    else:
        reoriented = _as_reoriented_backport(orig_img, transform)
    # Image may be reoriented: `as_reoriented` returns the same object for an
    # identity transform, so an `is` check distinguishes the two cases.
    if reoriented is not orig_img:
        # New image: write it under a suffixed name in the working directory.
        suffix = '_' + self.inputs.orientation.lower()
        out_name = fname_presuffix(fname, suffix=suffix, newpath=runtime.cwd)
        reoriented.to_filename(out_name)
    else:
        # Already in the target orientation: reuse the original file path.
        out_name = fname
    # Always save the (possibly identity) affine transform as text.
    mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
    np.savetxt(mat_name, affine_xfm, fmt='%.08f')
    self._results['out_file'] = out_name
    self._results['transform'] = mat_name
    return runtime
def reorient_centroids_to(ctd_list, img, decimals=1, verb=False):
    """reorient centroids to image orientation

    Parameters:
    ----------
    ctd_list: list of centroids; first entry is the orientation axcodes,
        remaining entries are [label, x, y, z]
    img: nibabel image
    decimals: rounding decimal digits
    verb: print a message when True

    Returns:
    ----------
    out_list: reoriented list of centroids
    """
    # Transpose so row 0 is the label column and rows 1..3 are x/y/z columns.
    ctd_arr = np.transpose(np.asarray(ctd_list[1:]))
    if len(ctd_arr) == 0:
        print("[#] No centroids present")
        return ctd_list
    v_list = ctd_arr[0].astype(int).tolist()  # vertebral labels
    ctd_arr = ctd_arr[1:]
    ornt_fr = nio.axcodes2ornt(ctd_list[0])  # original centroid orientation
    axcodes_to = nio.aff2axcodes(img.affine)
    ornt_to = nio.axcodes2ornt(axcodes_to)
    trans = nio.ornt_transform(ornt_fr, ornt_to).astype(int)
    perm = trans[:, 0].tolist()
    shp = np.asarray(img.dataobj.shape)
    # Permute the coordinate rows into the target axis order (copy() is
    # required: the right-hand side must be the pre-assignment values).
    ctd_arr[perm] = ctd_arr.copy()
    # For every flipped axis (direction -1), mirror the coordinate within the
    # image extent along that axis.
    for ax in trans:
        if ax[1] == -1:
            size = shp[ax[0]]
            ctd_arr[ax[0]] = np.around(size - ctd_arr[ax[0]], decimals)
    out_list = [axcodes_to]
    ctd_list = np.transpose(ctd_arr).tolist()
    for v, ctd in zip(v_list, ctd_list):
        out_list.append([v] + ctd)
    if verb:
        print("[*] Centroids reoriented from", nio.ornt2axcodes(ornt_fr), "to", axcodes_to)
    return out_list
def MGDMBrainSegmentation(input_filename_type_list, output_dir = None, num_steps = 5, atlas_file=None,
                          topology_lut_dir = None):
    """ Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of steps for (default 5, set to 0 for testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return: (seg_im, d_aff, d_head) segmentation array, affine and header of
        the first input; returns None if MGDM execution fails
    """
    from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os
    print("Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs")
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    # Resolve defaults for output location, atlas and topology LUT directory.
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR,'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file
    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not(topology_lut_dir[-1] == os.sep):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep
    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")
    if not any(isinstance(el, list) for el in input_filename_type_list):  #make into list of lists
        input_filename_type_list = [input_filename_type_list]
    #now we setup the mgdm specfic settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)
    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S) #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default
    # Feed each input (max 4) into the MGDM contrast slots.
    for idx,con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print(" " + str(idx+1) + " "),
        print(con)
        #flipLR = False
        #flipAP = False
        #flipIS = False
        fname = con[0]
        type = con[1]  # NOTE(review): shadows the builtin `type`
        d,d_aff,d_head = niiLoad(fname,return_header=True)
        ## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
        ornt_orig = io_orientation(d_aff)
        ornt_mgdm = io_orientation(np.diag([-1, -1, 1, 1]).dot(d_aff))  # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
        ornt_chng = ornt_transform(ornt_mgdm, ornt_orig)  # to get from MGDM to our original input
        # convert orientation information to mgdm slice and orientation info
        aff_orients,aff_slc = get_affine_orientation_slice(d_aff)
        print("data orientation: " + str(aff_orients)),
        print("slice settings: " + aff_slc)
        print("mgdm orientation: " + str(ornt_mgdm))
        print("data orientation: " + str(ornt_orig))
        if aff_slc == "AXIAL":
            SLC=mgdm.AXIAL
        elif aff_slc == "SAGITTAL":
            SLC=mgdm.SAGITTAL
        else:
            SLC=mgdm.CORONAL
        # Translate each axis letter into the matching MGDM direction flag.
        for aff_orient in aff_orients:  #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
            if aff_orient == "L":
                LR=mgdm.R2L
            elif aff_orient == "R":
                LR = mgdm.L2R
                # flipLR = True
            elif aff_orient == "A":
                AP = mgdm.P2A
                #flipAP = True
            elif aff_orient == "P":
                AP = mgdm.A2P
            elif aff_orient == "I":
                IS = mgdm.S2I
                #flipIS = True
            elif aff_orient == "S":
                IS = mgdm.I2S
        mgdm.setOrientations(SLC, LR, AP, IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)
        if idx+1 == 1:  # we use the first image to set the dimensions and resolutions
            res = d_head.get_zooms()
            res = [a1.item() for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])
            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            out_root_fname = os.path.basename(fname)[0:os.path.basename(fname).find('.')]  #assumes no periods in filename, :-/
            mgdm.setContrastImage1(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx+1 == 2:
            mgdm.setContrastImage2(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))
        # outputs
        # reshape fortran stype to convert back to the format the nibabel likes
        seg_im = np.reshape(np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,'F')
        lbl_im = np.reshape(np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32), d_shape, 'F')
        ids_im = np.reshape(np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape, 'F')
        # fix orientation back to the input orientation :-/ not really working
        # seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
        # lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
        # ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
        #
        # save
        seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
        lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
        ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')
        ## this will work, but the solution with nibabel.orientations is much cleaner
        # if our settings were not the same as MGDM likes, we need to flip the relevant settings:
        #d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)
        d_head['data_type'] = np.array(32).astype('uint32')  #convert the header as well
        d_head['cal_max'] = np.max(seg_im)  #max for display
        niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(lbl_im)
        niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(ids_im)  # convert the header as well
        niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
        print("Data stored in: " + output_dir)
    except:
        # NOTE(review): bare except hides the actual Java-bridge error
        print("--- MGDM failed. Go cry. ---")
        return
    print("Execution completed")
    return seg_im,d_aff,d_head
def MICOS(fileNAME):
    """Interactive kidney segmentation (Python 2).

    Loads the NIfTI at ``fileNAME``, reorients to a fixed display orientation,
    lets the user outline both kidneys on every third slice, then refines the
    drawn polygons with morphological active contours and a sobel/watershed
    pass. Saves the polygon mask and the final segmentation next to the input
    and returns the final segmentation filename.
    """
    tic = time.clock()
    selfz =0
    selfz1 = 0
    selfz2 = 0
    selfrotD = -90
    imgObj2= nib.load(str(fileNAME))
    imgObj1 = imgObj2
    im = imgObj2
    selfaffine2 = imgObj2.get_affine()
    selfheaderdtype = imgObj2.get_data_dtype()
    selfPSx = imgObj1.get_header()['pixdim'][1]
    selfPSy = imgObj1.get_header()['pixdim'][2]
    selfPSz = imgObj1.get_header()['pixdim'][3]
    # Reorient the volume into the fixed display orientation ('R','S','A').
    (x,y,z) = orx.aff2axcodes(selfaffine2)
    selfOrx = x
    selfOry = y
    selfOrz = z
    ornt = orx.axcodes2ornt((x,y,z))
    refOrnt = orx.axcodes2ornt(('R','S','A'))  #was 'R', 'A', 'S'
    newOrnt = orx.ornt_transform(ornt,refOrnt)
    selfornt = ornt
    selfrefOrnt = refOrnt
    selfimg_data2 = imgObj2.get_data()
    selfimg_data2 = orx.apply_orientation(selfimg_data2,newOrnt)
    selfimg_data2 = np.fliplr(np.rot90(selfimg_data2,1))
    im_data = selfimg_data2
    [x_si,y_si,z_si] = np.shape(im_data)
    #do 99% norm to 1000
    im_data = np.array(im_data,dtype='float')
    im_data = im_data * 1000/np.percentile(im_data,99)
    #print np.shape(im_data)
    initialSeg = im_data.copy() * 0
    #begin user roi drawing...
    #go from middle up...
    for i in xrange(np.round(z_si/2),z_si,3):
        img = (im_data[:,:,i])
        # show the image (no ROI1 exists yet on the first pass, so only
        # open a fresh figure after the first iteration)
        if i > np.round(z_si/2):
            plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        plt.title("outline one kidney, slice = " + str(i))
        ROI1 = polydraw(roicolor='r')  #let user draw first ROI
        # show the image with the first ROI
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        ROI1.displayROI()
        plt.title("outline other kidney, slice = " + str(i))
        ROI2 = polydraw(roicolor='b')  #let user draw second ROI
        initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)
    #go from middle down...
    for i in xrange(np.round(z_si/2)-1,0,-3):
        img = (im_data[:,:,i])
        # show the image
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        plt.title("outline one kidney, slice = " + str(i))
        ROI1 = polydraw(roicolor='r')  #let user draw first ROI
        # show the image with the first ROI
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        ROI1.displayROI()
        plt.title("outline other kidney, slice = " + str(i))
        ROI2 = polydraw(roicolor='b')  #let user draw second ROI
        initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)
    toc = time.clock()
    #save out drawn polygon
    aff = selfaffine2
    outImage = deepcopy(initialSeg)#np.rot90(np.fliplr(self.segImg),-1)
    [x_si,y_si,z_si] = np.shape(outImage)
    #print np.shape(outImage)
    #This method works (for fastsegs)... but need more robust
    #for i in range(0,z_si):
    #    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)
    #try new method (more robust to header and affine mix-ups)
    # Map the drawn mask back to the native orientation before saving.
    ornt = orx.axcodes2ornt((selfOrx,selfOry,selfOrz))
    refOrnt = orx.axcodes2ornt(('R','S','A'))
    newOrnt = orx.ornt_transform(refOrnt,ornt)  #reversed these
    outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)
    #outImage = orx.apply_orientation(outImage,newOrnt)
    #outImage = np.rot90(np.fliplr(outImage),-1)
    #print np.shape(outImage)
    #outImage = np.array(outImage,dtype=selfheaderdtype)
    new_image = nib.Nifti1Image(outImage,aff)
    nib.save(new_image,fileNAME[:-7]+'_polygon_MICOS.nii.gz')
    # Dilate and fill in missing slices
    initialSeg = dilation(initialSeg,iterations = 1)
    finalSeg = initialSeg.copy() * 0
    # now try convex hull method instead to better approximate missing slices (previous method is above)
    # This works but way too long. Also, would likely need to do object finding first, so compute
    # Convex hull for each kidney separately.
    while 0:  # NOTE(review): deliberately disabled (never executes)
        xlist,ylist,zlist = find_3D_object_voxel_list(initialSeg)
        voxlist = np.zeros(shape=(np.shape(xlist)[0],3),dtype='int16')
        voxlist[:,0] = xlist
        voxlist[:,1] = ylist
        voxlist[:,2] = zlist
        tri = dtri(voxlist)
        # construct full voxel list
        xxlist,yylist,zzlist = find_3D_object_voxel_list((initialSeg+1)>0)
        fullvoxlist = np.zeros(shape=(np.shape(xxlist)[0],3),dtype='int16')
        fullvoxlist[:,0] = xxlist
        fullvoxlist[:,1] = yylist
        fullvoxlist[:,2] = zzlist
        finalSeg = np.array(in_hull(fullvoxlist,tri),dtype=float)
        finalSeg = np.reshape(finalSeg,(x_si,y_si,z_si))
    # Now do gaussian blur of polygon to smooth
    initialSeg = (filt.gaussian_filter(initialSeg.copy()*255,sigma=[3,3,1])) > 100
    #Begin optimized method...
    for i in xrange(0,z_si):
        img = (im_data[:,:,i])
        if np.max(initialSeg[:,:,i]>0):
            # Morphological geodesic active contour seeded from the polygon.
            mgac = []
            gI = msnake.gborders(img,alpha=1E5,sigma=3.0)  # increasing sigma allows more changes in contour
            mgac = msnake.MorphGAC(gI,smoothing=3,threshold=0.01,balloon=0.0)  #was 2.5
            mgac.levelset = initialSeg[:,:,i]>0.5
            for ijk123 in xrange(100):
                mgac.step()
            finalSeg[:,:,i] = mgac.levelset
            #print i
    # Now do gaussian blur and threshold to finalize segmentation...
    finalSeg = (filt.gaussian_filter(finalSeg.copy()*255,sigma=[3,3,1])) > 100  #using this helps with single slice errors of the active contour
    # Try adding now narrow band sobel/watershed technique.
    for i in xrange(0,z_si):
        img = (im_data[:,:,i])
        segslice = finalSeg[:,:,i]
        if np.max(finalSeg[:,:,i]>0):
            # Seeds: 2 inside the eroded mask, 0 in the uncertain band, 1 outside.
            erodeimg = erosion(segslice.copy(),iterations=1)
            dilateimg = dilation(segslice.copy(),iterations=1)
            seeds = img * 0
            seeds[:] = 1
            seeds[dilateimg>0] = 0
            seeds[erodeimg>0] = 2
            sobelFilt = sobel(np.array(img.copy(),dtype='int16'))
            mgac = watershed(sobelFilt,seeds)>1
            finalSeg[:,:,i] = mgac>0
    #save out segmentation
    aff = selfaffine2
    outImage = deepcopy(finalSeg)#np.rot90(np.fliplr(self.segImg),-1)
    outImage = np.array(outImage,dtype='float')
    [x_si,y_si,z_si] = np.shape(outImage)
    #This method works (for fastsegs)... but need more robust
    #for i in range(0,z_si):
    #    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)
    #try new method (more robust to header and affine mix-ups)
    ornt = orx.axcodes2ornt((selfOrx,selfOry,selfOrz))
    refOrnt = orx.axcodes2ornt(('R','S','A'))
    newOrnt = orx.ornt_transform(refOrnt,ornt)  #reversed these
    outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)
    #outImage = orx.apply_orientation(outImage,newOrnt)
    #outImage = np.rot90(np.fliplr(outImage),-1)
    new_image = nib.Nifti1Image(outImage,aff)
    nib.save(new_image,fileNAME[:-7]+'_FASTseg_MICOS.nii.gz')
    print 'time = '
    print toc - tic
    return (fileNAME[:-7]+'_FASTseg_MICOS.nii.gz')
def MICOS(fileNAME):
    """Interactive kidney outlining (Python 2) — polygon-drawing stage only.

    Loads the NIfTI at ``fileNAME``, reorients to a fixed display orientation,
    lets the user outline both kidneys on every third slice, and saves the
    drawn polygon mask (mapped back to the native orientation) next to the
    input file.
    """
    tic = time.clock()
    selfz =0
    selfz1 = 0
    selfz2 = 0
    selfrotD = -90
    imgObj2= nib.load(str(fileNAME))
    imgObj1 = imgObj2
    im = imgObj2
    selfaffine2 = imgObj2.get_affine()
    selfheaderdtype = imgObj2.get_data_dtype()
    selfPSx = imgObj1.get_header()['pixdim'][1]
    selfPSy = imgObj1.get_header()['pixdim'][2]
    selfPSz = imgObj1.get_header()['pixdim'][3]
    # Reorient the volume into the fixed display orientation ('R','S','A').
    (x,y,z) = orx.aff2axcodes(selfaffine2)
    selfOrx = x
    selfOry = y
    selfOrz = z
    ornt = orx.axcodes2ornt((x,y,z))
    refOrnt = orx.axcodes2ornt(('R','S','A'))  #was 'R', 'A', 'S'
    newOrnt = orx.ornt_transform(ornt,refOrnt)
    selfornt = ornt
    selfrefOrnt = refOrnt
    selfimg_data2 = imgObj2.get_data()
    selfimg_data2 = orx.apply_orientation(selfimg_data2,newOrnt)
    selfimg_data2 = np.fliplr(np.rot90(selfimg_data2,1))
    im_data = selfimg_data2
    [x_si,y_si,z_si] = np.shape(im_data)
    #do 99% norm to 1000
    im_data = np.array(im_data,dtype='float')
    im_data = im_data * 1000/np.percentile(im_data,99)
    #print np.shape(im_data)
    initialSeg = im_data.copy() * 0
    #begin user roi drawing...
    #go from middle up...
    for i in xrange(np.round(z_si/2),z_si,3):
        img = (im_data[:,:,i])
        # show the image (no ROI1 exists yet on the first pass, so only
        # open a fresh figure after the first iteration)
        if i > np.round(z_si/2):
            plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        plt.title("outline one kidney, slice = " + str(i))
        ROI1 = polydraw(roicolor='r')  #let user draw first ROI
        # show the image with the first ROI
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        ROI1.displayROI()
        plt.title("outline other kidney, slice = " + str(i))
        ROI2 = polydraw(roicolor='b')  #let user draw second ROI
        initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)
    #go from middle down...
    for i in xrange(np.round(z_si/2)-1,0,-3):
        img = (im_data[:,:,i])
        # show the image
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        plt.title("outline one kidney, slice = " + str(i))
        ROI1 = polydraw(roicolor='r')  #let user draw first ROI
        # show the image with the first ROI
        plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
        plt.imshow(img,cmap='gray')
        plt.colorbar()
        ROI1.displayROI()
        plt.title("outline other kidney, slice = " + str(i))
        ROI2 = polydraw(roicolor='b')  #let user draw second ROI
        initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)
    toc = time.clock()
    #save out drawn polygon
    aff = selfaffine2
    outImage = deepcopy(initialSeg)#np.rot90(np.fliplr(self.segImg),-1)
    [x_si,y_si,z_si] = np.shape(outImage)
    #print np.shape(outImage)
    #This method works (for fastsegs)... but need more robust
    #for i in range(0,z_si):
    #    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)
    #try new method (more robust to header and affine mix-ups)
    # Map the drawn mask back to the native orientation before saving.
    ornt = orx.axcodes2ornt((selfOrx,selfOry,selfOrz))
    refOrnt = orx.axcodes2ornt(('R','S','A'))
    newOrnt = orx.ornt_transform(refOrnt,ornt)  #reversed these
    outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)
    #outImage = orx.apply_orientation(outImage,newOrnt)
    #outImage = np.rot90(np.fliplr(outImage),-1)
    #print np.shape(outImage)
    #outImage = np.array(outImage,dtype=selfheaderdtype)
    new_image = nib.Nifti1Image(outImage,aff)
    nib.save(new_image,fileNAME[:-7]+'_polygon_MICOS.nii.gz')
def MGDMBrainSegmentation(input_filename_type_list, output_dir=None, num_steps=5, atlas_file=None,
                          topology_lut_dir=None):
    """ Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of steps for (default 5, set to 0 for testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return: (seg_im, d_aff, d_head) segmentation array, affine and header of
        the first input; returns None if MGDM execution fails
    """
    from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os
    print(
        "Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs"
    )
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    # Resolve defaults for output location, atlas and topology LUT directory.
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR, 'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file
    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not (
                topology_lut_dir[-1] == os.sep
        ):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep
    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")
    if not any(isinstance(el, list) for el in input_filename_type_list):  #make into list of lists
        input_filename_type_list = [input_filename_type_list]
    #now we setup the mgdm specfic settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)
    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S) #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default
    # Feed each input (max 4) into the MGDM contrast slots.
    for idx, con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print(" " + str(idx + 1) + " "),
        print(con)
        #flipLR = False
        #flipAP = False
        #flipIS = False
        fname = con[0]
        type = con[1]  # NOTE(review): shadows the builtin `type`
        d, d_aff, d_head = niiLoad(fname, return_header=True)
        ## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
        ornt_orig = io_orientation(d_aff)
        ornt_mgdm = io_orientation(np.diag(
            [-1, -1, 1, 1]).dot(d_aff))  # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
        ornt_chng = ornt_transform(
            ornt_mgdm, ornt_orig)  # to get from MGDM to our original input
        # convert orientation information to mgdm slice and orientation info
        aff_orients, aff_slc = get_affine_orientation_slice(d_aff)
        print("data orientation: " + str(aff_orients)),
        print("slice settings: " + aff_slc)
        print("mgdm orientation: " + str(ornt_mgdm))
        print("data orientation: " + str(ornt_orig))
        if aff_slc == "AXIAL":
            SLC = mgdm.AXIAL
        elif aff_slc == "SAGITTAL":
            SLC = mgdm.SAGITTAL
        else:
            SLC = mgdm.CORONAL
        # Translate each axis letter into the matching MGDM direction flag.
        for aff_orient in aff_orients:  #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
            if aff_orient == "L":
                LR = mgdm.R2L
            elif aff_orient == "R":
                LR = mgdm.L2R
                # flipLR = True
            elif aff_orient == "A":
                AP = mgdm.P2A
                #flipAP = True
            elif aff_orient == "P":
                AP = mgdm.A2P
            elif aff_orient == "I":
                IS = mgdm.S2I
                #flipIS = True
            elif aff_orient == "S":
                IS = mgdm.I2S
        mgdm.setOrientations(SLC, LR, AP, IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)
        if idx + 1 == 1:  # we use the first image to set the dimensions and resolutions
            res = d_head.get_zooms()
            res = [a1.item() for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])
            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            out_root_fname = os.path.basename(fname)[0:os.path.basename(
                fname).find('.')]  #assumes no periods in filename, :-/
            mgdm.setContrastImage1(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx + 1 == 2:
            mgdm.setContrastImage2(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))
        # outputs
        # reshape fortran stype to convert back to the format the nibabel likes
        seg_im = np.reshape(
            np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape, 'F')
        lbl_im = np.reshape(
            np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32), d_shape, 'F')
        ids_im = np.reshape(
            np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape, 'F')
        # fix orientation back to the input orientation :-/ not really working
        # seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
        # lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
        # ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
        #
        # save
        seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
        lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
        ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')
        ## this will work, but the solution with nibabel.orientations is much cleaner
        # if our settings were not the same as MGDM likes, we need to flip the relevant settings:
        #d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)
        d_head['data_type'] = np.array(32).astype('uint32')  #convert the header as well
        d_head['cal_max'] = np.max(seg_im)  #max for display
        niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(lbl_im)
        niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(ids_im)  # convert the header as well
        niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
        print("Data stored in: " + output_dir)
    except:
        # NOTE(review): bare except hides the actual Java-bridge error
        print("--- MGDM failed. Go cry. ---")
        return
    print("Execution completed")
    return seg_im, d_aff, d_head
# NOTE(review): this fragment begins mid-statement — the opening
# `parser = argparse.ArgumentParser(description=` is outside this chunk;
# the first line below is the tail of that call.
'Reorient to LIA and resample to 1mm iso-voxel resolution if required')
parser.add_argument('source', type=str, help='Input volume')
parser.add_argument('destination', type=str, help='Normalized volume')
args = parser.parse_args()
# Drop singleton dimensions and report the input geometry.
src_nib = nib_funcs.squeeze_image(nib.load(args.source))
current_orientation = ''.join(nib.aff2axcodes(src_nib.affine))
print('Input: {} [{}]'.format(src_nib.header.get_zooms(), current_orientation))
# Avoid resampling if already 1mm iso-voxel
# Note: Also in cases of tiny rounding error, e.g. (1.0000001, 1.0000001, 1.0)
if not np.allclose(src_nib.header.get_zooms(), [1, 1, 1]):
    # requires re-sampling; conform() also reorients to LIA
    print('Resampling')
    dst_nib = nib_processing.conform(src_nib, orientation='LIA')
elif current_orientation != 'LIA':
    # requires just reorient (voxels already 1mm iso)
    print('Reorientating {} to LIA'.format(current_orientation))
    start_ornt = nib_orientations.io_orientation(src_nib.affine)
    end_ornt = nib_orientations.axcodes2ornt('LIA')
    transform = nib_orientations.ornt_transform(start_ornt, end_ornt)
    dst_nib = src_nib.as_reoriented(transform)
else:
    # already 1mm iso-voxel and LIA: pass through unchanged
    dst_nib = src_nib
nib.save(dst_nib, args.destination)
def to_affine(
    orientation,
    spacing: Sequence[Union[int, float]] = None,
    origin: Sequence[Union[int, float]] = None,
):
    """Convert orientation, spacing, and origin data into affine matrix.

    Args:
        orientation (Sequence[str]): Image orientation in the standard
            orientation format (e.g. ``("LR", "AP", "SI")``). A 2-length
            orientation is completed via ``_infer_orientation``.
        spacing (int(s) | float(s)): Number(s) corresponding to pixel spacing
            of each direction. If a single value, same pixel spacing is used
            for all directions. If sequence is less than length of
            ``orientation``, remaining directions have unit spacing (i.e.
            ``1``). Defaults to unit spacing ``(1, 1, 1)``.
        origin (int(s) | float(s)): The ``(x0, y0, z0)`` origin for the scan.
            If a single value, same origin is used for all directions. If
            sequence is less than length of ``orientation``, remaining
            directions have standard origin (i.e. ``0``). Defaults to
            ``(0, 0, 0)``.

    Returns:
        ndarray: A 4x4 ndarray representing the affine matrix.

    Raises:
        ValueError: If ``spacing`` or ``origin`` is not a real number or a
            sequence (of length <= ``len(orientation)``) of real numbers.

    Examples:
        >>> to_affine(("SI", "AP", "RL"), spacing=(0.5, 0.5, 1.5), origin=(10, 20, 0))
        array([[-0. , -0. , -1.5, 10. ],
               [-0. , -0.5, -0. , 20. ],
               [-0.5, -0. , -0. ,  0. ],
               [ 0. ,  0. ,  0. ,  1. ]])

    Note:
        This method assumes all directions follow the standard principal
        directions in the normative patient orientation. Moving along one
        direction of the array only moves along one of the normative
        directions.
    """

    def _format_numbers(value, default_val, name, expected_num):
        """Format (sequence of) numbers (spacing, origin) into a standard
        ``expected_num``-length tuple, padding with ``default_val``."""
        if value is None:
            return (default_val,) * expected_num
        if isinstance(value, (int, float)):
            # Scalar: broadcast to all directions.
            return (value,) * expected_num
        if not isinstance(value, (np.ndarray, Sequence)) or len(value) > expected_num:
            raise ValueError(
                f"`{name}` must be a real number or sequence (length<={expected_num}) "
                f"of real numbers. Got {value}"
            )
        value = tuple(value)
        if len(value) < expected_num:
            # Pad missing trailing directions with the default.
            value += (default_val,) * (expected_num - len(value))
        assert len(value) == expected_num
        return value

    if len(orientation) == 2:
        orientation = _infer_orientation(orientation)
    __check_orientation__(orientation)
    spacing = _format_numbers(spacing, 1, "spacing", len(orientation))
    origin = _format_numbers(origin, 0, "origin", len(orientation))

    affine = np.eye(4)
    start_ornt = nibo.io_orientation(affine)
    end_ornt = nibo.axcodes2ornt(orientation_standard_to_nib(orientation))
    ornt = nibo.ornt_transform(start_ornt, end_ornt)

    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``int`` is the drop-in replacement for this cast.
    transpose_idxs = ornt[:, 0].astype(int)
    flip_idxs = ornt[:, 1]

    # Reorder the principal axes, then apply sign flips, spacing, and origin.
    affine[:3] = affine[:3][transpose_idxs]
    affine[:3] *= flip_idxs[..., np.newaxis]
    affine[:3, :3] *= np.asarray(spacing)
    affine[:3, 3] = np.asarray(origin)

    return affine
def loadDataFunc(self):
    """Load an image file chosen via a file dialog into the viewer.

    Converts AVW inputs to NIfTI first, reorients the volume to ('R','S','A'),
    populates the ``self.img_data*`` arrays and related view state, and draws
    the sagittal/axial preview panes and the slice slider.

    Side effects only; always returns None.
    """
    self.ui.autogray.setCheckState(2)
    self.autoGrayFlag = 1
    getFileNAME = QFileDialog.getOpenFileName()
    if str(getFileNAME)[-4:] == '.avw':
        # AVW input: convert to NIfTI first, then continue with the NIfTI file.
        MR = loadAVW(str(getFileNAME))
        avw2nifti(str(getFileNAME[:-4]) + 'avw', MR, seg=None)
        getFileNAME = str(getFileNAME[:-4]) + 'avw.nii.gz'
    self.getFileNAME = getFileNAME
    self.ui.displayFileName.setText(str(os.path.basename(str(self.getFileNAME))))
    self.z = 0
    self.z1 = 0
    self.z2 = 0
    self.rotD = -90
    imgObj2 = nib.load(str(getFileNAME))
    imgObj1 = imgObj2
    # .get_affine()/.get_header()/.get_data() were deprecated and removed in
    # nibabel 5.0; use the .affine/.header attributes and .dataobj instead.
    self.affine2 = imgObj2.affine
    self.PSx = imgObj1.header['pixdim'][1]
    self.PSy = imgObj1.header['pixdim'][2]
    self.PSz = imgObj1.header['pixdim'][3]
    (x, y, z) = orx.aff2axcodes(self.affine2)
    self.Orx = x
    self.Ory = y
    self.Orz = z
    ornt = orx.axcodes2ornt((x, y, z))
    refOrnt = orx.axcodes2ornt(('R', 'S', 'A'))  # was 'R', 'A', 'S'
    newOrnt = orx.ornt_transform(ornt, refOrnt)
    self.ornt = ornt
    self.refOrnt = refOrnt
    # np.asanyarray(dataobj) is the dtype-preserving replacement for get_data().
    self.img_data2 = np.asanyarray(imgObj2.dataobj)
    self.img_data2 = orx.apply_orientation(self.img_data2, newOrnt)
    self.img_data2 = np.fliplr(np.rot90(self.img_data2, 1))
    self.img_data1 = self.img_data2
    self.imageFile2 = str(getFileNAME)  # changed self.ui.T2Image.currentText() to getFileNAME
    self.imageFile1 = self.imageFile2
    indx2 = self.imageFile2.rfind('/')
    indx1 = indx2
    self.filePath1 = self.imageFile1[0:indx1 + 1]
    self.fileName1 = self.imageFile1[indx1 + 1:]
    self.filePath2 = self.imageFile2[0:indx2 + 1]
    self.fileName2 = self.imageFile2[indx2 + 1:]
    try:
        (x1, y1, z1) = self.img_data1.shape
        (x2, y2, z2) = self.img_data2.shape
    except ValueError:
        # 4D volume: a 3-name unpack raises ValueError; keep only the first
        # component volume. (Was a bare `except:` that hid unrelated errors.)
        (x1, y1, z1, d1) = self.img_data1.shape
        (x2, y2, z2, d1) = self.img_data2.shape
        self.img_data1 = self.img_data1[:, :, :, 0]
        self.img_data2 = self.img_data2[:, :, :, 0]
    self.sliceNum1 = z1
    self.sliceNum2 = z2
    self.shape = self.img_data2.shape
    self.img1 = self.img_data1[:, :, self.z]
    self.img2 = self.img_data2[:, :, self.z]
    self.segImg = self.img_data2 * 0
    self.imshowFunc()
    (x, y, z) = self.shape
    # Sagittal preview pane (figure3): mid-slice along the first axis.
    self.ui.figure3.canvas.ax.clear()
    self.ui.figure3.canvas.ax.imshow(self.img_data2[:, round(x / 2), :], cmap=plt.cm.gray)
    self.ui.figure3.canvas.ax.get_xaxis().set_visible(False)
    self.ui.figure3.canvas.ax.get_yaxis().set_visible(False)
    self.ui.figure3.canvas.draw()
    # Axial preview pane (figure4): mid-slice along the second axis, rotated.
    self.ui.figure4.canvas.ax.clear()
    self.ui.figure4.canvas.ax.imshow(np.rot90(self.img_data2[round(y / 2), :, :], 1), cmap=plt.cm.gray)
    self.ui.figure4.canvas.ax.get_xaxis().set_visible(False)
    self.ui.figure4.canvas.ax.get_yaxis().set_visible(False)
    self.ui.figure4.canvas.draw()
    # Slice slider spans all slices of the loaded volume.
    self.ui.imageSlider.setMinimum(0)
    self.ui.imageSlider.setMaximum(z2 - 1)
    self.ui.imageSlider.setSingleStep(1)
    self.maxSlice = z2 - 1
    (row, col, dep) = self.img_data2.shape
    self.overlayImgAX = np.zeros((row, col))
    return