def example_fluid():
    f = box_image(core.StructuredGrid((256, 256), spacing=[0.05, 0.05], device=device))
    d = box_image(core.StructuredGrid((256, 256), spacing=[0.05, 0.05], device=device),
                  break_width=46, val=100.0)

    # Smooth the image before taking gradients
    f = so.Gaussian.Create(channels=1, kernel_size=5, sigma=2, dim=2, device=device)(f)
    grads = so.Gradient.Create(dim=2, device=device)(f) * 100
    fluid_kernel = so.FluidKernel.Create(f, device=device)

    # Flow the gradients: compute the gradients of the x component
    x = core.StructuredGrid.FromGrid(f, tensor=grads[1].unsqueeze(0), channels=1)
    x_grads = so.Gradient.Create(dim=2, device=device)(grads[1])
def _create_grid(src_surface, src_excess, grid_size, grid_device, expansion_factor=0.1):
    grid_size = torch.tensor(grid_size,
                             device=src_surface.vertices.device,
                             dtype=src_surface.vertices.dtype)
    extent_verts = src_surface.vertices.clone()
    for surface in src_excess:
        extent_verts = torch.cat([extent_verts, surface.vertices], 0)
    vert_min = extent_verts.min(0).values
    vert_max = extent_verts.max(0).values

    # Expand beyond the min/max so that we contain the entire surface - 10% should be enough
    expansion = (vert_max - vert_min) * expansion_factor
    vert_min -= expansion
    vert_max += expansion

    # The verts are in (x, y, z) and we need (z, y, x) for volumes
    vert_min = vert_min.flip(0)
    vert_max = vert_max.flip(0)

    # Calculate the spacing
    spacing = (vert_max - vert_min) / grid_size

    return core.StructuredGrid(grid_size, spacing=spacing, origin=vert_min,
                               device=grid_device, dtype=torch.float32, requires_grad=False)
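# Hedged usage sketch for _create_grid(). The OBJ file names below are
# hypothetical; the load pattern mirrors how surfaces are read elsewhere in
# this file. This is illustrative only, not part of the pipeline.
def _demo_create_grid():
    verts, faces = io.ReadOBJ('source_surface.obj')  # hypothetical path
    src_surface = core.TriangleMesh(verts, faces)
    verts, faces = io.ReadOBJ('extra_surface.obj')   # hypothetical path
    src_excess = [core.TriangleMesh(verts, faces)]
    # Build a 256^3 sampling grid that bounds the source surface and its
    # excess surfaces, padded by 10% on each side
    grid = _create_grid(src_surface, src_excess, (256, 256, 256), 'cpu')
    return grid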
def process_mrti_data(rabbit, out_path):
    rerun = False
    if os.path.exists(f'{out_path}/{rabbit}_thermal_vol.nii.gz') and not rerun:
        print(f'Processing MRTI for {rabbit} ... done')
        return

    print(f'Processing MRTI for {rabbit} ... ', end='')
    data_dir = '/hdscratch/ucair/AcuteBiomarker/Data/mrti/'
    files = sorted(glob.glob(f'{data_dir}/*'))
    mat_file = [x for x in files if rabbit in x][0]

    mrti_dict = loadmat(mat_file)
    cs_data = np.transpose(mrti_dict['PosDCS'][:, :, :, 0:3], (2, 1, 0, 3))
    mrti_data = np.transpose(mrti_dict['temps'], (2, 1, 0, 3))
    ctd_map = np.nan_to_num(np.transpose(mrti_dict['CTD_Map'], (2, 1, 0)), nan=1.0)
    ctd_map = np.log(ctd_map)

    origin = np.ascontiguousarray(cs_data[0, 0, 0, ::-1])

    thermal_vol = core.StructuredGrid(
        size=mrti_data.shape[0:3],
        origin=origin,
        spacing=torch.tensor([1.0, 1.0, 1.0]),
        tensor=torch.tensor(mrti_data).permute(3, 0, 1, 2),
        channels=mrti_data.shape[-1]
    )
    log_ctd_map = core.StructuredGrid(
        size=mrti_data.shape[0:3],
        origin=origin,
        spacing=torch.tensor([1.0, 1.0, 1.0]),
        tensor=torch.tensor(ctd_map).unsqueeze(0),
        channels=1
    )

    io.SaveITKFile(thermal_vol, f'{out_path}/{rabbit}_thermal_vol.nii.gz')
    io.SaveITKFile(log_ctd_map, f'{out_path}/{rabbit}_log_ctd_map.nii.gz')
    print('done')
def _generate_stir_vol(stir_center, stir_space, stir_data):
    # The slab (through-plane) spacing was originally estimated from the mean
    # distance between consecutive slice centers; it is fixed at 2.0 here.
    z_slab = 2.0
    z_org = np.array(stir_center)[:, 2].min()

    vol_org = [
        stir_center[0][0] - ((stir_data.shape[0] / 2) * stir_space[0]),
        stir_center[0][1] - ((stir_data.shape[1] / 2) * stir_space[1]),
        z_org + z_slab / 2.0  # Plus or minus?
    ]
    vol_space = [stir_space[0], stir_space[1], z_slab]

    # Build the volume with the slice axis second: size, spacing, and origin
    # are permuted to (x, z, y) and the data transposed and flipped to match
    stir_vol = core.StructuredGrid(
        [stir_data.shape[0], stir_data.shape[2], stir_data.shape[1]],
        spacing=[vol_space[0], vol_space[2], vol_space[1]],
        origin=[vol_org[0], vol_org[2], vol_org[1]],
        tensor=torch.tensor(stir_data.transpose(0, 2, 1)).unsqueeze(0).flip(-2),
        channels=1
    )

    return stir_vol
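# Hedged usage sketch for _generate_stir_vol(). The shapes follow the indexing
# used above: stir_data is (rows, cols, slices), stir_center holds one
# (x, y, z) slice center per slice, and stir_space is the in-plane (x, y)
# pixel spacing. All values here are synthetic.
def _demo_generate_stir_vol():
    stir_data = np.random.rand(256, 256, 20).astype(np.float32)
    stir_center = [[0.0, 0.0, 2.0 * i] for i in range(20)]  # 2 mm slab spacing
    stir_space = [1.0, 1.0]
    return _generate_stir_vol(stir_center, stir_space, stir_data)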
def exvivo_to_stacked_blocks(rabbit, block, direction, affine_only=False, base_dir='/hdscratch/ucair/'):
    # This is registered from blocks to exvivo, so phi is needed to bring the
    # exvivo MR image to the block images. Need to determine the grid to
    # sample the MR onto.
    block_dir = f'{base_dir}{rabbit}/blockface/{block}/'
    exvivo_dir = f'{base_dir}{rabbit}/mri/exvivo/'

    # Load the affine
    aff = np.loadtxt(os.path.normpath(f'{block_dir}../recons/surfaces/raw/blocks_to_exvivo_affine.txt'))
    aff = torch.tensor(aff, device=device, dtype=torch.float32)

    # Load the deformation
    deformation = io.LoadITKFile(
        os.path.normpath(f'{block_dir}../recons/surfaces/raw/blocks_{direction}_to_exvivo.mhd'),
        device=device
    )
    deformation.set_size((256, 256, 256))
    if affine_only:
        deformation.set_to_identity_lut_()

    if direction == 'phi':
        # Determine the grid the affine deformation should live on from the
        # stacking header of this block
        spacing = []
        origin = []
        size = []
        hdr = tools.read_mhd_header(f'{block_dir}/volumes/raw/{block}_phi_inv_stacking.mhd')
        spacing.append(np.array([float(x) for x in hdr['ElementSpacing'].split(' ')]))
        origin.append(np.array([float(x) for x in hdr['Offset'].split(' ')]))
        size.append(np.array([float(x) for x in hdr['DimSize'].split(' ')]))
        spacing = np.stack(spacing)
        origin = np.stack(origin)
        size = np.stack(size)
        extent = size * spacing + origin

        aff_grid_size = np.array((512, 512, 512))
        aff_grid_origin = np.min(origin, axis=0)
        aff_grid_spacing = (np.max(extent, axis=0) - aff_grid_origin) / aff_grid_size

        aff_grid = core.StructuredGrid(
            size=aff_grid_size.tolist()[::-1],
            spacing=aff_grid_spacing.tolist()[::-1],
            origin=aff_grid_origin.tolist()[::-1],
            device=device,
            channels=3
        )
        aff_grid.set_size(size=(512, 512, 512), inplace=True)
        aff_grid.set_to_identity_lut_()

        # Apply the FORWARD affine to the deformation
        a = aff[0:3, 0:3].float()
        t = aff[0:3, 3].float()

        # Create a deformation from the affine that lives in the stacked blocks space
        aff_grid.data = aff_grid.data.flip(0)
        aff_grid.data = torch.matmul(a, aff_grid.data.permute(list(range(1, 3 + 1)) + [0]).unsqueeze(-1))
        aff_grid.data = (aff_grid.data.squeeze() + t).permute([-1] + list(range(0, 3)))
        aff_grid.data = aff_grid.data.flip(0)

        # Compose the grids
        exvivo_to_blocks = so.ComposeGrids.Create(device=device)([aff_grid, deformation])

    else:
        deformation.data = deformation.data.flip(0)

        # Apply the inverse affine to the grid
        aff = aff.inverse()
        a = aff[0:3, 0:3].float()
        t = aff[0:3, 3].float()

        deformation.data = torch.matmul(
            a.unsqueeze(0).unsqueeze(0),
            deformation.data.permute(list(range(1, 3 + 1)) + [0]).unsqueeze(-1))
        deformation.data = (deformation.data.squeeze() + t).permute([-1] + list(range(0, 3)))

        # Flip phi_inv back to the way it was
        deformation.data = deformation.data.flip(0)

        exvivo_to_blocks = deformation.copy()

    return exvivo_to_blocks
def block_stacking(rabbit):
    rabbit_dir = f'/hdscratch2/{rabbit}/blockface/'
    raw_ext = '/surfaces/raw/'
    rigid_ext = '/surfaces/rigid/'
    deform_ext = '/surfaces/deformable/'
    vol_ext = '/volumes/raw/'
    stitch_ext = '/surfaces/raw/stitching/deformable/'

    # Get a list of the blocks
    block_list = sorted(glob.glob(f'{rabbit_dir}block*'))

    # Determine the middle block
    middle_block = block_list[5]
    foot_blocks = block_list[block_list.index(middle_block):]
    head_blocks = block_list[:block_list.index(middle_block) + 1][::-1]

    rerun = True
    rigid = False
    skip_blocks = []

    if rerun or not os.path.exists(f'{middle_block}{vol_ext}{middle_block.split("/")[-1]}_phi_stacking.mhd'):
        mid_block = middle_block.split('/')[-1]
        middle = io.LoadITKFile(f'{middle_block}{vol_ext}difference_volume.nii.gz')
        middle.set_size((60, 1024, 1024))

        # The middle block anchors the stack, so its deformation is identity
        deformation = core.StructuredGrid.FromGrid(middle, channels=3)
        deformation.set_to_identity_lut_()
        io.SaveITKFile(deformation, f'{middle_block}{vol_ext}{mid_block}_phi_inv_stacking.mhd')
        io.SaveITKFile(deformation, f'{middle_block}{vol_ext}{mid_block}_phi_stacking.mhd')

        affine_tform = torch.eye(4)
        np.savetxt(f'{rabbit_dir}{mid_block}{raw_ext}{mid_block}_rigid_tform.txt', affine_tform.numpy())
        np.savetxt(f'{rabbit_dir}{mid_block}{vol_ext}{mid_block}_rigid_tform.txt', affine_tform.numpy())

        # Copy the files from raw to deformable for the middle surface
        mid_path = f'{mid_block}{raw_ext}'
        files = [
            f'{rabbit_dir}{mid_path}{mid_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{mid_block}_ext.obj',
        ]
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_foot.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_foot.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_head.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_head.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_foot_support.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_foot_support.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_head_support.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_head_support.obj']

        out_names = []
        out_path = f'{rabbit_dir}{mid_block}{deform_ext}{mid_block}'
        for path in files:
            name = path.split('/')[-1].split(f'{mid_block}')[-1].replace('.', '_deformable.')
            out_names += [f'{out_path}{name}']
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        for in_file, out_file in zip(files, out_names):
            shutil.copy(in_file, out_file)

    # Loop over the foot blocks
    for i, block_path in enumerate(foot_blocks, 1):
        if i == len(foot_blocks):
            break
        target_block = block_path.split('/')[-1]
        source_block = foot_blocks[i].split('/')[-1]
        if source_block in skip_blocks:
            continue
        mid_path = f'{source_block}{raw_ext}'

        target_surface_path = f'{rabbit_dir}{target_block}{deform_ext}{target_block}_foot_deformable.obj'
        source_surface_path = f'{rabbit_dir}{mid_path}{source_block}_head.obj'

        extras_paths = [
            f'{rabbit_dir}{mid_path}{source_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{source_block}_ext.obj',
        ]
        if i < len(foot_blocks) - 1:
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_foot.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{source_block}_foot_support.obj'):
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_foot_support.obj']

        if os.path.exists(f'{rabbit_dir}{source_block}{deform_ext}{source_block}_head_deformable.obj') and not rerun:
            print(f'The deformed surface for {source_block} already exists ... Next block')
            continue

        try:
            verts, faces = io.ReadOBJ(target_surface_path)
            tar_surface = core.TriangleMesh(verts, faces)
            tar_surface.to_(device)
        except IOError:
            print(f'The deformed foot surface for {target_block} was not found ... Next block')
            continue

        # Need to see if the target needs any support
        support_block = block_list[block_list.index(block_path) - 1].split('/')[-1]
        if os.path.exists(f'{rabbit_dir}{support_block}{deform_ext}{support_block}_foot_support_deformable.obj'):
            verts, faces = io.ReadOBJ(
                f'{rabbit_dir}{support_block}{deform_ext}{support_block}_foot_support_deformable.obj')
            tar_surface.add_surface_(verts.to(device=device), faces.to(device=device))

        try:
            verts, faces = io.ReadOBJ(source_surface_path)
            src_surface = core.TriangleMesh(verts, faces)
            src_surface.to_(device)
            src_surface.flip_normals_()
        except IOError:
            print(f'The raw head surface for {source_block} was not found ... Next block')
            continue

        extra_surfaces = []
        for path in extras_paths:
            try:
                verts, faces = io.ReadOBJ(path)
            except IOError:
                extra_name = path.split('/')[-1]
                print(f'{extra_name} not found as an extra ... removing from list')
                _ = extras_paths.pop(extras_paths.index(path))
                continue
            extra_surfaces += [core.TriangleMesh(verts, faces)]
            extra_surfaces[-1].to_(device)

        # Load or create the dictionary for registration
        try:
            if rerun:
                raise IOError
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_affine_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'spatial_sigma': [2.0, 0.5],
                'affine_lr': 1.0e-08,
                'translation_lr': 1.0e-05,
                'converge': 1.0,
                'rigid_transform': True
            }

        print(f'Registering {source_block} to {target_block}:')
        affine_tform = tools.affine_register(
            tar_surface.copy(), src_surface.copy(),
            spatial_sigma=params['spatial_sigma'],
            affine_lr=params['affine_lr'],
            translation_lr=params['translation_lr'],
            rigid=params['rigid_transform'],
            converge=params['converge'],
            device=device)

        # Apply the affine to the source element and the excess
        aff_tformer = uo.AffineTransformSurface.Create(affine_tform, device=device)
        aff_src_surface = aff_tformer(src_surface)
        aff_extra_surface = []
        for surface in extra_surfaces:
            aff_extra_surface += [aff_tformer(surface)]

        out_path = f'{rabbit_dir}{source_block}{rigid_ext}{source_block}'
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_rigid.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        if rigid:
            continue

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_affine_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save the affine in the volumes and in the surfaces location
        np.savetxt(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_rigid_tform.txt', affine_tform.numpy())
        np.savetxt(f'{rabbit_dir}{source_block}{vol_ext}{source_block}_rigid_tform.txt', affine_tform.numpy())

        if not os.path.exists(f'{out_path}_head_rigid.obj') or rerun:
            io.WriteOBJ(aff_src_surface.vertices, aff_src_surface.indices, f'{out_path}_head_rigid.obj')
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_rigid.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        try:
            if rerun:
                raise IOError
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_deformable_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'currents_sigma': [3.0, 0.5],
                'propagation_sigma': [6.0, 6.0, 3.0],
                'deformable_lr': [1.0e-04, 0.5e-04],
                'converge': 1.0,
                'grid_size': [20, 100, 100],
                'niter': 500
            }

        # Account for some old parameter names - can be deleted later
        if 'spatial_sigma' in params.keys():
            params['currents_sigma'] = params['spatial_sigma']
            del params['spatial_sigma']
        if 'phi_inv_size' in params.keys():
            params['grid_size'] = params['phi_inv_size']
            del params['phi_inv_size']
        if 'rigid_transform' in params.keys():
            del params['rigid_transform']
        if 'smoothing_sigma' in params.keys():
            params['propagation_sigma'] = params['smoothing_sigma']
            del params['smoothing_sigma']
        if type(params['deformable_lr']) is not list:
            params['deformable_lr'] = [params['deformable_lr']] * len(params['currents_sigma'])

        # Do the deformable registration
        def_surface, def_extras, phi, phi_inv = tools.deformable_register(
            tar_surface.copy(), aff_src_surface.copy(),
            currents_sigma=params['currents_sigma'],
            prop_sigma=params['propagation_sigma'],
            deformable_lr=params['deformable_lr'],
            converge=params['converge'],
            grid_size=params['grid_size'],
            src_excess=aff_extra_surface,
            accu_forward=True,
            accu_inverse=True,
            device=device,
            grid_device='cuda:1',
            iters=params['niter'])

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_deformable_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save out all of the deformable transformed surfaces and phi inv
        io.SaveITKFile(phi_inv, f'{rabbit_dir}{source_block}{vol_ext}{source_block}_phi_inv_stacking.mhd')
        io.SaveITKFile(phi, f'{rabbit_dir}{source_block}{vol_ext}{source_block}_phi_stacking.mhd')

        out_path = f'{rabbit_dir}{source_block}{deform_ext}{source_block}'
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        if not os.path.exists(f'{out_path}_head_deformable.obj') or rerun:
            io.WriteOBJ(def_surface.vertices, def_surface.indices, f'{out_path}_head_deformable.obj')
        for extra_path, extra_surface in zip(extras_paths, def_extras):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_deformable.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

    print('Done registering foot blocks to middle block.')

    # Loop over the head blocks
    for i, block_path in enumerate(head_blocks, 1):
        if i == len(head_blocks):
            break
        target_block = block_path.split('/')[-1]
        source_block = head_blocks[i].split('/')[-1]
        if source_block in skip_blocks:
            continue
        if os.path.exists(f'{rabbit_dir}{source_block}{stitch_ext}'):
            mid_path = f'{source_block}{stitch_ext}'
        else:
            mid_path = f'{source_block}{raw_ext}'

        target_surface_path = f'{rabbit_dir}{target_block}{deform_ext}{target_block}_head_deformable.obj'
        source_surface_path = f'{rabbit_dir}{mid_path}{source_block}_foot.obj'
        if os.path.exists(f'{rabbit_dir}{mid_path}{source_block}_foot_stitched.obj'):
            source_surface_path = f'{rabbit_dir}{mid_path}{source_block}_foot_stitched.obj'

        extras_paths = [
            f'{rabbit_dir}{mid_path}{source_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{source_block}_ext.obj'
        ]
        if i < len(head_blocks) - 1:
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_head.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{source_block}_head_support.obj'):
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_head_support.obj']

        if os.path.exists(f'{rabbit_dir}{source_block}{deform_ext}{source_block}_foot_deformable.obj') and not rerun:
            print(f'The deformed surface for {source_block} already exists ... Next block')
            continue

        try:
            verts, faces = io.ReadOBJ(target_surface_path)
            tar_surface = core.TriangleMesh(verts, faces)
            tar_surface.to_(device)
        except IOError:
            print(f'The deformed foot surface for {target_block} was not found ... Next block')
            continue

        # Need to see if the target needs any support
        support_block = block_list[block_list.index(block_path) + 1].split('/')[-1]
        if os.path.exists(f'{rabbit_dir}{support_block}{deform_ext}{support_block}_head_support_deformable.obj'):
            verts, faces = io.ReadOBJ(
                f'{rabbit_dir}{support_block}{deform_ext}{support_block}_head_support_deformable.obj')
            tar_surface.add_surface_(verts.to(device=device), faces.to(device=device))

        try:
            verts, faces = io.ReadOBJ(source_surface_path)
            src_surface = core.TriangleMesh(verts, faces)
            src_surface.to_(device)
            src_surface.flip_normals_()
        except IOError:
            print(f'The raw foot surface for {source_block} was not found ... Next block')
            continue

        extra_surfaces = []
        for path in extras_paths:
            try:
                verts, faces = io.ReadOBJ(path)
            except IOError:
                extra_name = path.split('/')[-1]
                print(f'{extra_name} not found as an extra ... removing from list')
                _ = extras_paths.pop(extras_paths.index(path))
                continue
            extra_surfaces += [core.TriangleMesh(verts, faces)]
            extra_surfaces[-1].to_(device)

        # Load or create the dictionary for registration
        try:
            if rerun:
                raise IOError
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_affine_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'spatial_sigma': [2.0, 0.5],
                'affine_lr': 1.0e-08,
                'translation_lr': 1.0e-05,
                'converge': 1.0,
                'rigid_transform': True
            }

        print(f'Registering {source_block} to {target_block}:')
        affine_tform = tools.affine_register(
            tar_surface.copy(), src_surface.copy(),
            spatial_sigma=params['spatial_sigma'],
            affine_lr=params['affine_lr'],
            translation_lr=params['translation_lr'],
            rigid=params['rigid_transform'],
            converge=params['converge'],
            device=device)

        # Apply the affine to the source element and the excess
        aff_tformer = uo.AffineTransformSurface.Create(affine_tform, device=device)
        aff_src_surface = aff_tformer(src_surface)
        aff_extra_surface = []
        for surface in extra_surfaces:
            aff_extra_surface += [aff_tformer(surface)]

        out_path = f'{rabbit_dir}{source_block}{rigid_ext}{source_block}'
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_rigid.')
            if '_stitched' in name:
                name = name.replace('_stitched', '')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        if rigid:
            continue

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_affine_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save the affine in the volumes and in the surfaces location
        np.savetxt(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_rigid_tform.txt', affine_tform.numpy())
        np.savetxt(f'{rabbit_dir}{source_block}{vol_ext}{source_block}_rigid_tform.txt', affine_tform.numpy())

        if not os.path.exists(f'{out_path}_foot_rigid.obj') or rerun:
            io.WriteOBJ(aff_src_surface.vertices, aff_src_surface.indices, f'{out_path}_foot_rigid.obj')
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_rigid.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        try:
            if rerun:
                raise IOError
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_deformable_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'currents_sigma': [3.0, 0.5],
                'propagation_sigma': [6.0, 6.0, 3.0],
                'deformable_lr': [1.0e-04, 0.5e-04],
                'converge': 1.0,
                'grid_size': [20, 100, 100],
                'niter': 500
            }

        # Do the deformable registration
        def_surface, def_extras, phi, phi_inv = tools.deformable_register(
            tar_surface.copy(), aff_src_surface.copy(),
            currents_sigma=params['currents_sigma'],
            prop_sigma=params['propagation_sigma'],
            deformable_lr=params['deformable_lr'],
            converge=params['converge'],
            grid_size=params['grid_size'],
            src_excess=aff_extra_surface,
            accu_forward=True,
            accu_inverse=True,
            device=device,
            grid_device='cuda:1',
            iters=params['niter'])

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_deformable_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save out all of the deformable transformed surfaces and phi inv
        io.SaveITKFile(phi_inv, f'{rabbit_dir}{source_block}{vol_ext}{source_block}_phi_inv_stacking.mhd')
        io.SaveITKFile(phi, f'{rabbit_dir}{source_block}{vol_ext}{source_block}_phi_stacking.mhd')

        out_path = f'{rabbit_dir}{source_block}{deform_ext}{source_block}'
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        if not os.path.exists(f'{out_path}_foot_deformable.obj') or rerun:
            io.WriteOBJ(def_surface.vertices, def_surface.indices, f'{out_path}_foot_deformable.obj')
        for extra_path, extra_surface in zip(extras_paths, def_extras):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_deformable.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

    print('Done registering head blocks to middle block.')
def exvivo_to_blocks(rabbit):
    # This is registered from blocks to exvivo, so phi is needed to bring the
    # exvivo MR image to the block images. Need to determine the grid to
    # sample the MR onto.
    rabbit_dir = f'/hdscratch/ucair/{rabbit}/blockface/'
    block_list = sorted(glob.glob(f'{rabbit_dir}block*'))
    orig_dir = f'/home/sci/blakez/ucair/{rabbit}/rawVolumes/ExVivo_2018-07-26/'

    spacing = []
    origin = []
    size = []
    for block_path in block_list:
        if 'block07' in block_path:
            hdr = tools.read_mhd_header(f'{block_path}/volumes/raw/difference_volume.mhd')
        else:
            hdr = tools.read_mhd_header(f'{block_path}/volumes/deformable/difference_volume_deformable.mhd')
        spacing.append(np.array([float(x) for x in hdr['ElementSpacing'].split(' ')]))
        origin.append(np.array([float(x) for x in hdr['Offset'].split(' ')]))
        size.append(np.array([float(x) for x in hdr['DimSize'].split(' ')]))

    spacing = np.stack(spacing)
    origin = np.stack(origin)
    size = np.stack(size)
    extent = size * spacing + origin

    aff_grid_size = np.array((512, 512, 512))
    aff_grid_origin = np.min(origin, axis=0)
    aff_grid_spacing = (np.max(extent, axis=0) - aff_grid_origin) / aff_grid_size

    aff_grid = core.StructuredGrid(
        size=aff_grid_size.tolist()[::-1],
        spacing=aff_grid_spacing.tolist()[::-1],
        origin=aff_grid_origin.tolist()[::-1],
        device=device,
        channels=3
    )
    aff_grid.set_size(size=(512, 512, 512), inplace=True)
    aff_grid.set_to_identity_lut_()

    # Load the affine
    aff = np.loadtxt(f'{orig_dir}blocks_to_exvivo_affine.txt')
    aff = torch.tensor(aff, device=device, dtype=torch.float32)

    # Apply the FORWARD affine to the deformation
    a = aff[0:3, 0:3].float()
    t = aff[0:3, 3].float()

    # Create a deformation from the affine that lives in the stacked blocks space
    aff_grid.data = aff_grid.data.flip(0)
    aff_grid.data = torch.matmul(a, aff_grid.data.permute(list(range(1, 3 + 1)) + [0]).unsqueeze(-1))
    aff_grid.data = (aff_grid.data.squeeze() + t).permute([-1] + list(range(0, 3)))
    aff_grid.data = aff_grid.data.flip(0)

    # Load the deformable transformation
    phi = io.LoadITKFile(f'{orig_dir}blocks_phi_to_exvivo.mhd', device=device)
    phi.set_size((256, 256, 256))

    # Compose the grids
    deformation = so.ComposeGrids.Create(device=device)([aff_grid, phi])

    return deformation
def corrections(rabbit_dir, block, corr_num):
    block_path = f'{rabbit_dir}{block}/'
    image_list = sorted(glob.glob(f'{block_path}/images/filtered/*'))

    with open(f'{block_path}/volumes/raw/{block}_affine_dictionary.yaml', 'r') as f:
        Adict = yaml.load(f, Loader=yaml.FullLoader)

    surface_list = [x for x in image_list if 'surface' in x]
    scatter_list = [x for x in image_list if 'scatter' in x]

    # Load the last good surface image and the first bad one
    ImSurface_good = io.LoadITKFile(surface_list[corr_num - 1], device=device)
    ImSurface_good.set_spacing_(Adict['spacing'])
    ImSurface_good.set_origin_(-1 * (ImSurface_good.size * ImSurface_good.spacing) / 2)
    ImSurface_good /= 255.0

    ImSurface_bad = io.LoadITKFile(surface_list[corr_num], device=device)
    ImSurface_bad.set_spacing_(Adict['spacing'])
    ImSurface_bad.set_origin_(-1 * (ImSurface_bad.size * ImSurface_bad.spacing) / 2)
    ImSurface_bad /= 255.0

    # Apply the affines to both of the images
    aff_filter = so.AffineTransform.Create(affine=torch.tensor(Adict[corr_num - 1], device=device), device=device)
    ImSurface_good = aff_filter(ImSurface_good)
    aff_filter = so.AffineTransform.Create(affine=torch.tensor(Adict[corr_num], device=device), device=device)
    ImSurface_bad = aff_filter(ImSurface_bad)

    # Pick corresponding landmarks in the two images
    points = torch.tensor(
        tools.LandmarkPicker([ImSurface_bad[0].squeeze().cpu(), ImSurface_good[0].squeeze().cpu()]),
        dtype=torch.float32, device=device
    ).permute(1, 0, 2)

    # Change to real coordinates
    points *= torch.cat([ImSurface_good.spacing[None, None, :], ImSurface_good.spacing[None, None, :]], 0)
    points += torch.cat([ImSurface_good.origin[None, None, :], ImSurface_good.origin[None, None, :]], 0)

    # Solve for the correction affine from the landmarks and pack it into
    # homogeneous (3x3) form so it can be composed with the stored affines
    aff_filter = so.AffineTransform.Create(points[1], points[0], device=device)
    corr_affine = torch.eye(3, device=device, dtype=torch.float32)
    corr_affine[0:2, 0:2] = aff_filter.affine
    corr_affine[0:2, 2] = aff_filter.translation

    ImPrev = ImSurface_good.copy()

    for scat, surf in zip(scatter_list[corr_num:], surface_list[corr_num:]):
        print(f'Registering {scat.split("/")[-1]} .... ')
        sys.stdout.flush()

        image_num = scat.split('/')[-1].split('_')[1]
        # Get the number the file is from the start
        dist = scatter_list.index(scat)

        # Load the next image
        ImScatter = io.LoadITKFile(scat, device=device)
        ImScatter.set_spacing_(ImPrev.spacing)
        ImScatter.set_origin_(ImPrev.origin)
        ImScatter /= 255.0

        ImSurface = io.LoadITKFile(surf, device=device)
        ImSurface.set_spacing_(ImPrev.spacing)
        ImSurface.set_origin_(ImPrev.origin)
        ImSurface /= 255.0

        difference = ImScatter - ImSurface
        ImDifference = core.StructuredGrid(
            ImSurface.shape()[1:],
            tensor=difference.data[2].unsqueeze(0),
            spacing=ImSurface.spacing,
            origin=ImSurface.origin,
            device=device,
            dtype=torch.float32,
            channels=1
        )

        # Compose the correction with the stored affine for this image
        affine = torch.mm(corr_affine, torch.tensor(Adict[dist], device=device))

        # Save out the images
        aff_filter = so.AffineTransform.Create(affine=affine, device=device)
        aff_scatter = aff_filter(ImScatter)
        aff_surface = aff_filter(ImSurface)
        aff_difference = aff_filter(ImDifference)

        io.SaveITKFile(aff_scatter, f'{block_path}/volumes/raw/scatter/IMG_{image_num}_scatter.mhd')
        io.SaveITKFile(aff_surface, f'{block_path}/volumes/raw/surface/IMG_{image_num}_surface.mhd')
        io.SaveITKFile(aff_difference, f'{block_path}/volumes/raw/difference/IMG_{image_num}_difference.mhd')

        Adict[dist] = affine.detach().cpu().clone().tolist()
        ImPrev = aff_scatter.copy()

    with open(f'{block_path}/volumes/raw/{block}_affine_dictionary.yaml', 'w') as f:
        yaml.dump(Adict, f)
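# Hedged sketch of the homogeneous-coordinate convention used in corrections():
# a 2x2 linear part A and a 2-vector translation t are packed into a 3x3 matrix
# so that successive per-image corrections can be chained with a single matrix
# multiply. All values here are illustrative.
def _demo_affine_compose():
    A = torch.eye(2) * 1.05            # small isotropic scale
    t = torch.tensor([2.0, -1.0])      # small shift
    corr = torch.eye(3)
    corr[0:2, 0:2] = A
    corr[0:2, 2] = t
    prev = torch.eye(3)                # stand-in for a stored Adict entry
    combined = torch.mm(corr, prev)    # correction applied after the stored affine
    p = torch.tensor([10.0, 20.0, 1.0])
    return combined @ p                # maps a homogeneous 2D point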
def SolveAffineGrid(source_image, input_affine):
    '''Takes a source volume and an affine matrix and solves for the necessary
    grid in the target space of the affine. Essentially calculates the bounding
    box of the source image after applying the affine transformation.

    source_image = volume to be transformed by the affine (StructuredGrid type)
    input_affine = the affine transformation (4x4)

    Returns the grid for the transformed source volume in real coordinates.'''

    # Make sure we don't mess with the incoming affine
    affine = np.copy(input_affine)

    # Get parameters from the incoming source image
    in_sz = source_image.size.tolist()
    in_sp = source_image.spacing.tolist()
    in_or = source_image.origin.tolist()

    # Extract the pure rotation and ignore scaling to find the final size of the volume
    U, s, V = np.linalg.svd(affine[0:3, 0:3])
    rotMat = np.eye(4)
    rotMat[0:3, 0:3] = np.dot(U, V)

    # Get the corners of the volume in index coordinates
    inputCorners = np.array([0, 0, 0, 1])
    inputCorners = np.vstack((inputCorners, np.array([in_sz[0], 0, 0, 1])))
    inputCorners = np.vstack((inputCorners, np.array([0, in_sz[1], 0, 1])))
    inputCorners = np.vstack((inputCorners, np.array([in_sz[0], in_sz[1], 0, 1])))

    # Account for the case that the source image is 3D
    if len(source_image.size) == 3:
        inputCorners = np.vstack((inputCorners, np.array([0, 0, in_sz[2], 1])))
        inputCorners = np.vstack((inputCorners, np.array([in_sz[0], 0, in_sz[2], 1])))
        inputCorners = np.vstack((inputCorners, np.array([0, in_sz[1], in_sz[2], 1])))
        inputCorners = np.vstack((inputCorners, np.array([in_sz[0], in_sz[1], in_sz[2], 1])))

    # Define the index corners to find the final size of the transformed volume
    indexCorners = np.matrix(inputCorners)

    # Find the real corners of the input volume for finding the output origin
    realCorners = np.matrix(np.multiply(inputCorners, np.array(in_sp + [1])) + np.array(in_or + [0]))

    # Apply the transformations to the real and index corners. Translation does
    # not affect the index size, but does affect the real-coordinate origin.
    outRealCorners = np.matmul(affine, realCorners.transpose())
    outIndxCorners = np.matmul(rotMat, indexCorners.transpose())

    # Find the size in real and index coordinates of the output volume
    realSize = (np.max(outRealCorners, 1) - np.min(outRealCorners, 1))[0:3]
    indexSize = (np.max(outIndxCorners, 1) - np.min(outIndxCorners, 1))[0:3]

    # The index size is the size of the output volume
    out_sz = np.squeeze(np.array(np.ceil(indexSize).astype(int)))
    out_sz[out_sz == 0] = 1

    # Divide real size by index size to get the spacing of the output volume;
    # guard against division by zero for 2D inputs
    out_sp = np.squeeze(np.array(np.divide(realSize, indexSize, where=indexSize != 0)))
    out_sp[out_sp == 0] = 1

    # Find the output origin by taking the min in each dimension of the real transformed corners
    out_or = np.squeeze(np.array(np.min(outRealCorners, 1)))[0:3]

    # Make the output structured grid
    out_tensor = core.StructuredGrid(
        size=out_sz.copy(),
        origin=out_or.copy(),
        spacing=out_sp.copy(),
        device=source_image.device,
        channels=1
    )

    return out_tensor
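# Hedged usage sketch for SolveAffineGrid(): a 45-degree rotation about one
# axis grows the in-plane bounding box of the output grid by roughly sqrt(2).
# The source volume here is synthetic.
def _demo_solve_affine_grid():
    src = core.StructuredGrid(size=[64, 64, 64], spacing=[1.0, 1.0, 1.0],
                              origin=[0.0, 0.0, 0.0], channels=1)
    c, s = np.cos(np.pi / 4), np.sin(np.pi / 4)
    affine = np.array([[c, -s, 0.0, 0.0],
                       [s,  c, 0.0, 0.0],
                       [0.0, 0.0, 1.0, 0.0],
                       [0.0, 0.0, 0.0, 1.0]])
    return SolveAffineGrid(src, affine)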
def front_face_stacking(rabbit, base_dir='/hdscratch/ucair/'):
    rabbit_dir = f'{base_dir}{rabbit}/blockface/'
    raw_ext = '/surfaces/raw/'
    ff_ext = '/surfaces/frontface/'
    deform_ext = '/surfaces/deformable/'
    vol_ext = '/volumes/raw/'
    stitch_ext = '/surfaces/raw/stitching/deformable/'

    # Get a list of the blocks
    block_list = sorted(glob.glob(f'{rabbit_dir}block*'))

    # Determine the middle block
    middle_block = block_list[4]
    foot_blocks = block_list[block_list.index(middle_block):]
    head_blocks = block_list[:block_list.index(middle_block) + 1][::-1]

    rerun = True
    skip_blocks = []

    if rerun:
        mid_block = middle_block.split('/')[-1]
        if not os.path.exists(f'{rabbit_dir}{mid_block}{ff_ext}'):
            os.makedirs(f'{rabbit_dir}{mid_block}{ff_ext}')
        affine_tform = torch.eye(4)
        np.savetxt(f'{rabbit_dir}{mid_block}{raw_ext}{mid_block}_front_face_tform.txt', affine_tform.numpy())

        # Copy the files from raw to front face for the middle surface
        if os.path.exists(f'{rabbit_dir}{mid_block}{stitch_ext}'):
            mid_path = f'{mid_block}{stitch_ext}'
        else:
            mid_path = f'{mid_block}{raw_ext}'

        files = [
            f'{rabbit_dir}{mid_path}{mid_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{mid_block}_ext.obj',
        ]
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_foot.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_foot.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_head.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_head.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_foot_support.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_foot_support.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{mid_block}_head_support.obj'):
            files += [f'{rabbit_dir}{mid_path}{mid_block}_head_support.obj']

        out_names = []
        for path in files:
            name = path.split('/')[-1].split(f'{mid_block}')[-1].replace('.', '_front_face.')
            out_path = f'{rabbit_dir}{mid_block}{ff_ext}{mid_block}'
            out_names += [f'{out_path}{name}']
        for in_file, out_file in zip(files, out_names):
            shutil.copy(in_file, out_file)

    # Loop over the foot blocks
    for i, block_path in enumerate(foot_blocks, 1):
        if i == len(foot_blocks):
            break
        target_block = block_path.split('/')[-1]
        source_block = foot_blocks[i].split('/')[-1]
        if source_block in skip_blocks:
            continue
        if os.path.exists(f'{rabbit_dir}{source_block}{stitch_ext}'):
            mid_path = f'{source_block}{stitch_ext}'
        else:
            mid_path = f'{source_block}{raw_ext}'

        target_surface_path = f'{rabbit_dir}{target_block}{ff_ext}{target_block}_foot_front_face.obj'
        source_surface_path = f'{rabbit_dir}{mid_path}{source_block}_head.obj'

        extras_paths = [
            f'{rabbit_dir}{mid_path}{source_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{source_block}_ext.obj',
        ]
        if i < len(foot_blocks) - 1:
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_foot.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{source_block}_foot_support.obj'):
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_foot_support.obj']

        if os.path.exists(f'{rabbit_dir}{source_block}{ff_ext}{source_block}_head_front_face.obj') and not rerun:
            print(f'The front face surface for {source_block} already exists ... Next block')
            continue

        try:
            verts, faces = io.ReadOBJ(target_surface_path)
            tar_surface = core.TriangleMesh(verts, faces)
            tar_surface.to_(device)
        except IOError:
            print(f'The front face foot surface for {target_block} was not found ... Next block')
            continue

        # Need to see if the target needs any support
        support_block = block_list[block_list.index(block_path) - 1].split('/')[-1]
        if os.path.exists(f'{rabbit_dir}{support_block}{ff_ext}{support_block}_foot_support_front_face.obj'):
            verts, faces = io.ReadOBJ(
                f'{rabbit_dir}{support_block}{ff_ext}{support_block}_foot_support_front_face.obj')
            tar_surface.add_surface_(verts.to(device=device), faces.to(device=device))

        try:
            verts, faces = io.ReadOBJ(source_surface_path)
            src_surface = core.TriangleMesh(verts, faces)
            src_surface.to_(device)
            src_surface.flip_normals_()
        except IOError:
            print(f'The raw head surface for {source_block} was not found ... Next block')
            continue

        extra_surfaces = []
        for path in extras_paths:
            try:
                verts, faces = io.ReadOBJ(path)
            except IOError:
                extra_name = path.split('/')[-1]
                print(f'{extra_name} not found as an extra ... removing from list')
                _ = extras_paths.pop(extras_paths.index(path))
                continue
            extra_surfaces += [core.TriangleMesh(verts, faces)]
            extra_surfaces[-1].to_(device)

        # Load or create the dictionary for registration
        try:
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'spatial_sigma': [2.0, 1.0],
                'affine_lr': 1.0e-06,
                'translation_lr': 1.0e-04,
                'converge': 0.01,
                'rigid_transform': True
            }

        print(f'Registering {source_block} to {target_block}:')
        affine_tform = front_face_register(
            tar_surface.copy(), src_surface.copy(),
            spatial_sigma=params['spatial_sigma'],
            affine_lr=params['affine_lr'],
            translation_lr=params['translation_lr'],
            converge=params['converge'],
            device=device)

        # Apply the affine to the source element and the excess
        aff_tformer = uo.AffineTransformSurface.Create(affine_tform, device=device)
        aff_src_surface = aff_tformer(src_surface)
        aff_extra_surface = []
        for surface in extra_surfaces:
            aff_extra_surface += [aff_tformer(surface)]

        out_path = f'{rabbit_dir}{source_block}{ff_ext}{source_block}'
        if not os.path.exists(f'{rabbit_dir}{source_block}{ff_ext}'):
            os.makedirs(f'{rabbit_dir}{source_block}{ff_ext}')

        if not os.path.exists(f'{out_path}_head_front_face.obj') or rerun:
            io.WriteOBJ(aff_src_surface.vertices, aff_src_surface.indices, f'{out_path}_head_front_face.obj')
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_front_face.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save the affine in the volumes and in the surfaces location
        np.savetxt(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_tform.txt',
                   affine_tform.numpy())

    print('Done registering foot blocks to middle block.')

    # Loop over the head blocks
    for i, block_path in enumerate(head_blocks, 1):
        if i == len(head_blocks):
            break
        target_block = block_path.split('/')[-1]
        source_block = head_blocks[i].split('/')[-1]
        if source_block in skip_blocks:
            continue
        if os.path.exists(f'{rabbit_dir}{source_block}{stitch_ext}'):
            mid_path = f'{source_block}{stitch_ext}'
        else:
            mid_path = f'{source_block}{raw_ext}'

        target_surface_path = f'{rabbit_dir}{target_block}{ff_ext}{target_block}_head_front_face.obj'
        source_surface_path = f'{rabbit_dir}{mid_path}{source_block}_foot.obj'

        extras_paths = [
            f'{rabbit_dir}{mid_path}{source_block}_decimate.obj',
            f'{rabbit_dir}{mid_path}{source_block}_ext.obj'
        ]
        if i < len(head_blocks) - 1:
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_head.obj']
        if os.path.exists(f'{rabbit_dir}{mid_path}{source_block}_head_support.obj'):
            extras_paths += [f'{rabbit_dir}{mid_path}{source_block}_head_support.obj']

        if os.path.exists(f'{rabbit_dir}{source_block}{ff_ext}{source_block}_foot_front_face.obj') and not rerun:
            print(f'The front face surface for {source_block} already exists ... Next block')
            continue

        try:
            verts, faces = io.ReadOBJ(target_surface_path)
            tar_surface = core.TriangleMesh(verts, faces)
            tar_surface.to_(device)
        except IOError:
            print(f'The front face foot surface for {target_block} was not found ... Next block')
            continue

        # Need to see if the target needs any support
        support_block = block_list[block_list.index(block_path) + 1].split('/')[-1]
        if os.path.exists(f'{rabbit_dir}{support_block}{ff_ext}{support_block}_head_support_front_face.obj'):
            verts, faces = io.ReadOBJ(
                f'{rabbit_dir}{support_block}{ff_ext}{support_block}_head_support_front_face.obj')
            tar_surface.add_surface_(verts.to(device=device), faces.to(device=device))

        try:
            verts, faces = io.ReadOBJ(source_surface_path)
            src_surface = core.TriangleMesh(verts, faces)
            src_surface.to_(device)
            src_surface.flip_normals_()
        except IOError:
            print(f'The raw foot surface for {source_block} was not found ... Next block')
            continue

        extra_surfaces = []
        for path in extras_paths:
            try:
                verts, faces = io.ReadOBJ(path)
            except IOError:
                extra_name = path.split('/')[-1]
                print(f'{extra_name} not found as an extra ... removing from list')
                _ = extras_paths.pop(extras_paths.index(path))
                continue
            extra_surfaces += [core.TriangleMesh(verts, faces)]
            extra_surfaces[-1].to_(device)

        # Load or create the dictionary for registration
        try:
            with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'spatial_sigma': [2.0, 1.0],
                'affine_lr': 1.0e-06,
                'translation_lr': 1.0e-04,
                'converge': 0.01,
                'rigid_transform': True
            }

        print(f'Registering {source_block} to {target_block}:')
        affine_tform = front_face_register(
            tar_surface.copy(), src_surface.copy(),
            spatial_sigma=params['spatial_sigma'],
            affine_lr=params['affine_lr'],
            translation_lr=params['translation_lr'],
            converge=params['converge'],
            device=device)

        # Apply the affine to the source element and the excess
        aff_tformer = uo.AffineTransformSurface.Create(affine_tform, device=device)
        aff_src_surface = aff_tformer(src_surface)
        aff_extra_surface = []
        for surface in extra_surfaces:
            aff_extra_surface += [aff_tformer(surface)]

        out_path = f'{rabbit_dir}{source_block}{ff_ext}{source_block}'
        if not os.path.exists(f'{rabbit_dir}{source_block}{ff_ext}'):
            os.makedirs(f'{rabbit_dir}{source_block}{ff_ext}')

        if not os.path.exists(f'{out_path}_foot_front_face.obj') or rerun:
            io.WriteOBJ(aff_src_surface.vertices, aff_src_surface.indices, f'{out_path}_foot_front_face.obj')
        for extra_path, extra_surface in zip(extras_paths, aff_extra_surface):
            name = extra_path.split('/')[-1].split(f'{source_block}')[-1].replace('.', '_front_face.')
            if not os.path.exists(f'{out_path}{name}') or rerun:
                io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        # Save out the parameters:
        with open(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Save the affine in the volumes and in the surfaces location
        np.savetxt(f'{rabbit_dir}{source_block}{raw_ext}{source_block}_front_face_tform.txt',
                   affine_tform.numpy())

    print('Done registering head blocks to middle block.')
def process_mic(rabbit):
    raw_mic_dir = f'/hdscratch/ucair/{rabbit}/microscopic/'
    bf_dir = f'/hdscratch/ucair/{rabbit}/blockface/'
    raw_bf_dir = f'/hdscratch/ucair/blockface/{rabbit}/'
    block_list = sorted(glob.glob(f'{raw_mic_dir}/*'))

    for block_path in block_list:
        block = block_path.split('/')[-1]
        mic_list = sorted(glob.glob(f'{block_path}/raw/*_image.tif'))
        img_nums = [x.split('/')[-1].split('_')[1] for x in mic_list]

        for img in img_nums:
            mic_file = f'{raw_mic_dir}{block}/hdf5/{block}_img{img}_image.hdf5'
            mic_seg = f'{raw_mic_dir}{block}/hdf5/{block}_img{img}_label.hdf5'
            blockface_image = f'{bf_dir}{block}/volumes/raw/difference/IMG_{img}_difference.mhd'
            blockface_label = f'{bf_dir}{block}/volumes/raw/segmentation/IMG_{img}_segmentation.mhd'

            # Skip images that already carry an affine in their metadata
            meta_dict = {}
            with h5py.File(mic_file, 'r') as f:
                for key in f['RawImage'].attrs:
                    meta_dict[key] = f['RawImage'].attrs[key]
            if 'Affine' in meta_dict.keys():
                continue

            blockface = io.LoadITKFile(blockface_image, device=device)
            label_data = io.LoadITKFile(blockface_label, device=device)

            blockface = (blockface - blockface.min()) / (blockface.max() - blockface.min())
            label = blockface.clone()
            label.data = label_data.data.clone()

            print('Affine Registering ... ')
            aff_mic, aff_mic_seg, affine = tools.process_mic(mic_file, mic_seg, blockface, label, device=device)
            print('Done')

            aff_mic *= aff_mic_seg
            blockface *= label
            blockface_s = core.StructuredGrid.FromGrid(blockface, tensor=blockface[0].unsqueeze(0), channels=1)
            aff_mic = (aff_mic - aff_mic.min()) / (aff_mic.max() - aff_mic.min())

            print('Deformable Registering Labels ... ')
            def_label, label_deformation = match_bf_mic(
                aff_mic_seg, label, steps=[0.01, 0.005], scales=[4, 1], gauss=True)
            print('Done')

            label_def_mic = so.ApplyGrid(label_deformation, device=device)(aff_mic, label_deformation)

            print('Deformable Registering Images ... ')
            def_image, image_deformation = match_bf_mic(
                label_def_mic, blockface_s, steps=[0.01, 0.01], scales=[2, 1])
            print('Done')

            # Compose the label and image deformations into a single field
            composer = so.ComposeGrids(device=device, dtype=torch.float32, padding_mode='border')
            deformation = composer([image_deformation, label_deformation])

            def_mic = so.ApplyGrid(deformation, device=device)(aff_mic, deformation)

            # Older files stored the image data at the root of the HDF5 file
            try:
                with h5py.File(mic_file, 'r') as f:
                    mic = f['ImageData'][:]
            except KeyError:
                with h5py.File(mic_file, 'r') as f:
                    mic = f['RawImage/ImageData'][:]

            mic = core.StructuredGrid(
                mic.shape[1:],
                tensor=torch.tensor(mic, dtype=torch.float32, device=device),
                device=device,
                dtype=torch.float32,
                channels=3
            )

            print('Saving ... ')
            with h5py.File(mic_file, 'w') as f:
                g = f.create_group('RawImage')
                d = f.create_group('Deformation')
                g.create_dataset('ImageData', data=mic.data.cpu().numpy())
                d.create_dataset('Phi', data=deformation.data.cpu().numpy())
                g.attrs['Shape'] = list(mic.shape())
                g.attrs['Spacing'] = mic.spacing.tolist()
                g.attrs['Origin'] = mic.origin.tolist()
                g.attrs['Affine'] = affine.tolist()
                d.attrs['Shape'] = list(deformation.shape())
                d.attrs['Spacing'] = deformation.spacing.tolist()
                d.attrs['Origin'] = deformation.origin.tolist()

            io.SaveITKFile(
                def_mic,
                f'{raw_mic_dir}{block}/volume/images/IMG_{img}_def_histopathology.mhd'
            )
            print('Done')
            plt.close('all')

    print('All Done')
def tempRecon(Obj, opt):
    if not os.path.exists(f'{Obj.rabbitDirectory}/rawVolumes/TemperatureRecon/'):
        os.makedirs(f'{Obj.rabbitDirectory}/rawVolumes/TemperatureRecon/')

    size, orgn = _convert_mat_files(Obj)

    unionOrigin = np.min(orgn, 0).tolist()
    adj = np.abs(np.max(orgn, 0) - np.min(orgn, 0))
    unionSize = (np.max(size, 0) + adj).tolist()
    unionSize = [int(x) for x in unionSize]

    # We are going to create a config file for each registration
    sonicationList = natural_sort(glob.glob(Obj.rabbitDirectory + '/rawVolumes/TemperatureRecon/*'))
    # Need to remove the resample and registration volumes from the list
    sonicationList = [x for x in sonicationList if '_resample' not in x]
    sonicationList = [x for x in sonicationList if 'RegVols' not in x]

    lastName = sonicationList[-1].split('/')[-1]
    preLength = len(lastName.split('_')[0])
    lastName = lastName[:preLength] + '_----' + lastName[preLength:]

    # Set up the config directory
    if not os.path.exists(Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/'):
        os.makedirs(Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/')

    # Construct the common grid: at this point assuming isotropic spacing of 1
    if 'unionSize' in locals():
        unionSpacing = [1.0, 1.0, 1.0]
        unionIm = core.StructuredGrid(size=unionSize, origin=unionOrigin, spacing=unionSpacing,
                                      channels=1, device=device)
        def_im = unionIm.clone()
    else:
        # Fall back to the grid stored in an existing temperature config
        configList = glob.glob(Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/*')
        tmpObj = Config.Load(TempConfigSpec, configList[0])
        unionSize = tmpObj.unionSize
        unionOrigin = tmpObj.unionOrigin
        unionSpacing = tmpObj.unionSpacing
        unionIm = core.StructuredGrid(size=unionSize, origin=unionOrigin, spacing=unionSpacing,
                                      channels=1, device=device)
        def_im = unionIm.clone()

    # We need to register the last image to the VIBE
    son = sonicationList[-1]
    try:
        regObj = Config.Load(
            TempConfigSpec,
            Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/{0}_config.yaml'.format(son.split('/')[-1]))
    except IOError:
        regObj = Config.SpecToConfig(TempConfigSpec)
        regObj.Niter = [40]
        regObj.gaussSigma = [0.1]
        regObj.gaussKernel = [3]
        regObj.regWeight = [2.0]
        regObj.stepSize = [0.004]
        regObj.I0 = 'None'
        regObj.I1 = 'None'

    regObj.unionSize = unionSize
    regObj.unionSpacing = unionSpacing
    regObj.unionOrigin = unionOrigin

    # Need to get the name of the volume
    outName = son.split('/')[-1]
    preLength = len(outName.split('_')[0])
    outName = outName[:preLength] + '_----' + outName[preLength:]

    regObj.outputPath = Obj.rabbitDirectory + '/elastVolumes/TemperatureRecon/' + son.split('/')[-1]
    if not os.path.exists(regObj.outputPath):
        os.makedirs(regObj.outputPath)
    plt.close('all')

    # Get the deformation field that takes the last sonication image to the VIBE
    h_post = ER.MultiscaleElast(regObj, opt)
    io.SaveITKFile(h_post, regObj.outputPath + '/final_deformation_Is_to_It.mha')
    rc.WriteConfig(
        TempConfigSpec, regObj,
        Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/{0}_config.yaml'.format(son.split('/')[-1]))

    print('Applying elast transformation to {0} volumes .... '.format(son.split('/')[-1]), end='')
    sys.stdout.flush()
    for image in glob.glob('{0}/rawVolumes/TemperatureRecon/'.format(Obj.rabbitDirectory)
                           + son.split('/')[-1] + '/*'):
        # Update the name
        outName = image.split('/')[-1]
        outName = outName[:preLength + 4] + 'e' + outName[preLength + 5:]
        im = io.LoadITKFile(image, device=device)
        temp = so.ResampleWorld.Create(unionIm, device=device)(im)
        def_im = so.ApplyGrid.Create(h_post, device=device)(temp)
        io.SaveITKFile(def_im, regObj.outputPath + '/' + outName)

    for son in sonicationList[:-1]:
        # Create the registration object for elastic
        try:
            regObj = Config.Load(
                TempConfigSpec,
                Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/{0}_config.yaml'.format(son.split('/')[-1]))
        except IOError:
            regObj = Config.SpecToConfig(TempConfigSpec)

        # Need to get the name of the volume
        outName = son.split('/')[-1]
        preLength = len(outName.split('_')[0])
        outName = outName[:preLength] + '_----' + outName[preLength:]

        # Always register to the last sonication - have to get the first mag image!
        regObj.I0 = son + '/../RegVols/' + outName[0:5] + '.nii.gz'
        regObj.I1 = sonicationList[-1] + '/../RegVols/' + lastName[0:5] + '.nii.gz'

        # Set the grid parameters of the union FOV
        regObj.unionSize = unionSize
        regObj.unionSpacing = unionSpacing
        regObj.unionOrigin = unionOrigin

        regObj.outputPath = Obj.rabbitDirectory + '/elastVolumes/TemperatureRecon/' + son.split('/')[-1]
        if not os.path.exists(regObj.outputPath):
            os.makedirs(regObj.outputPath)
        plt.close('all')

        # Get the deformation field that takes this sonication image to the last
        # one, then compose with h_post for the full deformation to the VIBE
        h = ER.MultiscaleElast(regObj, opt)
        h_comp = so.ComposeGrids.Create(device=device)([h, h_post])
        io.SaveITKFile(h_comp, regObj.outputPath + '/final_deformation_Is_to_It.mha')

        # Write out the config file
        if not os.path.exists(Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs'):
            os.makedirs(Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs')
        rc.WriteConfig(
            TempConfigSpec, regObj,
            Obj.rabbitDirectory + '/ConfigFiles/temperatureConfigs/{0}_config.yaml'.format(son.split('/')[-1]))

        # Iterate through all the mag and phase images and apply the deformation
        print('Applying elast transformation to {0} volumes .... '.format(son.split('/')[-1]), end='')
        sys.stdout.flush()
        for image in glob.glob('{0}/rawVolumes/TemperatureRecon/'.format(Obj.rabbitDirectory)
                               + son.split('/')[-1] + '/*'):
            # Update the name
            outName = image.split('/')[-1]
            outName = outName[:preLength + 4] + 'e' + outName[preLength + 5:]
            im = io.LoadITKFile(image, device=device)
            temp = so.ResampleWorld.Create(unionIm, device=device)(im)
            # Apply the composed field so earlier sonications are carried all
            # the way to the VIBE space
            def_im = so.ApplyGrid.Create(h_comp, device=device)(temp)
            io.SaveITKFile(def_im, regObj.outputPath + '/' + outName)
def eval_R1(rabbit_list):
    def_diff = []
    aff_diff = []
    print('Eval R1:')
    for r in rabbit_list:
        print(f'Processing Rabbit {r} ... ', end='')
        in_path = f'/hdscratch/ucair/{r}/'
        if r == '18_047':
            in_path = f'/hdscratch2/{r}/'
        block_list = sorted(glob.glob(f'{in_path}/blockface/block*'))
        for b_path in block_list:
            b = b_path.split('/')[-1]
            lm_list = sorted(glob.glob(f'{in_path}/microscopic/{b}/landmarks/R1*'))
            if not lm_list:
                continue
            for lm in lm_list:
                i = lm.split('_')[6]
                r_img = io.LoadITKFile(
                    f'{in_path}microscopic/{b}/segmentations/IMG_{i}/img_{i}_red.nii.gz',
                    device=device)
                def_dir = f'{in_path}microscopic/{b}/deformations/'

                # Load the affine
                aff = np.loadtxt(glob.glob(f'{def_dir}img_{i}_affine_to_blockface.txt')[0])
                aff = torch.tensor(aff, device=device, dtype=torch.float32)

                # Load the deformation
                deformation_data = io.LoadITKFile(
                    f'{def_dir}/img_{i}_deformation_to_blockface.mhd', device=device)
                deformation = core.StructuredGrid(
                    size=deformation_data.size[0:2],
                    spacing=deformation_data.spacing[1:3],
                    origin=deformation_data.origin[1:3],
                    device=deformation_data.device,
                    tensor=deformation_data.data.squeeze().permute(2, 0, 1),
                    channels=2)

                # Apply the inverse affine to the grid
                aff = aff.inverse()
                a = aff[0:2, 0:2].float()
                t = aff[0:2, 2].float()

                aff_deformation = deformation.clone()
                aff_deformation.set_to_identity_lut_()

                deformation.data = torch.matmul(
                    a.unsqueeze(0).unsqueeze(0),
                    deformation.data.permute(list(range(1, 2 + 1)) + [0]).unsqueeze(-1))
                deformation.data = (deformation.data.squeeze() + t).permute([-1] + list(range(0, 2)))

                aff_deformation.data = torch.matmul(
                    a.unsqueeze(0).unsqueeze(0),
                    aff_deformation.data.permute(list(range(1, 2 + 1)) + [0]).unsqueeze(-1))
                aff_deformation.data = (aff_deformation.data.squeeze() + t).permute([-1] + list(range(0, 2)))

                in_bf = f'{in_path}blockface/{b}/landmarks/'
                bf_lm = [torch.tensor(
                    np.loadtxt(f'{in_bf}/R1_{r}_{b}_IMG_{i}_blockface_landmarks.txt'),
                    device=device)]
                in_mic = f'{in_path}microscopic/{b}/landmarks/'
                mic_lm = torch.tensor(
                    np.loadtxt(f'{in_mic}/R1_{r}_{b}_IMG_{i}_microscopic_landmarks.txt'),
                    device=device)

                def_point = sample_points(deformation, bf_lm)
                aff_point = sample_points(aff_deformation, bf_lm)

                # Convert to mm using the histology pixel spacing
                aff_diff.append((mic_lm - aff_point) * 0.00176)
                def_diff.append((mic_lm - def_point) * 0.00176)
        print('done')

    def_dist = torch.sqrt((torch.cat(def_diff, 0) ** 2).sum(-1))
    aff_dist = torch.sqrt((torch.cat(aff_diff, 0) ** 2).sum(-1))
    print('Eval R1: Done')

    return def_dist, aff_dist
def example():
    f = box_image(core.StructuredGrid((256, 256), spacing=[1.0, 1.0], device=device))
    d = box_image(core.StructuredGrid((256, 256), spacing=[1.0, 1.0], device=device),
                  break_width=50, val=500.0) + 0.1
    m = box_image(core.StructuredGrid((256, 256), spacing=[1.0, 1.0], device=device),
                  break_width=50, val=1.0)

    f = so.Gaussian.Create(channels=1, kernel_size=5, sigma=2, dim=2, device=device)(f)
    grads = so.Gradient.Create(dim=2, device=device)(f) * m
    orig_grads = grads.copy()

    phi_inv = core.StructuredGrid((256, 256), spacing=[1.0, 1.0], device=device)
    phi_inv.set_to_identity_lut_()
    id = phi_inv.copy()

    for i in range(0, 6):
        diffuse_x_grad = grads[0].data
        diffuse_y_grad = diffuse(grads[1].data, d.data.squeeze())

        # Scale back up the gradients
        y_scale = (grads[1].max() - grads[1].min()) / (diffuse_y_grad.max() - diffuse_y_grad.min())

        update = core.StructuredGrid.FromGrid(
            f, tensor=torch.stack((diffuse_x_grad, y_scale * diffuse_y_grad), 0), channels=2)

        sample = id.clone() + 20.0 * update
        phi_inv = so.ApplyGrid.Create(sample, pad_mode='border',
                                      device=update.device, dtype=update.dtype)(phi_inv)

        # Update the gradients
        f = so.ApplyGrid.Create(phi_inv, device=device)(f)
        d = so.ApplyGrid.Create(phi_inv, device=device)(d)
        m = so.ApplyGrid.Create(phi_inv, device=device)(m)
        grads = so.Gradient.Create(dim=2, device=device)(f) * m

        print(f'Iter {i}/5 done...')

    test = so.ApplyGrid.Create(phi_inv, device=device)(f)

    plt.figure()
    plt.imshow(d.cpu())
    plt.colorbar()
    plt.title('Diffusion Coefficient')

    plt.figure()
    # The original referenced an undefined `imgout` here; the diffused gradients after the
    # loop are plotted instead.
    plt.imshow(grads[1].squeeze().cpu())
    plt.colorbar()
    plt.title('Diffused Gradients')

    plt.figure()
    # `orig_grads` holds the gradients saved before the diffusion loop.
    plt.imshow(orig_grads[1].squeeze().cpu())
    plt.colorbar()
    plt.title('Starting Gradients')

    # def_grid = id.copy()
    # def_grid.set_to_identity_lut_()
    # def_grid += smooth_grads
    #
    # phi_inv = so.ApplyGrid.Create(def_grid, device=device)(phi_inv)
    #
    # def_image = so.ApplyGrid.Create(phi_inv, device=device)(f)
    #
    # plt.figure()
    # plt.imshow(def_image.data.squeeze().cpu() - f.data.squeeze().cpu())
    # plt.colorbar()
    # plt.title('Difference Of Images')
    #
    # plt.figure()
    # plt.imshow(def_image.data.squeeze().cpu())
    # plt.title('Deformed Image')

    print('something')
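# The loop above calls a `diffuse` helper that is not defined in this file. A minimal sketch of
# a coefficient-weighted diffusion step is given below, assuming `diffuse(img, coeff)` runs a
# few explicit heat-equation iterations where the local conductivity is set by `coeff` (larger
# coefficient = faster smoothing). This is an illustrative stand-in, not the original helper.
def diffuse(img, coeff, iters=50, dt=0.1):
    import torch.nn.functional as F
    x = img.clone().unsqueeze(0).unsqueeze(0)          # (1, 1, H, W)
    c = coeff.unsqueeze(0).unsqueeze(0)
    c = c / c.max()                                    # keep dt * c inside the stable range
    lap = torch.tensor([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]],
                       device=img.device).view(1, 1, 3, 3)
    for _ in range(iters):
        x = x + dt * c * F.conv2d(x, lap, padding=1)   # x_{t+1} = x_t + dt * c * (Laplacian x_t)
    return x.squeeze()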
def _convert_mat_files(Obj):
    rabbitNumber = Obj.rabbitDirectory.split('/')[-1]
    matList = sorted(glob.glob(f'{Obj.rabbitDirectory}/externalRecons/AblationImaging/*'))

    # Do some string comparison to make sure that this file is a sonication file
    matList = [x for x in matList if 'Son' in x]

    # Have to keep track of the MIN and MAX of each transform so we know the bounding box to resample to
    size = []
    orgn = []

    if not os.path.exists(os.path.join(Obj.rabbitDirectory + '/rawVolumes/TemperatureRecon/RegVols/')):
        os.makedirs(os.path.join(Obj.rabbitDirectory + '/rawVolumes/TemperatureRecon/RegVols/'))

    for tempFile in matList:
        # Create the output folder for all of the timepoint images
        outDir = os.path.splitext(tempFile)[0]
        outName = outDir.split('/')[-1]
        dirOutName = os.path.join(Obj.rabbitDirectory + '/rawVolumes/TemperatureRecon/', outName)
        if not os.path.exists(dirOutName):
            os.makedirs(dirOutName)

        # Have to add more logic here because there are varying lengths of numbers FUN
        preLength = len(outName.split('_')[0])
        outName = outName[:preLength] + '_----' + outName[preLength:]

        struct = spi.loadmat(tempFile)
        imData = struct['ims']

        # Could put some logic here to make sure that they all have a 4th dimension
        timePts = np.shape(imData)[-1]

        # Get transformation and solve for the affine
        # tform = struct['PosDCS']
        # print('min: {0} .... '.format(np.min(np.min(np.min(tform, 0), 0), 0).tolist()))
        # print('max: {0} .... '.format(np.max(np.max(np.max(tform, 0), 0), 0).tolist()))
        # dim = np.shape(tform)[0:3]
        # dim = [x - 1 for x in dim]
        # print('shape: {0} .... '.format(dim))

        # Pull points from the transformation to solve for the affine
        # landmarks = []
        # landmarks.append([[0, 0, 0], tform[0, 0, 0].tolist()])
        # landmarks.append([[dim[0], 0, 0], tform[dim[0], 0, 0].tolist()])
        # landmarks.append([[0, dim[1], 0], tform[0, dim[1], 0].tolist()])
        # landmarks.append([[dim[0], dim[1], 0], tform[dim[0], dim[1], 0].tolist()])
        # landmarks.append([[0, 0, dim[2]], tform[0, 0, dim[2]].tolist()])
        # landmarks.append([[dim[0], 0, dim[2]], tform[dim[0], 0, dim[2]].tolist()])
        # landmarks.append([[0, dim[1], dim[2]], tform[0, dim[1], dim[2]].tolist()])
        # landmarks.append([[dim[0], dim[1], dim[2]], tform[dim[0], dim[1], dim[2]].tolist()])

        # Solve for the affine
        # affine = apps.SolveAffine(landmarks)
        affine = struct['geomInfo'][0, 0]['AffineDCS']

        temp = core.StructuredGrid(
            [imData.shape[0], imData.shape[1], imData.shape[2]],
            tensor=torch.tensor(np.real(imData)).permute(-1, 0, 1, 2),
            origin=[0, 0, 0],
            channels=imData.shape[-1]
        )
        # temp = ca.Image3D(cc.MakeGrid(np.shape(tform)[0:3], ca.MEM_DEVICE), ca.MEM_DEVICE)
        # temp.setOrigin(ca.Vec3Df(0, 0, 0))
        # temp.setSpacing(ca.Vec3Df(1, 1, 1))

        affGrid = rc.SolveAffineGrid(temp, affine)
        size.append(affGrid.size.tolist())
        orgn.append(affGrid.origin.tolist())

        print('Converting files for {0} time series images .... '.format(timePts), end='')
        sys.stdout.flush()

        for time in range(0, timePts):
            magIm_np = np.real(imData[:, :, :, time])
            phsIm_np = np.imag(imData[:, :, :, time])

            magIm = core.StructuredGrid(
                [magIm_np.shape[0], magIm_np.shape[1], magIm_np.shape[2]],
                tensor=torch.tensor(magIm_np).unsqueeze(0).float(),
                origin=[0, 0, 0],
                channels=1
            )
            phsIm = core.StructuredGrid(
                [phsIm_np.shape[0], phsIm_np.shape[1], phsIm_np.shape[2]],
                tensor=torch.tensor(phsIm_np).unsqueeze(0).float(),
                origin=[0, 0, 0],
                channels=1
            )

            affFilt = so.AffineTransform.Create(affine=torch.tensor(affine).float())
            affMagIm = affFilt(magIm, out_grid=affGrid)
            affPhsIm = affFilt(phsIm, out_grid=affGrid)

            io.SaveITKFile(affMagIm, dirOutName + '/' + outName + '_real_{0}.nii.gz'.format(str(time).zfill(2)))
            io.SaveITKFile(affPhsIm, dirOutName + '/' + outName + '_imag_{0}.nii.gz'.format(str(time).zfill(2)))

            if time == 0:
                # We need to have the magnitude image for registration
                regIm_np = np.abs(imData[:, :, :, time])
                regIm = core.StructuredGrid(
                    [regIm_np.shape[0], regIm_np.shape[1], regIm_np.shape[2]],
                    tensor=torch.tensor(regIm_np).unsqueeze(0).float(),
                    origin=[0, 0, 0],
                    channels=1
                )
                affRegIm = affFilt(regIm, out_grid=affGrid)
                io.SaveITKFile(affRegIm, dirOutName + '/../RegVols/' + outName[0:5] + '.nii.gz')

        # print('size: {0} .... '.format(magIm_np.shape), end='')
        print('Done')

    return size, orgn
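# _convert_mat_files returns the per-sonication grid sizes and origins so a common field of
# view can be built for registration. A minimal sketch of how a union grid could be derived
# from those lists is below. The helper name is hypothetical and unit spacing is assumed, to
# match the grids created above; the union logic used elsewhere in this pipeline may differ.
def _union_grid(size, orgn, device='cpu'):
    sizes = torch.tensor(size, dtype=torch.float32)
    origins = torch.tensor(orgn, dtype=torch.float32)
    union_min = origins.min(0).values
    union_max = (origins + sizes).max(0).values   # extent assuming spacing of 1.0
    union_size = (union_max - union_min).ceil()
    return core.StructuredGrid(size=union_size.tolist(), origin=union_min.tolist(),
                               spacing=[1.0, 1.0, 1.0], channels=1, device=device)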
def sample_on_histopathology(rabbit, block, img_num, bf_slice):
    blockface_dir = f'/hdscratch/ucair/{rabbit}/blockface/{block}/'
    histology_dir = f'/hdscratch/ucair/{rabbit}/microscopic/{block}/'
    invivo_dir = f'/hdscratch/ucair/{rabbit}/mri/invivo/volumes/deformable/{block}/'
    day0_dir = f'/hdscratch/ucair/{rabbit}/mri/day0/volumes/deformable/{block}/'

    invivo_mr_out_path = f'{invivo_dir}/IMG_{img_num}/'
    day0_mr_out_path = f'{day0_dir}/IMG_{img_num}/'

    mic_file = f'{histology_dir}hdf5/{block}_img{img_num}_image.hdf5'

    # First need to see if the deformation from histology to blockface exists
    # Load the deformation
    try:
        phi_inv_data = io.LoadITKFile(
            f'{histology_dir}deformations/img_{img_num}_deformation_to_blockface.mhd',
            device=device
        )
        aff = np.loadtxt(f'{histology_dir}deformations/img_{img_num}_affine_to_blockface.txt')
        aff = torch.tensor(aff, device=device, dtype=torch.float32)
    except IOError:
        raise IOError(f'The full deformation for IMG {img_num} was not found. Please generate and then re-run.')

    # Because I can't save 2D deformations at the moment
    phi_inv = core.StructuredGrid(
        size=phi_inv_data.size[0:2],
        spacing=phi_inv_data.spacing[1:3],
        origin=phi_inv_data.origin[1:3],
        device=phi_inv_data.device,
        tensor=phi_inv_data.data.squeeze().permute(2, 0, 1),
        channels=2
    )

    # Apply the inverse affine to the deformation
    aff = aff.inverse()
    a = aff[0:2, 0:2].float()
    t = aff[0:2, 2].float()

    phi_inv.data = torch.matmul(a.unsqueeze(0).unsqueeze(0),
                                phi_inv.data.permute(list(range(1, 2 + 1)) + [0]).unsqueeze(-1))
    phi_inv.data = (phi_inv.data.squeeze() + t).permute([-1] + list(range(0, 2)))

    if not os.path.exists(invivo_mr_out_path):
        os.makedirs(invivo_mr_out_path)
    if not os.path.exists(day0_mr_out_path):
        os.makedirs(day0_mr_out_path)

    meta_dict = {}
    with h5py.File(mic_file, 'r') as f:
        mic = f['RawImage/ImageData'][:, ::10, ::10]
        for key in f['RawImage'].attrs:
            meta_dict[key] = f['RawImage'].attrs[key]

    mic = core.StructuredGrid(
        mic.shape[1:],
        tensor=torch.tensor(mic, dtype=torch.float32, device=device),
        spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device),
        origin=(torch.tensor(mic.shape[1:]) / 2) * -1,
        device=device,
        dtype=torch.float32,
        channels=3
    )
    mic = (mic - mic.min()) / (mic.max() - mic.min())

    segs = []
    segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_healthy_tissue.nrrd',
                            device=device)]
    if os.path.exists(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd'):
        segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd',
                                device=device)]
    if os.path.exists(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd'):
        segs += [io.LoadITKFile(f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd',
                                device=device)]

    # Apply the deformation to the microscopic image (via the .Create factory, matching the
    # rest of the pipeline)
    deformed_histology = so.ApplyGrid.Create(phi_inv, device=device)(mic, phi_inv)
    for i, vol in enumerate(segs):
        segs[i] = so.ApplyGrid.Create(phi_inv, device=device)(vol, phi_inv)

    # Load the blockface
    blockface = io.LoadITKFile(f'{blockface_dir}volumes/raw/difference_volume.mhd', device=device)

    sd = 'cuda:0'
    mr_invivo_t1_slice = get_mr_slice(f'{invivo_dir}/invivo_ce_t1_to_{block}.mhd', blockface, bf_slice - 1, sd)
    # mr_invivo_t2_slice = get_mr_slice(f'{invivo_dir}/invivo_t2_to_{block}.mhd', blockface, bf_slice - 1, sd)
    # mr_invivo_adc_slice = get_mr_slice(f'{invivo_dir}/invivo_adc_to_{block}.mhd', blockface, bf_slice - 1, sd)
    mr_invivo_npv_slice = get_mr_slice(f'{invivo_dir}/invivo_npv_to_{block}.mhd', blockface, bf_slice - 1, sd)

    # NOTE: the day0 paths below are hard-coded to rabbit 18_062 / block04 rather than being
    # built from the function arguments
    mr_d0_npv_slice = get_mr_slice(
        f'/hdscratch/ucair/18_062/mri/day0/volumes/deformable/block04/day0_npv_to_{block}.mhd',
        blockface, bf_slice - 1, sd)
    mr_d0_t1_slice = get_mr_slice(
        f'/hdscratch/ucair/18_062/mri/day0/volumes/deformable/block04/day0_t1_to_{block}.mhd',
        blockface, bf_slice - 1, sd)
    mr_d0_t1_nc_slice = get_mr_slice(
        f'/hdscratch/ucair/18_062/mri/day0/volumes/deformable/block04/day0_t1_nc_to_{block}.mhd',
        blockface, bf_slice - 1, sd)
    mr_log_ctd_slice = get_mr_slice(
        f'/hdscratch/ucair/18_062/mri/day0/volumes/deformable/block04/day0_log_ctd_to_{block}.mhd',
        blockface, bf_slice - 1, sd)
    mr_log_ctd_slice.data = torch.exp(mr_log_ctd_slice.data)

    def create_circular_mask(h, w, center=None, radius=None):
        if center is None:  # use the middle of the image
            center = (int(w / 2), int(h / 2))
        if radius is None:  # use the smallest distance between the center and image walls
            radius = min(center[0], center[1], w - center[0], h - center[1])

        Y, X = np.ogrid[:h, :w]
        dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)

        mask = dist_from_center <= radius
        return mask

    circle_mask = create_circular_mask(2586, 3426, center=[1831, 1100], radius=700)
    mr_log_ctd_slice.data = mr_log_ctd_slice.data * torch.tensor(circle_mask, device=device).float()

    # mr_day0_t2_slice = get_mr_slice(f'{day0_dir}/day0_t2_to_{block}.mhd', blockface, bf_slice - 1, sd)
    # mr_day0_ctd_slice = get_mr_slice(f'{day0_dir}/day0_ctd_to_{block}.mhd', blockface, bf_slice - 1, sd)
    # mr_day0_t1_slice = get_mr_slice(f'{day0_dir}/day0_ce_t1_to_{block}.mhd', blockface, bf_slice - 1, sd)
    # mr_day0_npv_slice = get_mr_slice(f'{day0_dir}/day0_npv_to_{block}.mhd', blockface, bf_slice - 1, sd)

    # io.SaveITKFile(mr_invivo_t1_slice, f'{invivo_mr_out_path}/invivo_ce_t1_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_invivo_t2_slice, f'{invivo_mr_out_path}/invivo_t2_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_invivo_adc_slice, f'{invivo_mr_out_path}/invivo_adc_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_invivo_npv_slice, f'{invivo_mr_out_path}/invivo_npv_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_day0_t2_slice, f'{day0_mr_out_path}/day0_t2_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_day0_ctd_slice, f'{day0_mr_out_path}/day0_ctd_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_day0_t1_slice, f'{day0_mr_out_path}/day0_ce_t1_as_bf_img_{img_num}.mhd')
    # io.SaveITKFile(mr_day0_npv_slice, f'{day0_mr_out_path}/day0_ce_t1_as_bf_img_{img_num}.mhd')

    del blockface, phi_inv, phi_inv_data
    torch.cuda.empty_cache()

    histology_seg = core.StructuredGrid.FromGrid(segs[0], channels=1)
    for seg in segs:
        histology_seg += seg

    ctd_slice = mr_log_ctd_slice.data[0].cpu()
    ctd_mask = np.logical_and(ctd_slice > 10.0, circle_mask == 1)
    masked_thermal = np.ma.masked_where(ctd_mask == 0.0, ctd_slice.numpy()).squeeze()

    plt.figure()
    # plt.imshow(t1_nc.data.cpu()[0, slice_n].squeeze(), aspect=1.0 / aspect, cmap='gray')
    plt.imshow(masked_thermal, cmap='jet', vmin=10, vmax=240)
    plt.gca().patch.set_facecolor([0, 0, 0, 0])
    plt.axis('off')

    zero_slice = np.zeros_like(ctd_slice)
    masked_slice = np.ma.masked_where(zero_slice == 0.0, zero_slice).squeeze()

    plt.figure()
    plt.imshow(masked_slice.squeeze(), cmap='gray')
    npv_contours = measure.find_contours(ctd_slice.data.squeeze().cpu().numpy() * circle_mask, 240)
    for contour in npv_contours:
        plt.plot(contour[:, 1], contour[:, 0], color='red', linewidth=1.8)
    plt.axis('off')
    plt.gca().patch.set_facecolor([0, 0, 0, 0])
    plt.savefig(f'{invivo_mr_out_path}/day0_ctd_contour.png', dpi=600, bbox_inches='tight', pad_inches=0,
                transparent=True)

    save = True

    # Generate the figures without masking
    generate_figures(mr_d0_t1_slice, segs, out_path=invivo_mr_out_path,
                     base_name='day0_ce_t1_no_mask', save=save)
    generate_figures(mr_invivo_t1_slice, segs, out_path=invivo_mr_out_path,
                     base_name='deformed_invivo_ce_t1_MR_no_mask', save=save)

    histology_image = histology_seg * deformed_histology
    mr_invivo_t1_slice = histology_seg * mr_invivo_t1_slice
    mr_d0_t1_slice = histology_seg * mr_d0_t1_slice
    # mr_invivo_t2_slice = histology_seg * mr_invivo_t2_slice
    # mr_day0_t2_slice = histology_seg * mr_day0_t2_slice
    # mr_day0_ctd_slice = histology_seg * mr_day0_ctd_slice
    # mr_day0_t1_slice = histology_seg * mr_day0_t1_slice
    # mr_day0_npv_slice = histology_seg * mr_day0_npv_slice

    # mr_day0_ctd_slice.data[mr_day0_ctd_slice.data < 0.5] = 0.0
    # mr_day0_ctd_slice.data[mr_day0_ctd_slice.data >= 0.5] = 1.0
    #
    # mr_day0_npv_slice.data[mr_day0_npv_slice.data < 0.5] = 0.0
    # mr_day0_npv_slice.data[mr_day0_npv_slice.data >= 0.5] = 1.0

    hist_map = core.StructuredGrid.FromGrid(segs[0])
    for i, seg in enumerate(segs, 1):
        hist_map = hist_map + (i * seg)

    if len(segs) == 3:
        color_map = ListedColormap(['k', 'crimson', 'lime', 'gold'])
    elif len(segs) == 2:
        color_map = ListedColormap(['k', 'crimson', 'lime'])
    else:
        color_map = ListedColormap(['k', 'crimson'])

    plt.figure()
    plt.imshow(hist_map.data.squeeze().cpu(), cmap=color_map)
    plt.axis('off')
    plt.show()
    plt.pause(1.0)

    if save:
        plt.savefig(f'{invivo_mr_out_path}/histology_segmentation_map.png', dpi=600, bbox_inches='tight',
                    pad_inches=0)
        plt.savefig(f'{day0_mr_out_path}/histology_segmentation_map.png', dpi=600, bbox_inches='tight',
                    pad_inches=0)
    plt.close('all')

    # Generate the figure for showing the NPV on the Day0 MRI CE T1
    # contours = measure.find_contours(mr_day0_npv_slice.data.squeeze().cpu().numpy(), 0.5)

    # Plot the image with contours
    # plt.figure()
    # plt.imshow(mr_day0_t1_slice.data.permute(1, 2, 0).squeeze().cpu(), cmap='gray')
    # for contour in contours:
    #     plt.plot(contour[:, 1], contour[:, 0], color='blue', linewidth=0.8)
    # plt.axis('off')
    # plt.show()
    # plt.pause(1.0)
    # if save:
    #     plt.savefig(f'{invivo_mr_out_path}/day0_npv_with_contours.png', dpi=600, bbox_inches='tight', pad_inches=0)
    #
    # plt.close('all')

    # generate_figures(
    #     mr_day0_npv_slice, segs, out_path=day0_mr_out_path, base_name='deformed_npv', save=save,
    #     extra_cont=contours
    # )

    generate_figures(histology_image, segs, out_path=invivo_mr_out_path,
                     base_name='deformed_histology', save=save)
    generate_figures(mr_invivo_t1_slice, segs, out_path=invivo_mr_out_path,
                     base_name='deformed_invivo_ce_t1_MR', save=save)
    generate_figures(mr_d0_t1_slice, segs, out_path=invivo_mr_out_path,
                     base_name='day0_ce_t1', save=save)
    # generate_figures(
    #     mr_invivo_t2_slice, segs, out_path=invivo_mr_out_path, base_name='deformed_invivo_t2_MR', save=save
    # )
    # generate_figures(
    #     mr_invivo_adc_slice, segs, out_path=invivo_mr_out_path, base_name='deformed_invivo_adc_MR', save=save
    # )
    # generate_figures(
    #     mr_day0_t2_slice, segs, out_path=day0_mr_out_path, base_name='deformed_day0_t2_MR', save=save
    # )
    # generate_figures(
    #     mr_day0_ctd_slice, segs, out_path=day0_mr_out_path, base_name='deformed_day0_ctd_MR', save=save
    # )
    #
    # generate_figures(
    #     mr_day0_t1_slice, segs, out_path=day0_mr_out_path, base_name='deformed_day0_ce_t1_MR', save=True
    # )

    npv_contours = measure.find_contours(mr_invivo_npv_slice.data.squeeze().cpu().numpy(), 0.5)
    zero_im = np.zeros_like(mr_invivo_npv_slice.data.squeeze().cpu().numpy())

    fig = plt.figure()
    # plt.imshow(mr_invivo_t1_slice.data.cpu().squeeze())
    ax = fig.add_subplot()
    ax.set_xlim([0, zero_im.shape[1]])
    ax.set_ylim([0, zero_im.shape[0]])
    for contour in npv_contours:
        plt.plot(contour[:, 1], contour[:, 0], color=matplotlib._cm._tab10_data[2], linewidth=3.0)
    ax.invert_yaxis()
    ax.set_aspect(1)
    plt.axis('off')
    plt.savefig(f'{invivo_mr_out_path}/NPV_contours.png', dpi=600, bbox_inches='tight', pad_inches=0,
                transparent=True)

    npv_contours = measure.find_contours(mr_d0_npv_slice.data.squeeze().cpu().numpy(), 0.5)
    zero_im = np.zeros_like(mr_d0_npv_slice.data.squeeze().cpu().numpy())

    fig = plt.figure()
    # plt.imshow(mr_invivo_t1_slice.data.cpu().squeeze())
    ax = fig.add_subplot()
    ax.set_xlim([0, zero_im.shape[1]])
    ax.set_ylim([0, zero_im.shape[0]])
    for contour in npv_contours:
        plt.plot(contour[:, 1], contour[:, 0], color=matplotlib._cm._tab10_data[0], linewidth=3.0)
    ax.invert_yaxis()
    ax.set_aspect(1)
    plt.axis('off')
    plt.savefig(f'{invivo_mr_out_path}/D0_NPV_contours.png', dpi=600, bbox_inches='tight', pad_inches=0,
                transparent=True)

    if len(segs) == 3:
        hst_contours = measure.find_contours(segs[-1].data.squeeze().cpu().numpy(), 0.5)
        fig = plt.figure()
        ax = fig.add_subplot()
        ax.set_xlim([0, zero_im.shape[1]])
        ax.set_ylim([0, zero_im.shape[0]])
        for contour in hst_contours:
            plt.plot(contour[:, 1], contour[:, 0], color=matplotlib._cm._tab10_data[6], linewidth=3.0)
        ax.invert_yaxis()
        ax.set_aspect(1)
        plt.axis('off')
        plt.savefig(f'{invivo_mr_out_path}/HST_contours.png', dpi=600, bbox_inches='tight', pad_inches=0,
                    transparent=True)

    plt.close('all')
    print('Done')
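# `get_mr_slice` is used throughout sample_on_histopathology but defined elsewhere. A minimal
# sketch consistent with its call sites is below: load an MR volume, resample it onto the
# blockface grid so the voxels correspond, then pull out the requested slice. The body is an
# assumption based on the resample/extract patterns used in this file.
def get_mr_slice(mr_file, blockface, slice_idx, device='cpu'):
    mr_vol = io.LoadITKFile(mr_file, device=device)
    mr_vol = so.ResampleWorld.Create(blockface, device=device)(mr_vol)
    return mr_vol.extract_slice(slice_idx, dim=0)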
def generate_image_volume(rabbit, block, base_dir='/hdscratch/ucair/'):
    blockface_dir = f'{base_dir}{rabbit}/blockface/{block}/'
    histology_dir = f'{base_dir}{rabbit}/microscopic/{block}/'
    def_dir = f'{histology_dir}deformations/'
    out_dir = f'{histology_dir}/volume/raw/'

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    raw_images = glob.glob(f'{histology_dir}/raw/*_image.tif')
    if not raw_images:
        # Fall back to jpg if no tif images are present
        raw_images = sorted(glob.glob(f'{histology_dir}/raw/*_image.jpg'))

    image_nums = sorted([int(x.split('/')[-1].split('_')[1]) for x in raw_images])

    deformed_images = []

    for im in image_nums:
        print(f'Loading and deforming image {im} ... ', end='')

        # Load the healthy segmentation as a reference
        healthy = io.LoadITKFile(
            f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_healthy_tissue.nrrd', device=device)

        try:
            ablation = io.LoadITKFile(
                f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_ablated_region.nrrd', device=device)
        except RuntimeError:
            ablation = (healthy * 0.0).copy()

        try:
            transition = io.LoadITKFile(
                f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_uncertain_region.nrrd', device=device)
        except RuntimeError:
            transition = (healthy * 0.0).copy()

        combined = ablation + transition + healthy
        combined.data = (combined.data >= 0.5).float()

        # Load the affine
        aff = np.loadtxt(glob.glob(f'{def_dir}img_{im:03d}_affine_to_blockface.txt')[0])
        aff = torch.tensor(aff, device=device, dtype=torch.float32)

        # Load the deformation
        deformation_data = io.LoadITKFile(
            f'{def_dir}/img_{im:03d}_deformation_to_blockface.mhd', device=device)

        deformation = core.StructuredGrid(
            size=deformation_data.size[0:2],
            spacing=deformation_data.spacing[1:3],
            origin=deformation_data.origin[1:3],
            device=deformation_data.device,
            tensor=deformation_data.data.squeeze().permute(2, 0, 1),
            channels=2)

        # Apply the inverse affine to the grid
        aff = aff.inverse()
        a = aff[0:2, 0:2].float()
        t = aff[0:2, 2].float()

        deformation.data = torch.matmul(
            a.unsqueeze(0).unsqueeze(0),
            deformation.data.permute(list(range(1, 2 + 1)) + [0]).unsqueeze(-1))
        deformation.data = (deformation.data.squeeze() + t).permute([-1] + list(range(0, 2)))

        # Load the actual image
        #### Apply the affine to the image
        mic_file = f'{histology_dir}hdf5/{block}_img{im:03d}_image.hdf5'

        meta_dict = {}
        with h5py.File(mic_file, 'r') as f:
            mic = f['RawImage/ImageData'][:, ::10, ::10]
            for key in f['RawImage'].attrs:
                meta_dict[key] = f['RawImage'].attrs[key]

        mic = core.StructuredGrid(
            mic.shape[1:],
            tensor=torch.tensor(mic, dtype=torch.float32, device=device),
            spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device),
            device=device,
            dtype=torch.float32,
            channels=3)

        mic = (mic - mic.min()) / (mic.max() - mic.min())
        mic.data = mic.data * combined.data

        def_mic = so.ApplyGrid.Create(deformation, device=device)(mic, deformation)

        # Need to put the mic image in the right slice
        z_or = (im - 1) * 0.05
        def_mic = core.StructuredGrid(
            [2] + list(def_mic.shape()[1:]),
            tensor=def_mic.data.unsqueeze(1).repeat(1, 2, 1, 1),
            spacing=torch.tensor([0.05, def_mic.spacing[0], def_mic.spacing[1]],
                                 dtype=torch.float32, device=device),
            origin=torch.tensor([z_or, def_mic.origin[0], def_mic.origin[1]],
                                dtype=torch.float32, device=device),
            device=device,
            dtype=torch.float32,
            channels=3)

        deformed_images.append(def_mic.copy())

        del def_mic, mic, deformation
        torch.cuda.empty_cache()
        print('done')

    # Now need to load the blockface volume
    block_vol = io.LoadITKFile(f'{blockface_dir}volumes/raw/difference_volume.mhd', device='cpu')

    # image_vol = core.StructuredGrid(
    #     size=block_vol.size,
    #     spacing=block_vol.spacing,
    #     origin=block_vol.origin,
    #     device=block_vol.device,
    #     channels=3
    # )

    image_z_spacing = np.diff(np.array(image_nums)).mean() * 0.05
    image_z_origin = image_nums[0] * 0.05

    image_vol = core.StructuredGrid(
        size=[len(image_nums)] + list(block_vol.size[-2:].numpy()),
        spacing=[image_z_spacing] + list(block_vol.spacing[-2:].numpy()),
        origin=[image_z_origin] + list(block_vol.origin[-2:].numpy()),
        device=block_vol.device,
        channels=3)

    for ii, im in enumerate(image_nums):
        image_vol.data[:, ii] = deformed_images[ii].data[:, 0].clone().cpu()

    io.SaveITKFile(image_vol, f'{out_dir}/{block}_image_volume.mhd')

    del image_vol, deformed_images, block_vol
    torch.cuda.empty_cache()
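# The matmul/permute dance for applying an inverse affine to a 2D deformation grid is repeated
# in several functions in this file. A small helper capturing the same math,
# phi'(x) = A * phi(x) + t, is sketched below as a refactoring suggestion; it is not part of
# the original pipeline.
def apply_affine_to_field(field, aff):
    """Apply a 3x3 homogeneous affine to a 2-channel deformation grid in place (sketch)."""
    a = aff[0:2, 0:2].float()
    t = aff[0:2, 2].float()
    # (2, H, W) -> (H, W, 2, 1), multiply by A, add t, then back to (2, H, W)
    field.data = torch.matmul(a.unsqueeze(0).unsqueeze(0),
                              field.data.permute(1, 2, 0).unsqueeze(-1))
    field.data = (field.data.squeeze() + t).permute(2, 0, 1)
    return field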
def generate_segmentation_volume(rabbit, block, base_dir='/hdscratch/ucair/'):
    blockface_dir = f'{base_dir}{rabbit}/blockface/{block}/'
    histology_dir = f'{base_dir}{rabbit}/microscopic/{block}/'
    def_dir = f'{histology_dir}deformations/'
    out_dir = f'{histology_dir}/volume/raw/'

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    raw_images = glob.glob(f'{histology_dir}/raw/*_image.tif')
    if not raw_images:
        # Fall back to jpg if no tif images are present
        raw_images = sorted(glob.glob(f'{histology_dir}/raw/*_image.jpg'))

    image_nums = sorted([int(x.split('/')[-1].split('_')[1]) for x in raw_images])

    deformed_ablation_segs = []
    deformed_combined_segs = []
    deformed_images = []

    for im in image_nums:
        print(f'Loading and deforming image {im} ... ', end='')

        # Load the healthy segmentation as a reference
        healthy = io.LoadITKFile(
            f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_healthy_tissue.nrrd', device=device)

        # Load the affine
        aff = np.loadtxt(glob.glob(f'{def_dir}img_{im:03d}_affine_to_blockface.txt')[0])
        aff = torch.tensor(aff, device=device, dtype=torch.float32)

        # Load the deformation
        deformation_data = io.LoadITKFile(
            f'{def_dir}/img_{im:03d}_deformation_to_blockface.mhd', device=device)

        deformation = core.StructuredGrid(
            size=deformation_data.size[0:2],
            spacing=deformation_data.spacing[1:3],
            origin=deformation_data.origin[1:3],
            device=deformation_data.device,
            tensor=deformation_data.data.squeeze().permute(2, 0, 1),
            channels=2)

        # Apply the inverse affine to the grid
        aff = aff.inverse()
        a = aff[0:2, 0:2].float()
        t = aff[0:2, 2].float()

        deformation.data = torch.matmul(
            a.unsqueeze(0).unsqueeze(0),
            deformation.data.permute(list(range(1, 2 + 1)) + [0]).unsqueeze(-1))
        deformation.data = (deformation.data.squeeze() + t).permute([-1] + list(range(0, 2)))

        try:
            ablation = io.LoadITKFile(
                f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_ablated_region.nrrd', device=device)
        except RuntimeError:
            ablation = (healthy * 0.0).copy()

        try:
            transition = io.LoadITKFile(
                f'{histology_dir}segmentations/IMG_{im:03d}/img_{im:03d}_uncertain_region.nrrd', device=device)
        except RuntimeError:
            transition = (healthy * 0.0).copy()

        # Load the actual image
        #### Apply the affine to the image
        mic_file = f'{histology_dir}hdf5/{block}_img{im:03d}_image.hdf5'

        meta_dict = {}
        with h5py.File(mic_file, 'r') as f:
            mic = f['RawImage/ImageData'][:, ::10, ::10]
            for key in f['RawImage'].attrs:
                meta_dict[key] = f['RawImage'].attrs[key]

        mic = core.StructuredGrid(
            mic.shape[1:],
            tensor=torch.tensor(mic, dtype=torch.float32, device=device),
            spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device),
            device=device,
            dtype=torch.float32,
            channels=3)

        mic = (mic - mic.min()) / (mic.max() - mic.min())

        if not os.path.exists(f'{histology_dir}deformable/img_{im:03d}_to_blockface/'):
            os.makedirs(f'{histology_dir}deformable/img_{im:03d}_to_blockface/')

        def_mic = so.ApplyGrid.Create(deformation, device=device)(mic, deformation)

        # Need to put the mic image in the right slice
        z_or = (im - 1) * 0.05
        def_mic = core.StructuredGrid(
            [2] + list(def_mic.shape()[1:]),
            tensor=def_mic.data.unsqueeze(1).repeat(1, 2, 1, 1),
            spacing=torch.tensor([0.05, def_mic.spacing[0], def_mic.spacing[1]],
                                 dtype=torch.float32, device=device),
            origin=torch.tensor([z_or, def_mic.origin[0], def_mic.origin[1]],
                                dtype=torch.float32, device=device),
            device=device,
            dtype=torch.float32,
            channels=3)

        io.SaveITKFile(
            def_mic,
            f'{histology_dir}deformable/img_{im:03d}_to_blockface/img_{im:03d}_to_blockface.mhd')

        combined = ablation + transition
        combined.data = (combined.data >= 0.5).float()

        def_ablation = so.ApplyGrid.Create(deformation, device=device)(ablation, deformation)
        def_combined = so.ApplyGrid.Create(deformation, device=device)(combined, deformation)

        deformed_ablation_segs.append(def_ablation.copy())
        deformed_combined_segs.append(def_combined.copy())
        deformed_images.append(def_mic.copy())

        del def_ablation, def_combined, def_mic, combined, mic, deformation
        torch.cuda.empty_cache()
        print('done')

    # Now need to load the blockface volume
    block_vol = io.LoadITKFile(f'{blockface_dir}volumes/raw/difference_volume.mhd', device='cpu')

    image_vol = core.StructuredGrid(size=block_vol.size,
                                    spacing=block_vol.spacing,
                                    origin=block_vol.origin,
                                    device=block_vol.device,
                                    channels=3)

    for ii, im in enumerate(image_nums):
        image_vol.data[:, im - 1] = deformed_images[ii].data[:, 0].clone().cpu()

    io.SaveITKFile(image_vol, f'{out_dir}/{block}_image_volume.mhd')

    del image_vol, deformed_images
    torch.cuda.empty_cache()

    single_vol = core.StructuredGrid(size=block_vol.size,
                                     spacing=block_vol.spacing,
                                     origin=block_vol.origin,
                                     device=block_vol.device,
                                     channels=1)

    for ii, im in enumerate(image_nums):
        single_vol.data[:, im - 1] = deformed_ablation_segs[ii].data[:].clone().cpu()

    io.SaveITKFile(single_vol, f'{out_dir}/{block}_ablation_segmentation_no_interp.mhd')

    del single_vol
    torch.cuda.empty_cache()

    # Now build the interpolated ablation volume
    ablation_vol = core.StructuredGrid(size=block_vol.size,
                                       spacing=block_vol.spacing,
                                       origin=block_vol.origin,
                                       device=device,
                                       channels=1)

    # Cross-fade between each consecutive pair of deformed segmentations to fill the slices
    # between histology sections (the loop variable is named slice_idx so it does not shadow
    # the builtin `slice`)
    for ii, im in enumerate(image_nums):
        if ii == len(image_nums) - 1:
            continue
        next_slice = image_nums[ii + 1]
        for s, slice_idx in enumerate(range(im, next_slice + 1)):
            step = 1.0 / (next_slice - im)
            cur_alpha = 1.0 - (s * step)
            next_alpha = 0.0 + (s * step)
            ablation_vol.data[:, slice_idx] = (
                deformed_ablation_segs[ii].data * cur_alpha) + (deformed_ablation_segs[ii + 1].data * next_alpha)

    gauss_ablation = so.Gaussian.Create(1, 10, [0.1, 2, 2], dim=3, device=device)(ablation_vol)
    io.SaveITKFile(gauss_ablation, f'{out_dir}/{block}_ablation_segmentation.mhd')

    del gauss_ablation, deformed_ablation_segs
    torch.cuda.empty_cache()

    combined_vol = core.StructuredGrid(size=block_vol.size,
                                       spacing=block_vol.spacing,
                                       origin=block_vol.origin,
                                       device=block_vol.device,
                                       channels=1)

    for ii, im in enumerate(image_nums):
        if ii == len(image_nums) - 1:
            continue
        next_slice = image_nums[ii + 1]
        for s, slice_idx in enumerate(range(im, next_slice + 1)):
            step = 1.0 / (next_slice - im)
            cur_alpha = 1.0 - (s * step)
            next_alpha = 0.0 + (s * step)
            combined_vol.data[:, slice_idx] = (
                deformed_combined_segs[ii].data * cur_alpha) + (deformed_combined_segs[ii + 1].data * next_alpha)

    # gauss_ablation = so.Gaussian.Create(1, 10, [0.1, 2, 2], dim=3, device=device)(ablation_vol)
    io.SaveITKFile(combined_vol, f'{out_dir}/{block}_ablation_and_transition_segmentation.mhd')

    del block_vol
    del combined_vol
    torch.cuda.empty_cache()
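# The two interpolation loops above linearly cross-fade between the nearest pair of deformed
# histology segmentations to fill blockface slices that have no histology. The same scheme,
# pulled out as a hypothetical standalone helper for clarity:
def crossfade_slices(vol, slice_data, slice_nums):
    """Fill vol along dim 1 by blending each consecutive pair of known slices (sketch)."""
    for ii in range(len(slice_nums) - 1):
        cur, nxt = slice_nums[ii], slice_nums[ii + 1]
        step = 1.0 / (nxt - cur)
        for s, idx in enumerate(range(cur, nxt + 1)):
            alpha = s * step  # 0 at the current known slice, 1 at the next one
            vol.data[:, idx] = (slice_data[ii].data * (1.0 - alpha)
                                + slice_data[ii + 1].data * alpha)
    return vol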
def process_mic(micicroscopic, mic_seg_file, blockface, label, device='cpu'): meta_dict = {} try: with h5py.File(micicroscopic, 'r') as f: mic = f['ImageData'][1, ::10, ::10] except KeyError: with h5py.File(micicroscopic, 'r') as f: mic = f['RawImage/ImageData'][1, ::10, ::10] for key in f['RawImage'].attrs: meta_dict[key] = f['RawImage'].attrs[key] with h5py.File(mic_seg_file, 'r') as f: mic_seg = f['ImageData'][0, ::10, ::10] mic = core.StructuredGrid( mic.shape, tensor=torch.tensor(mic, dtype=torch.float32, device=device).unsqueeze(0), spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device), device=device, dtype=torch.float32, channels=1 ) mic_seg = core.StructuredGrid( mic_seg.shape, tensor=torch.tensor(mic_seg, dtype=torch.float32, device=device).unsqueeze(0), spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device), device=device, dtype=torch.float32, channels=1 ) # mic_seg.data = (mic_seg.data <= 0.5).float() if 'Affine' in meta_dict: opt_affine = torch.tensor(meta_dict['Affine'], dtype=torch.float32, device=device) optaff_filter = AffineTransform.Create(affine=opt_affine, device=device) aff_mic_image = optaff_filter(mic, blockface) aff_mic_label = optaff_filter(mic_seg, blockface) return aff_mic_image, aff_mic_label, opt_affine points = torch.tensor( LandmarkPicker([mic[0].cpu(), blockface[1].cpu()]), dtype=torch.float32, device=device ).permute(1, 0, 2) # Change to real coordinates points *= torch.cat([mic.spacing[None, None, :], blockface.spacing[None, None, :]], 0) points += torch.cat([mic.origin[None, None, :], blockface.origin[None, None, :]], 0) aff_filter = AffineTransform.Create(points[1], points[0], device=device) affine = torch.eye(3, device=device, dtype=torch.float32) affine[0:2, 0:2] = aff_filter.affine affine[0:2, 2] = aff_filter.translation # aff_mic_image = aff_filter(mic, blockface) aff_mic_seg = aff_filter(mic_seg, blockface) # Do some additional registration just to make sure it is in the right spot similarity = so.L2Similarity.Create(device=device) model = so.AffineIntensity.Create(similarity, device=device) # Create the optimizer optimizer = optim.SGD([ {'params': model.affine, 'lr': 1.0e-11}, {'params': model.translation, 'lr': 1.0e-12}], momentum=0.9, nesterov=True ) energy = [] for epoch in range(0, 1000): optimizer.zero_grad() loss = model( label.data, aff_mic_seg.data ) energy.append(loss.item()) print(f'===> Iteration {epoch:3} Energy: {loss.item():.3f}') loss.backward() # Compute the gradients optimizer.step() # # if epoch >= 2: if epoch > 10 and np.mean(energy[-10:]) - energy[-1] < 0.01: break itr_affine = torch.eye(3, device=device, dtype=torch.float32) itr_affine[0:2, 0:2] = model.affine itr_affine[0:2, 2] = model.translation opt_affine = torch.matmul(itr_affine.detach(), affine) # Create a new resample filter to make sure everything works optaff_filter = AffineTransform.Create(affine=opt_affine, device=device) aff_mic_image = optaff_filter(mic, blockface) aff_mic_label = optaff_filter(mic_seg, blockface) return aff_mic_image, aff_mic_label, opt_affine
def MultiscaleElast(regObj, opt):
    device = opt.device

    try:
        src_name = regObj.I0
        tar_name = regObj.I1
        regObj.I1 = io.LoadITKFile(regObj.I1, device)
        regObj.I0 = io.LoadITKFile(regObj.I0, device)
    except RuntimeError:
        src_name = _getFilePath('Source Image File (ElastReg)', initialdir='/home/sci/blakez/ucair/')
        tar_name = _getFilePath('Target Image File (ElastReg)', initialdir='/home/sci/blakez/ucair/')
        regObj.I1 = io.LoadITKFile(tar_name, device)
        regObj.I0 = io.LoadITKFile(src_name, device)

    # Check if the croppedImage exists in the object - temperature objects won't have it
    if regObj.croppedImage != 'None':
        # Load the cropped image which contains the region of interest
        try:
            crp_im = io.LoadITKFile(regObj.croppedImage, device)
        except RuntimeError:
            regObj.croppedImage = _getFilePath('Cropped Image File', initialdir='/home/sci/blakez/ucair/')
            crp_im = io.LoadITKFile(regObj.croppedImage, device)

        # Resample the images onto the ROI grid
        regObj.I1 = so.ResampleWorld.Create(crp_im, device=device)(regObj.I1)
        regObj.I0 = so.ResampleWorld.Create(crp_im, device=device)(regObj.I0)
        del crp_im

    if hasattr(regObj, 'unionSize'):
        # Make the grid for the union and resample the two images onto that grid
        # unionGrid = cc.MakeGrid(regObj.unionSize, regObj.unionSpacing, regObj.unionOrigin)
        unionIm = core.StructuredGrid(size=regObj.unionSize,
                                      origin=regObj.unionOrigin,
                                      spacing=regObj.unionSpacing,
                                      channels=1)
        regObj.I0 = so.ResampleWorld.Create(unionIm, device=device)(regObj.I0)
        regObj.I1 = so.ResampleWorld.Create(unionIm, device=device)(regObj.I1)

    # Make sure that the registration is only happening where both of them are not 0
    regObj.I0.data[regObj.I1.data == 0.0] = 0.0
    regObj.I1.data[regObj.I0.data == 0.0] = 0.0

    # Make sure that both images are from 0 to 1
    regObj.I0 = regObj.I0 / regObj.I0.max()
    regObj.I1 = regObj.I1 / regObj.I1.max()

    # Median filter if the regObj has a kernel size for it
    if regObj.medianFilterSize != 'None':
        regObj.I0 = rc.MedianFilter_I(regObj.I0, regObj.medianFilterSize)
        regObj.I1 = rc.MedianFilter_I(regObj.I1, regObj.medianFilterSize)

    if regObj.tvWeight != 'None':
        regObj.I0 = rc.TVFilter_I(regObj.I0, regObj.tvWeight)
        regObj.I1 = rc.TVFilter_I(regObj.I1, regObj.tvWeight)

    # Variance Equalize volumes
    if regObj.sourceVarESig != 'None':
        kernel_size = regObj.sourceVarESig * 3
        regObj.I0 = so.VarianceEqualize.Create(kernel_size, regObj.sourceVarESig, device=device)(regObj.I0)

    if regObj.targetVarESig != 'None':
        # Use the target sigma here (the original used sourceVarESig, which looks like a
        # copy-paste slip)
        kernel_size = regObj.targetVarESig * 3
        regObj.I1 = so.VarianceEqualize.Create(kernel_size, regObj.targetVarESig, device=device)(regObj.I1)

    # Check for masks: assume they mask out the water or bladder
    if regObj.I1_mask != 'None':
        try:
            mask = io.LoadITKFile(regObj.I1_mask, device)
        except RuntimeError:
            regObj.I1_mask = _getFilePath('Mask Image File (Target)', initialdir='/')
            mask = io.LoadITKFile(regObj.I1_mask, device)
        mask = so.ResampleWorld.Create(regObj.I1, device=opt.device)(mask)
        regObj.I1 = regObj.I1 * ((1 - mask) * -1)
        del mask
        torch.cuda.empty_cache()

    if regObj.I0_mask != 'None':
        try:
            mask = io.LoadITKFile(regObj.I0_mask, device)
        except RuntimeError:
            regObj.I0_mask = _getFilePath('Mask Image File (Source)', initialdir='/')
            mask = io.LoadITKFile(regObj.I0_mask, device)
        mask = so.ResampleWorld.Create(regObj.I0, device=opt.device)(mask)
        regObj.I0 = regObj.I0 * ((1 - mask) * -1)
        del mask
        torch.cuda.empty_cache()

    # Gaussian blur the images - blur after masking so the edges are not as sharp
    if regObj.gaussSigma != 'None':
        blur = so.Gaussian.Create(1, regObj.gaussKernel * 3, regObj.gaussSigma * 3,
                                  dim=3, device=device, dtype=torch.float32)
        regObj.I0 = blur(regObj.I0)
        regObj.I1 = blur(regObj.I1)

    deformation = regObj.I1.clone()
    deformation.set_to_identity_lut_()
    deformation_list = []

    # Create a grid composer
    composer = so.ComposeGrids(device=device, dtype=torch.float32, padding_mode='border')

    for i, s in enumerate(regObj.scales):
        scale_source = regObj.I0.set_size(regObj.I1.size // s, inplace=False)
        scale_target = regObj.I1.set_size(regObj.I1.size // s, inplace=False)
        deformation = deformation.set_size(regObj.I1.size // s, inplace=False)

        # Apply the deformation accumulated so far to the source image
        scale_source = so.ApplyGrid(deformation)(scale_source)

        # Create the matching term
        similarity = so.L2Similarity.Create(dim=3, device=device)

        # Create the smoothing operator
        operator = so.FluidKernel.Create(
            scale_target,
            device=device,
            alpha=1.0,
            beta=0.0,
            gamma=0.001,
        )

        # Create the regularizer
        regularizer = so.NormGradient.Create(weight=regObj.regWeight[i],
                                             device=device,
                                             dtype=regObj.I1.dtype,
                                             dim=3)

        # Now register the source and the target volume at this scale
        interday = st.IterativeMatch.Create(
            source=scale_source,
            target=scale_target,
            similarity=similarity,
            operator=operator,
            device=device,
            step_size=regObj.stepSize[i],
            regularization=regularizer,
            incompressible=regObj.incompressible)

        energy = [interday.initial_energy]
        print(f'Iteration: 0 Energy: {interday.initial_energy}')
        # The inner counter is named itr so it does not shadow the scale index i
        for itr in range(1, regObj.Niter[i]):
            energy.append(interday.step())
            print(f'Iteration: {itr} Energy: {energy[-1]}')
            # if energy[-1] > energy[-2] - (3e-4 * energy[-2]):
            #     break

        deformation = interday.get_field()
        deformation_list.append(deformation.clone().set_size(regObj.I1.size, inplace=False))
        deformation = composer(deformation_list[::-1])

    regObj.I0 = src_name
    regObj.I1 = tar_name

    return deformation
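# MultiscaleElast only reads attributes off regObj, so a plain namespace with the fields the
# YAML spec provides is enough to drive it. A minimal sketch of a hand-built config is below;
# the field values are illustrative defaults, not values used in any experiment.
def _example_elast_config():
    from types import SimpleNamespace
    return SimpleNamespace(
        I0='source.nii.gz', I1='target.nii.gz',
        croppedImage='None', medianFilterSize='None', tvWeight='None',
        sourceVarESig='None', targetVarESig='None',
        I0_mask='None', I1_mask='None',
        gaussSigma='None', gaussKernel='None',
        scales=[4, 2, 1],                 # coarse-to-fine pyramid
        Niter=[200, 200, 100],            # iterations per scale
        stepSize=[0.004, 0.004, 0.002],   # gradient step per scale
        regWeight=[1.0, 1.0, 1.0],        # regularizer weight per scale
        incompressible=False)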
def register_histopathology_to_blockface(rabbit, block, img_num, bf_slice): blockface_dir = f'/hdscratch/ucair/{rabbit}/blockface/{block}/' histology_dir = f'/hdscratch/ucair/{rabbit}/microscopic/{block}/' out_dir = f'{histology_dir}deformations/' if not os.path.exists(out_dir): os.makedirs(out_dir) # if os.path.exists(f'{out_dir}/img_{img_num}_deformation_to_blockface.mhd'): # return # Load and make the histopathology segmentation segs = [] segs += [ io.LoadITKFile( f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_healthy_tissue.nrrd', device=device) ] if os.path.exists( f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd' ): segs += [ io.LoadITKFile( f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_ablated_region.nrrd', device=device) ] if os.path.exists( f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd' ): segs += [ io.LoadITKFile( f'{histology_dir}segmentations/IMG_{img_num}/img_{img_num}_uncertain_region.nrrd', device=device) ] histology_seg = core.StructuredGrid.FromGrid(segs[0], channels=1) for seg in segs: histology_seg += seg try: blockface_seg = io.LoadITKFile( f'{blockface_dir}volumes/raw/hd_labels/{block}_hdlabel_volume.nrrd', device=device) except: blockface_seg = io.LoadITKFile( f'{blockface_dir}volumes/raw/segmentation_volume.nrrd', device=device) # # Load the surface slice and get the difference # blockface_surf = io.LoadITKFile(f'{blockface_dir}volumes/raw/surface/IMG_{bf_slice:03d}_surface.mhd', # device=device) # # blockface_surf_p1 = io.LoadITKFile(f'{blockface_dir}volumes/raw/surface/IMG_{bf_slice + 1:03d}_surface.mhd', # device=device) # # diff = (blockface_surf - blockface_surf_p1).data[2] # # diff = (diff - diff.min()) / (diff.max() - diff.min()) # Extract the slice blockface_seg = blockface_seg.extract_slice(bf_slice - 1, dim=0) aff_seg, affine = solve_affine(histology_seg, blockface_seg, img_num, out_dir=out_dir, device=device) np.savetxt(f'{out_dir}/img_{img_num}_affine_to_blockface.txt', affine.cpu().numpy()) #### Apply the affine to the image mic_file = f'{histology_dir}hdf5/{block}_img{img_num}_image.hdf5' meta_dict = {} with h5py.File(mic_file, 'r') as f: mic = f['RawImage/ImageData'][:, ::10, ::10] for key in f['RawImage'].attrs: meta_dict[key] = f['RawImage'].attrs[key] mic = core.StructuredGrid(mic.shape[1:], tensor=torch.tensor(mic, dtype=torch.float32, device=device), spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device), origin=histology_seg.origin, device=device, dtype=torch.float32, channels=3) mic = (mic - mic.min()) / (mic.max() - mic.min()) aff_mic = so.AffineTransform.Create(affine=affine)(mic, blockface_seg) # plt.figure() # plt.imshow(aff_mic.data.permute(1,2,0).cpu()) # plt.axis('off') # plt.gca().invert_yaxis() # plt.savefig(f'/home/sci/blakez/ucair/Animations/Scrolls/Mic/Images/{blockface_slice}_image.png', dpi=500, bbox_inches='tight', pad_inches=0) def_histology, deformation = deformable_histology_to_blockface( aff_seg, blockface_seg, steps=[0.01, 0.005], scales=[4, 1], gauss=True, mic=aff_mic) # Save out the deformation io.SaveITKFile(deformation, f'{out_dir}/img_{img_num}_deformation_to_blockface.mhd')
def solve_affines(block_list):
    for block_path in block_list:
        block = block_path.split('/')[-1]

        if not os.path.exists(f'{block_path}/volumes/raw/'):
            os.makedirs(f'{block_path}/volumes/raw/difference/')
            os.makedirs(f'{block_path}/volumes/raw/surface/')
            os.makedirs(f'{block_path}/volumes/raw/scatter/')
        # elif sorted(glob.glob(f'{block_path}/volumes/raw/difference/*')):
        #     print('already filtered')
        #     continue

        image_list = sorted(glob.glob(f'{block_path}/images/filtered/*'))
        if not image_list:
            print(f'No filtered image files found for {block} ... skipping')
            continue

        print(f'Solving Affines for {block} ... ')
        spacing = list(map(float, input(f'X,Y Spacing for {block}: ').strip().split(' ')))
        # spacing = [0.0163, 0.0163]

        surface_list = [x for x in image_list if 'surface' in x]
        scatter_list = [x for x in image_list if 'scatter' in x]

        ImScatter = io.LoadITKFile(scatter_list[0], device=device)
        ImScatter.set_spacing_(spacing)
        ImScatter.set_origin_(-1 * (ImScatter.size * ImScatter.spacing) / 2)
        ImScatter /= ImScatter.max()

        # Load the surface image
        ImSurface = io.LoadITKFile(surface_list[0], device=device)
        ImSurface.set_spacing_(spacing)
        ImSurface.set_origin_(-1 * (ImSurface.size * ImSurface.spacing) / 2)
        ImSurface /= ImSurface.max()

        # Save out the first image
        difference = ImScatter - ImSurface
        ImDifference = core.StructuredGrid(
            ImSurface.shape()[1:],
            tensor=difference.data[2].unsqueeze(0),
            spacing=ImSurface.spacing,
            origin=ImSurface.origin,
            device=device,
            dtype=torch.float32,
            channels=1)

        io.SaveITKFile(ImScatter, f'{block_path}/volumes/raw/scatter/IMG_001_scatter.mhd')
        io.SaveITKFile(ImSurface, f'{block_path}/volumes/raw/surface/IMG_001_surface.mhd')
        io.SaveITKFile(ImDifference, f'{block_path}/volumes/raw/difference/IMG_001_difference.mhd')

        ImPrev = ImScatter.copy()

        # The first scatter image defines the frame, so it gets the identity affine (index 0)
        Adict = {
            'origin': ImPrev.origin.tolist(),
            'spacing': ImPrev.spacing.tolist(),
            0: np.eye(3).tolist()
        }

        maxIter = 1000

        for scat, surf in zip(scatter_list[1:], surface_list[1:]):
            print(f'Registering {scat.split("/")[-1]} .... ', end='')
            sys.stdout.flush()

            image_num = scat.split('/')[-1].split('_')[1]

            # Get the number the file is from the start
            dist = scatter_list.index(scat)

            # Load the next image
            ImScatter = io.LoadITKFile(scat, device=device)
            ImScatter.set_spacing_(ImPrev.spacing)
            ImScatter.set_origin_(ImPrev.origin)
            ImScatter /= ImScatter.max()

            ImSurface = io.LoadITKFile(surf, device=device)
            ImSurface.set_spacing_(ImPrev.spacing)
            ImSurface.set_origin_(ImPrev.origin)
            ImSurface /= ImSurface.max()

            difference = ImScatter - ImSurface
            ImDifference = core.StructuredGrid(
                ImSurface.shape()[1:],
                tensor=difference.data[2].unsqueeze(0),
                spacing=ImSurface.spacing,
                origin=ImSurface.origin,
                device=device,
                dtype=torch.float32,
                channels=1)

            affine = affine_register(ImPrev.copy(), ImSurface.copy(), niter=maxIter, device=device)

            # Save out the images
            aff_filter = so.AffineTransform.Create(affine=affine, device=device)
            aff_scatter = aff_filter(ImScatter)
            aff_surface = aff_filter(ImSurface)
            aff_difference = aff_filter(ImDifference)
            # difference = (difference - difference.min()) / (difference.max() - difference.min())

            io.SaveITKFile(aff_scatter, f'{block_path}/volumes/raw/scatter/IMG_{image_num}_scatter.mhd')
            io.SaveITKFile(aff_surface, f'{block_path}/volumes/raw/surface/IMG_{image_num}_surface.mhd')
            io.SaveITKFile(aff_difference, f'{block_path}/volumes/raw/difference/IMG_{image_num}_difference.mhd')

            Adict[dist] = affine.detach().cpu().clone().tolist()
            ImPrev = aff_scatter.copy()

        with open(f'{block_path}/volumes/raw/{block}_affine_dictionary.yaml', 'w') as f:
            yaml.dump(Adict, f)

        _make_volumes(block_path)
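# solve_affines registers each slice to the previous *aligned* slice, so the affine stored in
# Adict for a given index already maps that raw slice into the stacked frame. A sketch of how
# a stored affine could be re-applied later (for example to re-filter one slice without
# re-registering); the helper name is hypothetical, the dictionary layout matches what is
# written above:
def reapply_affine(block_path, block, index, image):
    with open(f'{block_path}/volumes/raw/{block}_affine_dictionary.yaml', 'r') as f:
        Adict = yaml.load(f, Loader=yaml.FullLoader)
    image.set_spacing_(Adict['spacing'])
    image.set_origin_(Adict['origin'])
    affine = torch.tensor(Adict[index], dtype=torch.float32, device=device)
    return so.AffineTransform.Create(affine=affine, device=device)(image)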
def stitch_surfaces(rabbit):
    rabbit_dir = f'/hdscratch/ucair/{rabbit}/blockface/'
    raw_ext = '/surfaces/raw/'
    vol_ext = '/volumes/raw/'

    # Get a list of the blocks
    block_list = sorted(glob.glob(f'{rabbit_dir}block*'))

    # complete = ['block08']
    complete = ['block06', 'block09']

    for i, block_path in enumerate(block_list):
        block = block_path.split('/')[-1]
        stitching_dir = f'{rabbit_dir}{block}{raw_ext}/stitching/'

        if block in complete:
            continue

        if not os.path.exists(stitching_dir):
            print(f'No stitching surfaces found for {block}.')
            continue

        target_surface_path = f'{stitching_dir}/raw/{block}_target_faces.obj'
        source_surface_path = f'{stitching_dir}/raw/{block}_source_faces.obj'

        # Load the target surface
        try:
            verts, faces = io.ReadOBJ(target_surface_path)
            tar_surface = core.TriangleMesh(verts, faces)
            tar_surface.to_(device)
        except IOError:
            print(f'The target stitching surface for {block} was not found ... skipping')
            continue

        try:
            verts, faces = io.ReadOBJ(source_surface_path)
            src_surface = core.TriangleMesh(verts, faces)
            src_surface.to_(device)
            src_surface.flip_normals_()
        except IOError:
            print(f'The source stitching surface for {block} was not found ... skipping')
            continue

        # Need to load the exterior to drag along
        try:
            verts, faces = io.ReadOBJ(f'{rabbit_dir}/{block}/{raw_ext}/{block}_decimate.obj')
            surface_ext = core.TriangleMesh(verts, faces)
            surface_ext.to_(device)
        except IOError:
            print(f'The exterior surface for {block} was not found ... skipping')
            continue

        # Determine the surface half way between the source and the target
        try:
            with open(f'{stitching_dir}/raw/{block}_middle_surface_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'currents_sigma': [5.0, 0.25, 0.05],
                'propagation_sigma': [1.0, 1.0, 1.0],
                'deformable_lr': [0.0008, 0.01, 0.01],
                'converge': 0.05,
                'mid_offset': 0.5
            }

        # Do the deformable registration
        def_src_surface, def_ext = tools.deformable_register(
            tar_surface.copy(),
            src_surface.copy(),
            src_excess=None,
            deformable_lr=params['deformable_lr'],
            currents_sigma=params['currents_sigma'],
            prop_sigma=params['propagation_sigma'],
            grid_size=None,
            converge=params['converge'],
            accu_forward=False,
            accu_inverse=False,
            device=device,
        )

        new_verts = src_surface.vertices.clone() + (
            (def_src_surface.vertices - src_surface.vertices) * params['mid_offset'])
        mid_surface = src_surface.copy()
        mid_surface.vertices = new_verts.clone()
        mid_surface.calc_normals()
        mid_surface.calc_centers()

        io.WriteOBJ(def_src_surface.vertices, def_src_surface.indices,
                    f'{stitching_dir}/deformable_pieces/{block}_source_faces.obj')
        io.WriteOBJ(mid_surface.vertices, mid_surface.indices,
                    f'{stitching_dir}/deformable_pieces/{block}_source_middle.obj')

        with open(f'{stitching_dir}/raw/{block}_middle_surface_config.yaml', 'w') as f:
            yaml.dump(params, f)

        # Load the binary volume for the block
        mask = io.LoadITKFile(f'{rabbit_dir}/{block}/{vol_ext}/segmentation_volume.mhd', device='cuda:0')

        # Load the other surfaces to drag along
        extras_paths = [
            f'{rabbit_dir}{block}{raw_ext}{block}_decimate.obj',
            f'{rabbit_dir}{block}{raw_ext}{block}_ext.obj'
        ]

        if os.path.exists(f'{rabbit_dir}{block}{raw_ext}{block}_head.obj'):
            extras_paths += [f'{rabbit_dir}{block}{raw_ext}{block}_head.obj']
        if os.path.exists(f'{rabbit_dir}{block}{raw_ext}{block}_foot.obj'):
            extras_paths += [f'{rabbit_dir}{block}{raw_ext}{block}_foot.obj']
        if os.path.exists(f'{rabbit_dir}{block}{raw_ext}{block}_head_support.obj'):
            extras_paths += [f'{rabbit_dir}{block}{raw_ext}{block}_head_support.obj']
        if os.path.exists(f'{rabbit_dir}{block}{raw_ext}{block}_foot_support.obj'):
            extras_paths += [f'{rabbit_dir}{block}{raw_ext}{block}_foot_support.obj']

        extra_surfaces = []
        for path in extras_paths:
            try:
                verts, faces = io.ReadOBJ(path)
            except IOError:
                extra_name = path.split('/')[-1]
                print(f'{extra_name} not found as an extra ... removing from list')
                _ = extras_paths.pop(extras_paths.index(path))
                continue

            extra_surfaces += [core.TriangleMesh(verts, faces)]
            extra_surfaces[-1].to_(device)

        # Define the diffusion parameters for the registration
        try:
            with open(f'{stitching_dir}/raw/{block}_diffusion_config.yaml', 'r') as f:
                diff_params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            diff_params = {
                'z_diff_c': 5.0,
                'y_diff_c': 200.0,
                'x_diff_c': 30.0,
                'background_c': 0.5,
                'niter': 10000,
                'gamma': 0.0005,
                'propegation_sigma': [0.2, 0.2, 0.2],
                'grad_amp': 300.0
            }

        try:
            with open(f'{stitching_dir}/raw/{block}_stitch_surface_config.yaml', 'r') as f:
                params = yaml.load(f, Loader=yaml.FullLoader)
        except IOError:
            params = {
                'currents_sigma': [2.0, 1.0],
                'smoothing_sigma': [2.0, 2.0, 2.0],
                'deformable_lr': [0.0001, 0.001],
                'grid_size': [40, 256, 256],
                'converge': 0.05,
                'grid_device': 'cpu',
                'niters': 25
            }

        # Do the deformable registration with the source to the mid
        def_src_surface, def_tar_surface, def_extra_surfaces, phi, phi_inv = deformable_register(
            tar_surface.copy(),
            src_surface.copy(),
            mid_surface.copy(),
            src_excess=extra_surfaces,
            deformable_lr=params['deformable_lr'],
            currents_sigma=params['currents_sigma'],
            prop_sigma=params['smoothing_sigma'],
            grid_size=params['grid_size'],
            converge=params['converge'],
            accu_forward=True,
            accu_inverse=True,
            grid_device=params['grid_device'],
            device=device,
            mask=mask,
            diff_params=diff_params,
            iters=params['niters'])

        with open(f'{stitching_dir}/raw/{block}_stitch_surface_config.yaml', 'w') as f:
            yaml.dump(params, f)
        with open(f'{stitching_dir}/raw/{block}_diffusion_config.yaml', 'w') as f:
            yaml.dump(diff_params, f)

        io.WriteOBJ(def_src_surface.vertices, def_src_surface.indices,
                    f'{stitching_dir}/deformable/{block}_whole_stitched_decimate.obj')

        io.SaveITKFile(phi, f'{stitching_dir}/deformable/{block}_stitch_phi.mhd')
        io.SaveITKFile(phi_inv, f'{stitching_dir}/deformable/{block}_stitch_phi_inv.mhd')

        out_path = f'{stitching_dir}/deformable/'
        for extra_path, extra_surface in zip(extras_paths, def_extra_surfaces):
            name = extra_path.split('/')[-1]
            io.WriteOBJ(extra_surface.vertices, extra_surface.indices, f'{out_path}{name}')

        # Hard-coded volume used only by the optional visual checks commented out below
        vol = io.LoadITKFile(
            '/hdscratch/ucair/18_047/blockface/block08/volumes/raw/difference_volume.mhd',
            device='cuda:0')

        # phi_inv.set_size((60, 1024, 1024))
        # phi.set_size((60, 1024, 1024))
        # resampled_stitched = so.ApplyGrid.Create(phi_inv, device='cuda:0')(vol, phi_inv)
        # resampled_unstitched = so.ApplyGrid.Create(phi, device='cuda:0')(resampled_stitched, phi)
        # io.SaveITKFile(resampled_stitched, '/home/sci/blakez/stitched_block08.mhd')
        # io.SaveITKFile(resampled_unstitched, '/home/sci/blakez/unstitched_block08.mhd')

        print(f'Done stitching {block} ... ')
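# The try-load / except-defaults / dump pattern for parameter YAML files appears several times
# in this file (middle surface, diffusion, and stitch configs above; affine and deformable
# configs elsewhere). A small helper that captures it, offered as a refactoring sketch:
def load_or_default_params(path, defaults):
    try:
        with open(path, 'r') as f:
            return yaml.load(f, Loader=yaml.FullLoader)
    except IOError:
        return dict(defaults)

# Illustrative usage:
# params = load_or_default_params(f'{stitching_dir}/raw/{block}_middle_surface_config.yaml',
#                                 {'currents_sigma': [5.0, 0.25, 0.05], 'converge': 0.05})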
def LoadDICOM(dicomDirectory, device):
    '''Takes a directory that contains DICOM files and returns a StructuredGrid

    dicomDirectory = Full path to the folder with the dicoms

    Returns a StructuredGrid in the Reference Coordinate System (RCS)
    '''

    # Read the DICOM files in the directory
    if isinstance(dicomDirectory, list):
        dicoms = dicomDirectory
    else:
        dicoms = dc.read_dicom_directory(dicomDirectory)

    # Sort the loaded dicoms
    sort_dcms = dc.sort_dicoms(dicoms)
    acqList = [sort_dcms[x].AcquisitionNumber for x in range(len(sort_dcms))]

    # Need to account for if there are multiple acquisitions in a single folder
    if np.min(acqList) != np.max(acqList):
        volList = []
        for acq in range(np.max(acqList)):
            dcm_list = [x for x in sort_dcms if x.AcquisitionNumber == (acq + 1)]
            volList += [LoadDICOM(dcm_list, device)]
        return volList

    # Extract the actual volume of pixels
    pixel_vol = dc.get_volume_pixeldata(sort_dcms)

    # Generate the affine from the dicom headers (THIS CODE WAS MODIFIED FROM dicom2nifti)
    affine, spacing, pp = dc.create_affine(sort_dcms)
    if type(affine) == str:
        return 'Not A Volume'

    # Convert the dicom volume to a StructuredGrid - numpy is X, Y, Z and CAMP is Z, Y, X
    rawDicom = core.StructuredGrid(
        size=pixel_vol.shape,
        origin=[0.0, 0.0, 0.0],
        tensor=torch.tensor(pixel_vol.astype(float)).unsqueeze(0),  # np.float is removed in modern NumPy
        device=device,
        channels=1
    )

    rcs_grid = SolveAffineGrid(rawDicom, affine)

    wcsTrans = np.eye(4)
    if pp == 'HFS':
        wcsTrans[0, 0] *= -1
        wcsTrans[1, 1] *= -1
    if pp == 'FFS':
        wcsTrans[1, 1] *= -1
        wcsTrans[2, 2] *= -1
    if pp == 'FFP':
        wcsTrans[0, 0] *= -1
        wcsTrans[2, 2] *= -1

    world_grid = SolveAffineGrid(rcs_grid, wcsTrans)

    rcs_grid = so.AffineTransform.Create(affine=torch.tensor(affine, dtype=torch.float, device=device))(
        rawDicom, rcs_grid, xyz_affine=False)
    world_grid = so.AffineTransform.Create(affine=torch.tensor(wcsTrans, dtype=torch.float, device=device))(
        rcs_grid, world_grid, xyz_affine=False)

    return world_grid
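# SolveAffineGrid (used here and in _convert_mat_files) determines the output grid implied by
# pushing a volume through an affine. The core idea is sketched below under the assumption
# that the output grid is defined by the transformed corner extents of the input volume, with
# unit spacing; the helper name is hypothetical.
def _affine_output_grid(grid, affine, device='cpu'):
    import itertools
    zyx_max = (grid.origin + grid.size * grid.spacing).tolist()
    corners = list(itertools.product(*zip(grid.origin.tolist(), zyx_max)))
    corners = torch.tensor(corners, dtype=torch.float32)            # (8, 3) corner coordinates
    A = torch.tensor(affine, dtype=torch.float32)
    warped = corners @ A[0:3, 0:3].T + A[0:3, 3]                    # apply the affine to the corners
    new_min = warped.min(0).values
    new_size = (warped.max(0).values - new_min).ceil()
    return core.StructuredGrid(size=new_size.tolist(), origin=new_min.tolist(),
                               spacing=[1.0, 1.0, 1.0], channels=1, device=device)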
def process_mic(rabbit):
    raw_mic_dir = f'/hdscratch/ucair/{rabbit}/microscopic/'
    bf_dir = f'/hdscratch/ucair/{rabbit}/blockface/'
    raw_bf_dir = f'/hdscratch/ucair/blockface/{rabbit}/'

    from Histology.NNSeg.models import UNet
    # from types import SimpleNamespace
    import torch.nn as nn
    from skimage import color

    model = UNet.UNet(6, 3)
    # saved_dict = SimpleNamespace(**torch.load(f'./Histology/NNSeg/model_weights/epoch_00230_model.pth'))
    # if opt.cuda:
    device = torch.device('cuda')
    model = model.to(device=device)
    model = nn.DataParallel(model)
    params = torch.load('./NNSeg/model_weights/epoch_00230_model.pth')
    model.load_state_dict(params['state_dict'])
    # else:
    #     device = torch.device('cpu')
    #     params = torch.load(f'./Histology/NNSeg/model_weights/epoch_00230_model.pth', map_location='cpu')
    #     model.load_state_dict(params['state_dict'])

    block_list = sorted(glob.glob(f'{raw_mic_dir}/block*'))

    for block_path in block_list:
        block = block_path.split('/')[-1]

        # Make sure that the hdf5 files have been written
        if not os.path.exists(f'{raw_mic_dir}{block}/hdf5/'):
            os.makedirs(f'{raw_mic_dir}{block}/hdf5/')
            convert_hdf5(block_path, raw_mic_dir)

        mic_list = sorted(glob.glob(f'{block_path}/raw/*_image.tif'))
        # if mic_list == []:
        mic_list += sorted(glob.glob(f'{block_path}/raw/*_image.jpg'))

        img_nums = [x.split('/')[-1].split('_')[1] for x in mic_list]

        for img in img_nums:
            if os.path.exists(f'{raw_mic_dir}{block}/segmentations/IMG_{img}/img_{img}_gmm_segmentation.nii.gz'):
                continue

            mic_file = f'{raw_mic_dir}{block}/hdf5/{block}_img{img}_image.hdf5'

            meta_dict = {}
            with h5py.File(mic_file, 'r') as f:
                mic = f['RawImage/ImageData'][:, ::10, ::10]
                for key in f['RawImage'].attrs:
                    meta_dict[key] = f['RawImage'].attrs[key]

            mic = core.StructuredGrid(mic.shape[1:],
                                      tensor=torch.tensor(mic, dtype=torch.float32, device=device),
                                      spacing=torch.tensor([10.0, 10.0], dtype=torch.float32, device=device),
                                      device='cpu',
                                      dtype=torch.float32,
                                      channels=3)
            mic = (mic - mic.min()) / (mic.max() - mic.min())

            plt.figure()
            plt.imshow(mic.data.permute(1, 2, 0).cpu())
            plt.title('Microscopic Image')
            plt.show()
            plt.pause(1.0)

            satisfied = False
            while not satisfied:
                n_comps = int(input("Enter the number of components for segmentation: "))

                print('Clustering ... ', end='')
                cluster = cluster_image(mic, n_comps)
                print('done')

                plt.figure()
                plt.imshow(cluster)
                plt.title('GMM Cluster')
                plt.colorbar()
                plt.show()
                plt.pause(1.0)

                redo_cluster = input("Are you satisfied with the clustering? [y/n]: ")
                if redo_cluster == 'y':
                    satisfied = True

            # rgb2hsv returns float64, so cast back to float32 before concatenating with the
            # float32 RGB channels
            input_hsv = torch.from_numpy(
                color.rgb2hsv(mic.data.squeeze().permute(1, 2, 0))).permute(2, 0, 1).float()
            inputs = torch.cat([mic.data.squeeze(), input_hsv], dim=0)
            pred = model(inputs.unsqueeze(0)).cpu().detach().squeeze()

            seg = torch.zeros((pred.shape[1], pred.shape[2]))
            for i in range(0, pred.shape[0]):
                label = pred[i]
                seg[label > 0] = i

            # Save out the original image and the segmentation
            out_path = f'{raw_mic_dir}{block}/segmentations/IMG_{img}/'
            if not os.path.exists(out_path):
                os.makedirs(out_path)

            io.SaveITKFile(mic, f'{out_path}/img_{img}_color.nii.gz')
            io.SaveITKFile(core.StructuredGrid.FromGrid(mic, mic.data[0].unsqueeze(0)),
                           f'{out_path}/img_{img}_red.nii.gz')
            io.SaveITKFile(core.StructuredGrid.FromGrid(mic, mic.data[1].unsqueeze(0)),
                           f'{out_path}/img_{img}_green.nii.gz')
            io.SaveITKFile(core.StructuredGrid.FromGrid(mic, mic.data[2].unsqueeze(0)),
                           f'{out_path}/img_{img}_blue.nii.gz')
            io.SaveITKFile(core.StructuredGrid.FromGrid(mic, torch.tensor(cluster).unsqueeze(0)),
                           f'{out_path}/img_{img}_gmm_segmentation.nii.gz')
            io.SaveITKFile(core.StructuredGrid.FromGrid(mic, seg.unsqueeze(0)),
                           f'{out_path}/img_nn_seg.nii.gz')

            plt.close('all')
            print(f'Done with {img}')
def register(rabbit, base_dir='/hdscratch/ucair/'): source_path = f'{base_dir}{rabbit}/mri/exvivo/surfaces/raw/' target_path = f'{base_dir}{rabbit}/mri/invivo/surfaces/raw/' source_file = f'{source_path}exvivo_ablation_region_decimate.obj' target_file = f'{target_path}invivo_ablation_region_decimate.obj' verts, faces = io.ReadOBJ(target_file) invivo_surface = core.TriangleMesh(verts, faces) invivo_surface.to_(device=device) verts, faces = io.ReadOBJ(source_file) exvivo_surface = core.TriangleMesh(verts, faces) exvivo_surface.to_(device=device) print('Starting Affine ... ') # Load or create the dictionary for registration try: with open(f'{source_path}affine_config.yaml', 'r') as f: params = yaml.load(f, Loader=yaml.FullLoader) except IOError: params = { 'spatial_sigma': [15.0], 'affine_lr': 1.0e-07, 'translation_lr': 1.0e-06, 'converge': 4.0 } try: aff = np.loadtxt(f'{source_path}exvivo_to_invivo_affine.txt') aff = torch.tensor(aff, device=device) except IOError: aff = affine(invivo_surface.copy(), exvivo_surface.copy(), affine_lr=params['affine_lr'], translation_lr=params['translation_lr'], converge=params['converge'], spatial_sigma=params['spatial_sigma'], device=device) # Save out the parameters: with open(f'{source_path}affine_config.yaml', 'w') as f: yaml.dump(params, f) np.savetxt(f'{source_path}exvivo_to_invivo_affine.txt', aff.cpu().numpy()) aff_tfrom = uo.AffineTransformSurface.Create(aff, device=device) aff_exvivo = aff_tfrom(exvivo_surface) if not os.path.exists(f'{source_path}../affine/'): os.makedirs(f'{source_path}../affine/') io.WriteOBJ( aff_exvivo.vertices, aff_exvivo.indices, f'{source_path}../affine/exvivo_to_invivo_{rabbit}_affine.obj') print('Starting Deformable ... ') try: with open(f'{source_path}/deformable_config.yaml', 'r') as f: params = yaml.load(f, Loader=yaml.FullLoader) except IOError: params = { 'currents_sigma': [5.0, 1.0], 'propagation_sigma': [8.0, 8.0, 8.0], 'deformable_lr': [2.0e-04, 1.0e-04], 'converge': 4.0, 'phi_inv_size': [32, 32, 32], 'n_iters': 500, } def_surface, _, phi, phi_inv = tools.deformable_register( invivo_surface.copy(), aff_exvivo.copy(), src_excess=None, deformable_lr=params['deformable_lr'], currents_sigma=params['currents_sigma'], prop_sigma=params['propagation_sigma'], converge=params['converge'], grid_size=params['phi_inv_size'], accu_forward=True, accu_inverse=True, device=device, grid_device='cuda:0', expansion_factor=1.5, iters=params['n_iters']) # Save out the parameters: with open(f'{source_path}deformable_config.yaml', 'w') as f: yaml.dump(params, f) if not os.path.exists(f'{source_path}../../volumes/raw//'): os.makedirs(f'{source_path}../../volumes/raw//') if not os.path.exists(f'{source_path}../deformable/'): os.makedirs(f'{source_path}../deformable/') io.SaveITKFile( phi_inv, f'{source_path}../../volumes/raw/exvivo_to_invivo_phi_inv.mhd') io.SaveITKFile(phi, f'{source_path}../../volumes/raw/exvivo_to_invivo_phi.mhd') io.WriteOBJ(def_surface.vertices, def_surface.indices, f'{source_path}../deformable/exvivo_to_invivo_deformable.obj')
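# A minimal entry point for the exvivo-to-invivo registration above; the rabbit ID is an
# illustrative example, not a prescribed input.
if __name__ == '__main__':
    register('18_047')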