def read_pixel_size(params):
    '''
    Read the detector pixel size and the objective magnification from the
    DXchange file and store the effective pixel size
    (pixel size / magnification, in microns) in ``params.pixel_size``.
    *params* is returned unchanged when automatic reading is disabled or
    when the metadata cannot be read.
    '''
    log.info(' *** auto pixel size reading')
    if params.pixel_size_auto != True:
        log.info(' *** *** OFF')
        return params
    pixel_size = config.param_from_dxchange(
        params.file_name,
        '/measurement/instrument/detector/pixel_size_x')
    mag = config.param_from_dxchange(
        params.file_name,
        '/measurement/instrument/detection_system/objective/magnification')
    # Bail out if either value was missing or zero in the file.
    if not pixel_size or not mag:
        log.warning(' *** *** problem reading pixel size from DXchange')
        return params
    # The stored pixel size may be in mm or m rather than microns: scale
    # up by 1e3 at most three times until the value looks like microns.
    scale_attempts = 0
    while pixel_size < 0.5 and scale_attempts < 3:
        pixel_size *= 1e3
        scale_attempts += 1
    params.pixel_size = pixel_size / mag
    log.info(' *** *** effective pixel size = {:6.4e} microns'.format(
        params.pixel_size))
    return params
def padding(data, rotation_axis, params):
    """Edge-pad each sinogram row to 1.5x its width when padding is enabled
    for the selected algorithm (gridrec or lprec_fbp).

    Returns the (possibly padded) data and the rotation center shifted by
    the amount of left padding.
    """
    log.info(" *** padding")
    wants_padding = (
        (params.reconstruction_algorithm == 'gridrec'
         and params.gridrec_padding)
        or (params.reconstruction_algorithm == 'lprec_fbp'
            and params.lprec_fbp_padding))
    if not wants_padding:
        log.warning(' *** *** OFF')
        return data, rotation_axis
    log.info(' *** *** ON')
    nproj, nrows, width = data.shape
    margin = width // 4
    padded = np.zeros([nproj, nrows, 3 * width // 2], dtype="float32")
    padded[:, :, margin:margin + width] = data
    # Replicate the edge columns into the padding regions (broadcast).
    padded[:, :, :margin] = data[:, :, 0:1]
    padded[:, :, margin + width:] = data[:, :, -1:]
    return padded, rotation_axis + margin
def all(proj, flat, dark, params, sino):
    """Apply the full preprocessing chain to one chunk of projections.

    Order: zinger removal, optional dark-field zeroing, flat-field
    normalization, stripe removal, then either beam-hardening correction
    or phase retrieval + minus-log, and finally nan/neg/inf cleanup.
    """
    # Zingers are removed from both projections and flats before use.
    proj, flat = zinger_removal(proj, flat, params)

    if params.dark_zero:
        # Treat the dark fields as all-zero when requested.
        dark *= 0
        log.warning(' *** *** dark fields are ignored')

    data = flat_correction(proj, flat, dark, params)
    data = remove_stripe(data, params)

    if params.beam_hardening_method == 'standard':
        # Beam hardening correction leaves the data in pathlength, so
        # phase retrieval and minus-log are skipped.
        data = beamhardening_correct(data, params, sino)
    else:
        data = phase_retrieval(data, params)
        data = minus_log(data, params)

    return remove_nan_neg_inf(data, params)
def padding(data, rotation_axis, params):
    """Edge-pad each sinogram row to 1.5x its width when params.padding is
    set.  Returns the (possibly padded) data and the rotation center
    shifted by the amount of left padding.
    """
    log.info(" *** padding")
    if not params.padding:
        log.warning(' *** *** OFF')
        return data, rotation_axis
    log.info(' *** *** ON')
    nproj, nrows, width = data.shape
    margin = width // 4
    padded = np.zeros([nproj, nrows, 3 * width // 2], dtype="float32")
    padded[:, :, margin:margin + width] = data
    # Replicate the edge columns into the padding regions (broadcast).
    padded[:, :, :margin] = data[:, :, 0:1]
    padded[:, :, margin + width:] = data[:, :, -1:]
    return padded, rotation_axis + margin
def remove_stripe(data, params):
    """Remove sinogram stripes (ring artifacts).

    The method is selected by params.remove_stripe_method:
    'fw' (Fourier-wavelet), 'ti' (Titarenko), 'sf' (smoothing filter)
    or 'none' (disabled).  Returns the filtered data.
    """
    log.info(' *** remove stripe:')
    if (params.remove_stripe_method == 'fw'):
        log.info(' *** *** fourier wavelet')
        data = tomopy.remove_stripe_fw(data,
                                       level=params.fw_level,
                                       wname=params.fw_filter,
                                       sigma=params.fw_sigma,
                                       pad=params.fw_pad)
        log.info(' *** *** *** fw level %d ' % params.fw_level)
        log.info(' *** *** *** fw wname %s ' % params.fw_filter)
        log.info(' *** *** *** fw sigma %f ' % params.fw_sigma)
        log.info(' *** *** *** fw pad %r ' % params.fw_pad)
    elif (params.remove_stripe_method == 'ti'):
        log.info(' *** *** titarenko')
        data = tomopy.remove_stripe_ti(data,
                                       nblock=params.ti_nblock,
                                       alpha=params.ti_alpha)
        log.info(' *** *** *** ti nblock %d ' % params.ti_nblock)
        log.info(' *** *** *** ti alpha %f ' % params.ti_alpha)
    elif (params.remove_stripe_method == 'sf'):
        log.info(' *** *** smoothing filter')
        # BUG FIX: was `size == params.sf_size` — a comparison against the
        # undefined name `size` (NameError at runtime) instead of passing
        # the keyword argument.
        data = tomopy.remove_stripe_sf(data, size=params.sf_size)
        log.info(' *** *** *** sf size %d ' % params.sf_size)
    elif (params.remove_stripe_method == 'none'):
        log.warning(' *** *** OFF')
    return data
def minus_log(data, params):
    """Apply tomopy's minus-log transform when params.minus_log is set;
    otherwise return the data untouched."""
    log.info(" *** minus log")
    if not params.minus_log:
        log.warning(' *** *** OFF')
        return data
    log.info(' *** *** ON')
    return tomopy.minus_log(data)
def unpadding(rec, N, params):
    """Crop a reconstruction of padded sinograms (see padding()) back to
    its original N x N field of view when params.padding is set."""
    log.info(" *** un-padding")
    if not params.padding:
        log.warning(' *** *** OFF')
        return rec
    log.info(' *** *** ON')
    lo, hi = N // 4, 5 * N // 4
    return rec[:, lo:hi, lo:hi]
def binning(proj, flat, dark, params):
    """Downsample projections, flats and darks by params.binning powers of
    two (0 disables binning).  Returns the three (possibly binned) arrays."""
    log.info(" *** binning")
    if params.binning == 0:
        log.info(' *** *** OFF')
        return proj, flat, dark
    log.warning(' *** *** ON')
    log.warning(' *** *** binning: %d' % params.binning)
    # Bin each array with the shared helper.
    proj, flat, dark = (_binning(arr, params) for arr in (proj, flat, dark))
    return proj, flat, dark
def log_values(args):
    """Log all values set in the args namespace.

    Arguments are grouped according to their section and logged alphabetically
    using the DEBUG log level thus --verbose is required.
    """
    values = args.__dict__
    log.warning('tomopy-cli status start')
    for section, name in zip(SECTIONS, NICE_NAMES):
        entries = sorted(
            key for key in values if key.replace('_', '-') in SECTIONS[section])
        if not entries:
            continue
        log.info(name)
        for entry in entries:
            value = values[entry] if values[entry] is not None else "-"
            # 'none' strings and disabled (False) flags stand out as warnings;
            # everything else is plain info.
            if value == 'none' or value is False:
                log.warning(" {:<16} {}".format(entry, value))
            else:
                log.info(" {:<16} {}".format(entry, value))
    log.warning('tomopy-cli status end')
def unpadding(rec, N, params):
    """Crop reconstructions of padded sinograms back to the original
    N x N field of view when padding is enabled for the selected
    algorithm (gridrec or lprec_fbp)."""
    log.info(" *** un-padding")
    uses_padding = (
        (params.reconstruction_algorithm == 'gridrec'
         and params.gridrec_padding)
        or (params.reconstruction_algorithm == 'lprec_fbp'
            and params.lprec_fbp_padding))
    if not uses_padding:
        log.warning(' *** *** OFF')
        return rec
    log.info(' *** *** ON')
    lo, hi = N // 4, 5 * N // 4
    return rec[:, lo:hi, lo:hi]
def read_rot_center(params):
    """
    Read the rotation center from /process group in the DXchange file.
    Return: rotation center from this dataset or None if it doesn't exist.
    """
    log.info(' *** *** rotation axis')
    # First, try to read from the /process/tomopy-cli parameters
    with h5py.File(params.file_name, 'r') as file_name:
        try:
            dataset = '/process' + '/tomopy-cli-' + __version__ + '/' + 'find-rotation-axis' + '/' + 'rotation-axis'
            params.rotation_axis = float(file_name[dataset][0])
            log.info(
                ' *** *** Rotation center read from HDF5 file: {0:f}'.format(
                    params.rotation_axis))
            return params
        except (KeyError, ValueError):
            log.warning(' *** *** No rotation center stored in the HDF5 file')
    # If we get here, we need to either find it automatically or from config file.
    log.warning(' *** *** No rotation axis stored in DXchange file')
    if (params.rotation_axis_auto == True):
        log.warning(' *** *** Auto axis location requested')
        log.warning(' *** *** Computing rotation axis')
        params.rotation_axis = find_center.find_rotation_axis(params)
    else:
        # BUG FIX: this message used to print unconditionally, claiming a
        # config-file value was used even right after auto-detection.
        log.info(' *** *** using config file value of {:f}'.format(
            params.rotation_axis))
    return params
def remove_nan_neg_inf(data, params):
    """Replace NaN, negative and infinite values with
    params.fix_nan_and_inf_value when params.fix_nan_and_inf is enabled."""
    log.info(' *** remove nan, neg and inf')
    if params.fix_nan_and_inf != True:
        log.warning(' *** *** OFF')
        return data
    replacement = params.fix_nan_and_inf_value
    log.info(' *** *** ON')
    log.info(' *** *** replacement value %f ' % replacement)
    data = tomopy.remove_nan(data, val=replacement)
    data = tomopy.remove_neg(data, val=replacement)
    # remove_neg already replaced -inf (it is negative); catch +inf here.
    data[data == np.inf] = replacement
    return data
def reconstruct(data, theta, rot_center, params):
    """Run tomographic reconstruction with the algorithm selected in params.

    'astrasirt' / 'astracgls' use the ASTRA toolkit (the data is rolled so
    the rotation axis sits at the array center); 'gridrec' calls tomopy
    directly.  Any other algorithm falls back to gridrec with a warning.
    """
    # "try" mode feeds a stack of sinograms, hence sinogram ordering.
    sinogram_order = params.reconstruction_type == "try"
    log.info(" *** algorithm: %s" % params.reconstruction_algorithm)
    astra_cfg = {
        'astrasirt': ('SIRT_CUDA', 200),
        'astracgls': ('CGLS_CUDA', 15),
    }
    algorithm = params.reconstruction_algorithm
    if algorithm in astra_cfg:
        method, num_iter = astra_cfg[algorithm]
        options = {
            'proj_type': 'cuda',
            'method': method,
            'num_iter': num_iter,
            'extra_options': {'MinConstraint': 0},
        }
        # Center the rotation axis before handing the data to ASTRA.
        shift = int((data.shape[2] / 2 - rot_center) + .5)
        data = np.roll(data, shift, axis=2)
        return tomopy.recon(data, theta,
                            algorithm=tomopy.astra, options=options)
    if algorithm != 'gridrec':
        log.warning(" *** *** algorithm: %s is not supported yet" % algorithm)
        params.reconstruction_algorithm = 'gridrec'
        log.warning(" *** *** using: %s instead" %
                    params.reconstruction_algorithm)
    log.warning(" *** *** sinogram_order: %s" % sinogram_order)
    rec = tomopy.recon(data,
                       theta,
                       center=rot_center,
                       sinogram_order=sinogram_order,
                       algorithm=params.reconstruction_algorithm,
                       filter_name=params.filter)
    return rec
def blocked_view(proj, theta, params):
    """Drop the projections (and matching angles) that fall inside the
    blocked-view range [missing_angles_start, missing_angles_end] when
    params.blocked_views is set."""
    log.info(" *** correcting for blocked view data collection")
    if not params.blocked_views:
        log.warning(' *** *** OFF')
        return proj, theta
    log.warning(' *** *** ON')
    first = params.missing_angles_start
    last = params.missing_angles_end
    # Keep everything before the blocked range and after it.
    # NOTE(review): the `last + 1:-1` slice also drops the very last
    # projection — confirm that is intentional.
    proj = np.concatenate((proj[0:first, :, :], proj[last + 1:-1, :, :]),
                          axis=0)
    theta = np.concatenate((theta[0:first], theta[last + 1:-1]))
    return proj, theta
def flat_correction(proj, flat, dark, params):
    """Normalize projections by flat/dark fields.

    Supported params.flat_correction_method values:
      'standard' - tomopy.normalize with params.normalization_cutoff
      'air'      - tomopy.normalize_bg using params.air edge pixels
      'none'     - pass the projections through unchanged

    Raises ValueError for any other method (previously an unknown method
    fell through and crashed with an UnboundLocalError on `data`).
    """
    log.info(' *** normalization')
    if (params.flat_correction_method == 'standard'):
        data = tomopy.normalize(proj, flat, dark,
                                cutoff=params.normalization_cutoff)
        log.info(' *** *** ON %f cut-off' % params.normalization_cutoff)
    elif (params.flat_correction_method == 'air'):
        data = tomopy.normalize_bg(proj, air=params.air)
        log.info(' *** *** air %d pixels' % params.air)
    elif (params.flat_correction_method == 'none'):
        data = proj
        log.warning(' *** *** normalization is turned off')
    else:
        raise ValueError(
            "Unknown value for *flat_correction_method*: {}. "
            "Valid options are 'standard', 'air', 'none'.".format(
                params.flat_correction_method))
    return data
def mask(data, params):
    """Apply a circular mask to each reconstructed slice when
    params.reconstruction_mask is set; the ratio must lie in (0, 1]."""
    log.info(" *** mask")
    if not params.reconstruction_mask:
        log.warning(' *** *** OFF')
        return data
    log.info(' *** *** ON')
    ratio = params.reconstruction_mask_ratio
    if 0 < ratio <= 1:
        log.warning(" *** mask ratio: %f " % ratio)
        data = tomopy.circ_mask(data, axis=0, ratio=ratio)
    else:
        log.error(" *** mask ratio must be between 0-1: %f is ignored" % ratio)
    return data
def phase_retrieval(data, params):
    """Run single-step (Paganin) phase retrieval when
    params.retrieve_phase_method == 'paganin'; 'none' passes the data
    through unchanged."""
    log.info(" *** retrieve phase")
    if params.retrieve_phase_method == 'none':
        log.warning(' *** *** OFF')
    elif params.retrieve_phase_method == 'paganin':
        log.info(' *** *** paganin')
        log.info(" *** *** pixel size: %s" % params.pixel_size)
        log.info(" *** *** sample detector distance: %s" %
                 params.propagation_distance)
        log.info(" *** *** energy: %s" % params.energy)
        log.info(" *** *** alpha: %s" % params.retrieve_phase_alpha)
        # Unit conversions: pixel_size * 1e-4 and distance / 10 —
        # presumably microns->cm and mm->cm for tomopy; confirm against
        # the parameter definitions.
        data = tomopy.retrieve_phase(
            data,
            pixel_size=(params.pixel_size * 1e-4),
            dist=(params.propagation_distance / 10.0),
            energy=params.energy,
            alpha=params.retrieve_phase_alpha,
            pad=True)
    return data
def all(proj, flat, dark, params):
    """Apply the standard preprocessing chain: zinger removal, optional
    dark-field zeroing, flat-field normalization, stripe removal, phase
    retrieval, minus-log and nan/neg/inf cleanup."""
    # Zingers are removed from both projections and flats before use.
    proj, flat = zinger_removal(proj, flat, params)

    if params.dark_zero:
        # Treat the dark fields as all-zero when requested.
        dark *= 0
        log.warning(' *** *** dark fields are ignored')

    data = flat_correction(proj, flat, dark, params)
    data = remove_stripe(data, params)
    data = phase_retrieval(data, params)
    data = minus_log(data, params)
    return remove_nan_neg_inf(data, params)
def write_hdf(config_file, args=None, sections=None):
    """
    Write in the hdf raw data file the content of *config_file* with values from
    *args* if they are specified, otherwise use the defaults. If
    *sections* are specified, write values from *args* only to those sections,
    use the defaults on the remaining ones.
    """
    if not args.dx_update:
        log.warning(" *** Not saving log data to the projection HDF file.")
        return

    with h5py.File(args.file_name, 'r+') as hdf_file:
        # If the group we will write to already exists, remove it
        if hdf_file.get('/process/tomopy-cli-' + __version__):
            del hdf_file['/process/tomopy-cli-' + __version__]
        log.info(" *** tomopy.conf parameter written to /process%s in file %s " % (__version__, args.file_name))
        config = configparser.ConfigParser()
        for section in SECTIONS:
            config.add_section(section)
            for name, opts in SECTIONS[section].items():
                if args and sections and section in sections and hasattr(args, name.replace('-', '_')):
                    value = getattr(args, name.replace('-', '_'))
                    if isinstance(value, list):
                        value = ', '.join(value)
                else:
                    value = opts['default'] if opts['default'] is not None else ''
                # BUG FIX: removed `prefix = '# ' if value is '' else ''` —
                # it used identity comparison on a string literal (fragile,
                # SyntaxWarning on modern Python) and `prefix` was never used.
                if name != 'config':
                    dataset = '/process' + '/tomopy-cli-' + __version__ + '/' + section + '/' + name
                    # Fixed-length HDF5 strings: leave headroom for the value.
                    dset_length = len(str(value)) * 2 if len(str(value)) > 5 else 10
                    dt = 'S{0:d}'.format(dset_length)
                    hdf_file.require_dataset(dataset, shape=(1,), dtype=dt)
                    log.info(name + ': ' + str(value))
                    try:
                        # np.bytes_ (same type as the removed np.string_
                        # alias) keeps this working on NumPy 2.x.
                        hdf_file[dataset][0] = np.bytes_(str(value))
                    except TypeError:
                        # BUG FIX: was `print(value); raise TypeError`,
                        # which re-raised a blank TypeError and lost the
                        # original message/traceback.
                        log.error('could not store value %r for %s', value, name)
                        raise
def zinger_removal(proj, flat, params):
    """Remove zingers (isolated bright outlier pixels) from projections and
    flats with tomopy's outlier filter; method 'none' disables it."""
    log.info(" *** zinger removal")
    if params.zinger_removal_method == 'none':
        log.warning(' *** *** OFF')
        return proj, flat
    if params.zinger_removal_method == 'standard':
        log.info(' *** *** ON')
        log.info(" *** *** zinger level projections: %d" %
                 params.zinger_level_projections)
        log.info(" *** *** zinger level white: %s" %
                 params.zinger_level_white)
        log.info(" *** *** zinger_size: %d" % params.zinger_size)
        proj = tomopy.misc.corr.remove_outlier(
            proj, params.zinger_level_projections,
            size=params.zinger_size, axis=0)
        flat = tomopy.misc.corr.remove_outlier(
            flat, params.zinger_level_white,
            size=params.zinger_size, axis=0)
    return proj, flat
def segment(params):
    """Apply segmentation to a previously reconstructed dataset (currently
    a placeholder — it only lists the tiff files).  Requires the *_rec
    directory produced by a full reconstruction."""
    # slice/full reconstruction file location
    base = os.path.splitext(os.path.basename(params.hdf_file))[0]
    tail = os.sep + base + '_rec' + os.sep
    top = os.path.dirname(params.hdf_file) + '_rec' + tail
    if not os.path.isdir(top):
        log.error("ERROR: the directory %s does not exist" % top)
        log.error("ERROR: to create one run a full reconstruction first:")
        log.error(
            "ERROR: $ tomopy recon --reconstruction-type full --hdf-file %s"
            % params.hdf_file)
        return
    rec_file_list = sorted(
        entry for entry in os.listdir(top)
        if entry.endswith(('.tiff', '.tif')))
    log.info('found in %s' % top)
    log.info('files %s' % rec_file_list)
    log.info('applying segmentation')
    log.warning('not implemented')
def read_scintillator(params):
    '''Read the scintillator type and thickness from DXchange.
    '''
    if params.scintillator_auto and params.beam_hardening_method.lower() == 'standard':
        log.info(' *** *** Find scintillator params from DXchange')
        params.scintillator_thickness = float(
            config.param_from_dxchange(
                params.file_name,
                '/measurement/instrument/detection_system/scintillator/scintillating_thickness',
                attr=None,
                scalar=True,
                char_array=False))
        log.info(' *** *** scintillator thickness = {:f}'.format(
            params.scintillator_thickness))
        scint_material_string = config.param_from_dxchange(
            params.file_name,
            '/measurement/instrument/detection_system/scintillator/description',
            scalar=False,
            char_array=True)
        # Map the free-text description onto a known material by prefix.
        known_materials = (('luag', 'LuAG_Ce'),
                           ('lyso', 'LYSO_Ce'),
                           ('yag', 'YAG_Ce'))
        description = scint_material_string.lower()
        for prefix, material in known_materials:
            if description.startswith(prefix):
                params.scintillator_material = material
                break
        else:
            # Unknown description: keep the currently configured material.
            log.warning(' *** *** scintillator {:s} not recognized!'.format(
                scint_material_string))
            log.warning(' *** *** using scintillator {:s}'.format(
                params.scintillator_material))
    # Run the initialization for beam hardening.  Needed in case
    # rotation_axis must be computed later.
    if params.beam_hardening_method.lower() == 'standard':
        beamhardening.initialize(params)
    return params
def read_rot_centers(params):
    """Load the per-file rotation axis positions from the JSON file named
    by params.rotation_axis_file inside the params.file_name directory.

    Returns an OrderedDict sorted by key, or None (with a warning) when
    the file is missing or unreadable.
    """
    # Add a trailing slash if missing
    top = os.path.join(params.file_name, '')
    jfname = top + params.rotation_axis_file
    try:
        with open(jfname) as json_file:
            dictionary = json.loads(json_file.read())
    except Exception:
        # Best-effort: report how to create the file and return None.
        log.warning(
            "the json %s file containing the rotation axis locations is missing"
            % jfname)
        log.warning("to create one run:")
        log.warning("$ tomopy find_center --file-name %s" % top)
        return None
    return collections.OrderedDict(sorted(dictionary.items()))
def _set_constraint(extra_options, key, raw_value, param_name):
    """Store float(raw_value) in extra_options[key].

    Warns and leaves extra_options unchanged when the value is missing
    (float(None) raises TypeError) or not numeric (ValueError).
    """
    try:
        extra_options[key] = float(raw_value)
    except (TypeError, ValueError):
        log.warning('Invalid %s value. Ignoring.' % param_name)


def reconstruct(data, theta, rot_center, params):
    """Reconstruct *data* with the algorithm selected in params.

    The ASTRA-based algorithms ('astrasirt', 'astrasart', 'astracgls')
    roll the data so the rotation axis is centered and can optionally
    bootstrap from a gridrec reconstruction.  'gridrec' and 'lprec_fbp'
    call tomopy directly; any other algorithm falls back to gridrec with
    a warning.  Returns the reconstructed volume.
    """
    # "try" mode passes a stack of sinograms, hence sinogram ordering.
    sinogram_order = (params.reconstruction_type == "try")
    log.info(" *** algorithm: %s" % params.reconstruction_algorithm)
    if params.reconstruction_algorithm == 'astrasirt':
        extra_options = {}
        # BUG FIX: these parses previously caught only ValueError, so an
        # unset (None) constraint raised an uncaught TypeError.
        _set_constraint(extra_options, 'MinConstraint',
                        params.astrasirt_min_constraint,
                        'astrasirt_min_constraint')
        _set_constraint(extra_options, 'MaxConstraint',
                        params.astrasirt_max_constraint,
                        'astrasirt_max_constraint')
        options = {
            'proj_type': params.astrasirt_proj_type,
            'method': params.astrasirt_method,
            'num_iter': params.astrasirt_num_iter,
            'extra_options': extra_options,
        }
        if params.astrasirt_bootstrap:
            log.info(' *** *** bootstrapping with gridrec')
            rec = tomopy.recon(data, theta, center=rot_center,
                               sinogram_order=sinogram_order,
                               algorithm='gridrec',
                               filter_name=params.gridrec_filter)
            # Smooth the bootstrap reconstruction before seeding SIRT.
            rec = tomopy.misc.corr.gaussian_filter(rec, axis=1)
            rec = tomopy.misc.corr.gaussian_filter(rec, axis=2)
        # Center the rotation axis before handing the data to ASTRA.
        shift = (int((data.shape[2] / 2 - rot_center) + .5))
        data = np.roll(data, shift, axis=2)
        if params.astrasirt_bootstrap:
            log.info(' *** *** using gridrec to start astrasirt recon')
            rec = tomopy.recon(data, theta, init_recon=rec,
                               algorithm=tomopy.astra, options=options)
        else:
            rec = tomopy.recon(data, theta,
                               algorithm=tomopy.astra, options=options)
    elif params.reconstruction_algorithm == 'astrasart':
        extra_options = {}
        _set_constraint(extra_options, 'MinConstraint',
                        params.astrasart_min_constraint,
                        'astrasart_min_constraint')
        _set_constraint(extra_options, 'MaxConstraint',
                        params.astrasart_max_constraint,
                        'astrasart_max_constraint')
        options = {
            'proj_type': params.astrasart_proj_type,
            'method': params.astrasart_method,
            # SART counts iterations per projection.
            'num_iter': params.astrasart_num_iter * data.shape[0],
            'extra_options': extra_options,
        }
        if params.astrasart_bootstrap:
            log.info(' *** *** bootstrapping with gridrec')
            rec = tomopy.recon(data, theta, center=rot_center,
                               sinogram_order=sinogram_order,
                               algorithm='gridrec',
                               filter_name=params.gridrec_filter)
        shift = (int((data.shape[2] / 2 - rot_center) + .5))
        data = np.roll(data, shift, axis=2)
        if params.astrasart_bootstrap:
            log.info(' *** *** using gridrec to start astrasart recon')
            rec = tomopy.recon(data, theta, init_recon=rec,
                               algorithm=tomopy.astra, options=options)
        else:
            rec = tomopy.recon(data, theta,
                               algorithm=tomopy.astra, options=options)
        # BUG FIX: a stray duplicate tomopy.recon() call here used to run
        # the whole ASTRA reconstruction a second time, discarding the
        # bootstrapped result and doubling the runtime.
    elif params.reconstruction_algorithm == 'astracgls':
        extra_options = {}
        options = {
            'proj_type': params.astracgls_proj_type,
            'method': params.astracgls_method,
            'num_iter': params.astracgls_num_iter,
            'extra_options': extra_options,
        }
        if params.astracgls_bootstrap:
            log.info(' *** *** bootstrapping with gridrec')
            rec = tomopy.recon(data, theta, center=rot_center,
                               sinogram_order=sinogram_order,
                               algorithm='gridrec',
                               filter_name=params.gridrec_filter)
        shift = (int((data.shape[2] / 2 - rot_center) + .5))
        data = np.roll(data, shift, axis=2)
        if params.astracgls_bootstrap:
            log.info(' *** *** using gridrec to start astracgls recon')
            rec = tomopy.recon(data, theta, init_recon=rec,
                               algorithm=tomopy.astra, options=options)
        else:
            rec = tomopy.recon(data, theta,
                               algorithm=tomopy.astra, options=options)
    elif params.reconstruction_algorithm == 'gridrec':
        log.warning(" *** *** sinogram_order: %s" % sinogram_order)
        rec = tomopy.recon(data, theta, center=rot_center,
                           sinogram_order=sinogram_order,
                           algorithm='gridrec',
                           filter_name=params.gridrec_filter)
    elif params.reconstruction_algorithm == 'lprec_fbp':
        log.warning(" *** *** sinogram_order: %s" % sinogram_order)
        rec = tomopy.recon(data, theta, center=rot_center,
                           sinogram_order=sinogram_order,
                           algorithm=tomopy.lprec, lpmethod='fbp',
                           filter_name=params.lprec_fbp_filter)
    else:
        log.warning(" *** *** algorithm: %s is not supported yet" %
                    params.reconstruction_algorithm)
        params.reconstruction_algorithm = 'gridrec'
        log.warning(" *** *** using: %s instead" %
                    params.reconstruction_algorithm)
        log.warning(" *** *** sinogram_order: %s" % sinogram_order)
        rec = tomopy.recon(data, theta, center=rot_center,
                           sinogram_order=sinogram_order,
                           algorithm=params.reconstruction_algorithm,
                           filter_name=params.filter)
    log.info(" *** reconstruction finished")
    return rec
def rec(params):
    """Reconstruct the dataset described by *params*, chunk by chunk.

    Reconstruction types:
      - "full":  reconstruct rows params.start_row..params.end_row in
                 chunks of params.nsino_per_chunk sinograms
      - "slice": reconstruct a single (binned) slice at fractional
                 height params.nsino
      - "try":   reconstruct one slice for a range of rotation centers
                 so the user can pick the best one
    Results are written as tiff files next to the input file.
    """
    data_shape = file_io.get_dx_dims(params)

    # Read parameters from DXchange file if requested
    params = file_io.auto_read_dxchange(params)

    if params.rotation_axis <= 0:
        params.rotation_axis = data_shape[2] / 2
        log.warning(
            ' *** *** No rotation center given: assuming the middle of the projections at %f'
            % float(params.rotation_axis))

    # Select sinogram range to reconstruct
    if (params.reconstruction_type == "full"):
        if params.start_row:
            sino_start = params.start_row
        else:
            sino_start = 0
        if params.end_row < 0:
            sino_end = data_shape[1]
        else:
            sino_end = params.end_row
        # If params.nsino_per_chunk < 1, use # of processor cores
        if params.nsino_per_chunk < 1:
            params.nsino_per_chunk = cpu_count()
        nSino_per_chunk = params.nsino_per_chunk
        chunks = int(np.ceil((sino_end - sino_start) / nSino_per_chunk))
    else:  # "slice" and "try"
        nSino_per_chunk = pow(2, int(params.binning))
        chunks = 1
        ssino = int(data_shape[1] * params.nsino)
        sino_start = ssino
        sino_end = sino_start + pow(2, int(params.binning))

    log.info("reconstructing [%d] slices from slice [%d] to [%d] in [%d] chunks of [%d] slices each" % \
               ((sino_end - sino_start)/pow(2, int(params.binning)), sino_start/pow(2, int(params.binning)), sino_end/pow(2, int(params.binning)), \
               chunks, nSino_per_chunk/pow(2, int(params.binning))))

    strt = sino_start
    for iChunk in range(0, chunks):
        log.info('chunk # %i/%i' % (iChunk + 1, chunks))
        # BUG FIX: np.int was deprecated and removed in NumPy >= 1.24;
        # the builtin int() is the documented replacement.
        sino_chunk_start = int(sino_start + nSino_per_chunk * iChunk)
        sino_chunk_end = int(sino_start + nSino_per_chunk * (iChunk + 1))
        if sino_chunk_end > sino_end:
            log.warning(
                ' *** asking to go to row {0:d}, but our end row is {1:d}'.
                format(sino_chunk_end, sino_end))
            sino_chunk_end = sino_end
        log.info(' *** [%i, %i]' %
                 (sino_chunk_start / pow(2, int(params.binning)),
                  sino_chunk_end / pow(2, int(params.binning))))
        sino = (int(sino_chunk_start), int(sino_chunk_end))

        # Read APS 32-BM raw data.
        proj, flat, dark, theta, rotation_axis = file_io.read_tomo(
            sino, params)

        # What if sino overruns the size of data?
        if sino[1] - sino[0] > proj.shape[1]:
            log.warning(" *** Chunk size > remaining data size.")
            sino = (sino[0], sino[0] + proj.shape[1])

        # apply all preprocessing functions
        data = prep.all(proj, flat, dark, params, sino)

        # Reconstruct
        if (params.reconstruction_type == "try"):
            # try passes an array of rotation centers and this is only supported by gridrec
            reconstruction_algorithm_org = params.reconstruction_algorithm
            params.reconstruction_algorithm = 'gridrec'

            center_search_width = params.center_search_width / np.power(
                2, float(params.binning))
            center_range = (rotation_axis - center_search_width,
                            rotation_axis + center_search_width, 0.5)
            stack = np.empty(
                (len(np.arange(*center_range)), data_shape[0],
                 int(data_shape[2] / np.power(2, float(params.binning)))))
            # Duplicate the same sinogram once per candidate center.
            index = 0
            for axis in np.arange(*center_range):
                stack[index] = data[:, 0, :]
                index = index + 1
            log.warning(
                ' reconstruct slice [%d] with rotation axis range [%.2f - %.2f] in [%.2f] pixel steps'
                % (ssino, center_range[0], center_range[1], center_range[2]))

            rotation_axis = np.arange(*center_range)
            rec = padded_rec(stack, theta, rotation_axis, params)

            # Save images to a temporary folder.
            fname = os.path.dirname(
                params.file_name
            ) + '_rec' + os.sep + 'try_center' + os.sep + file_io.path_base_name(
                params.file_name) + os.sep + 'recon_'
            index = 0
            for axis in np.arange(*center_range):
                rfname = fname + str('{0:.2f}'.format(
                    axis * np.power(2, float(params.binning))) + '.tiff')
                dxchange.write_tiff(rec[index], fname=rfname, overwrite=True)
                index = index + 1

            # restore original method
            params.reconstruction_algorithm = reconstruction_algorithm_org
        else:  # "slice" and "full"
            rec = padded_rec(data, theta, rotation_axis, params)

        # Save images
        if (params.reconstruction_type == "full"):
            tail = os.sep + os.path.splitext(
                os.path.basename(params.file_name))[0] + '_rec' + os.sep
            fname = os.path.dirname(
                params.file_name) + '_rec' + tail + 'recon'
            # Write asynchronously so the next chunk can start processing.
            write_thread = threading.Thread(
                target=dxchange.write_tiff_stack,
                args=(rec, ),
                kwargs={
                    'fname': fname,
                    'start': strt,
                    'overwrite': True
                })
            write_thread.start()
            strt += int(
                (sino[1] - sino[0]) / np.power(2, float(params.binning)))
        if (params.reconstruction_type == "slice"):
            fname = Path.joinpath(
                Path(os.path.dirname(params.file_name) + '_rec'), 'slice_rec',
                'recon_' + Path(params.file_name).stem)
            dxchange.write_tiff_stack(rec, fname=str(fname), overwrite=False)

        log.info(" *** reconstructions: %s" % fname)
def rec(params):
    """Reconstruct the dataset in params.hdf_file, chunk by chunk.

    "full" reconstructs every sinogram row in chunks of
    params.nsino_per_chunk; "slice" and "try" reconstruct a single
    (binned) slice at fractional height params.nsino, with "try"
    scanning a range of rotation centers.  Results are written as tiff
    files next to the input file.
    """
    data_shape = file_io.get_dx_dims(params)

    if params.rotation_axis < 0:
        params.rotation_axis = data_shape[2] / 2

    # Select sinogram range to reconstruct
    if (params.reconstruction_type == "full"):
        nSino_per_chunk = params.nsino_per_chunk
        chunks = int(np.ceil(data_shape[1] / nSino_per_chunk))
        sino_start = 0
        sino_end = chunks * nSino_per_chunk
    else:  # "slice" and "try"
        nSino_per_chunk = pow(2, int(params.binning))
        chunks = 1
        ssino = int(data_shape[1] * params.nsino)
        sino_start = ssino
        sino_end = sino_start + pow(2, int(params.binning))

    log.info("reconstructing [%d] slices from slice [%d] to [%d] in [%d] chunks of [%d] slices each" % \
               ((sino_end - sino_start)/pow(2, int(params.binning)), sino_start/pow(2, int(params.binning)), sino_end/pow(2, int(params.binning)), \
               chunks, nSino_per_chunk/pow(2, int(params.binning))))

    strt = 0
    for iChunk in range(0, chunks):
        log.info('chunk # %i/%i' % (iChunk, chunks))
        # BUG FIX: np.int was deprecated and removed in NumPy >= 1.24;
        # the builtin int() is the documented replacement.
        sino_chunk_start = int(sino_start + nSino_per_chunk * iChunk)
        sino_chunk_end = int(sino_start + nSino_per_chunk * (iChunk + 1))
        log.info(' *** [%i, %i]' %
                 (sino_chunk_start / pow(2, int(params.binning)),
                  sino_chunk_end / pow(2, int(params.binning))))
        if sino_chunk_end > sino_end:
            break
        sino = (int(sino_chunk_start), int(sino_chunk_end))

        # Read APS 32-BM raw data.
        proj, flat, dark, theta, rotation_axis = file_io.read_tomo(
            sino, params)

        # apply all preprocessing functions
        data = prep.all(proj, flat, dark, params)

        # Reconstruct
        if (params.reconstruction_type == "try"):
            # try passes an array of rotation centers and this is only supported by gridrec
            reconstruction_algorithm_org = params.reconstruction_algorithm
            params.reconstruction_algorithm = 'gridrec'

            center_search_width = params.center_search_width / np.power(
                2, float(params.binning))
            center_range = (rotation_axis - center_search_width,
                            rotation_axis + center_search_width, 0.5)
            stack = np.empty(
                (len(np.arange(*center_range)), data_shape[0],
                 int(data_shape[2] / np.power(2, float(params.binning)))))
            # Duplicate the same sinogram once per candidate center.
            index = 0
            for axis in np.arange(*center_range):
                stack[index] = data[:, 0, :]
                index = index + 1
            log.warning(
                ' reconstruct slice [%d] with rotation axis range [%.2f - %.2f] in [%.2f] pixel steps'
                % (ssino, center_range[0], center_range[1], center_range[2]))

            rotation_axis = np.arange(*center_range)
            rec = padded_rec(stack, theta, rotation_axis, params)

            # Save images to a temporary folder.
            fname = os.path.dirname(
                params.hdf_file
            ) + '_rec' + os.sep + 'try_center' + os.sep + file_io.path_base_name(
                params.hdf_file) + os.sep + 'recon_'
            index = 0
            for axis in np.arange(*center_range):
                rfname = fname + str('{0:.2f}'.format(
                    axis * np.power(2, float(params.binning))) + '.tiff')
                dxchange.write_tiff(rec[index], fname=rfname, overwrite=True)
                index = index + 1

            # restore original method
            params.reconstruction_algorithm = reconstruction_algorithm_org
        else:  # "slice" and "full"
            rec = padded_rec(data, theta, rotation_axis, params)

        # handling of the last chunk
        if (params.reconstruction_type == "full"):
            if (iChunk == chunks - 1):
                log.info("handling of the last chunk")
                log.info(" *** chunk # %d" % (chunks))
                log.info(" *** last rec size %d" %
                         ((data_shape[1] - (chunks - 1) * nSino_per_chunk) /
                          pow(2, int(params.binning))))
                rec = rec[0:data_shape[1] -
                          (chunks - 1) * nSino_per_chunk, :, :]

        # Save images
        if (params.reconstruction_type == "full"):
            tail = os.sep + os.path.splitext(
                os.path.basename(params.hdf_file))[0] + '_rec' + os.sep
            fname = os.path.dirname(
                params.hdf_file) + '_rec' + tail + 'recon'
            dxchange.write_tiff_stack(rec, fname=fname, start=strt)
            strt += int(
                (sino[1] - sino[0]) / np.power(2, float(params.binning)))
        if (params.reconstruction_type == "slice"):
            fname = os.path.dirname(
                params.hdf_file
            ) + os.sep + 'slice_rec/recon_' + os.path.splitext(
                os.path.basename(params.hdf_file))[0]
            dxchange.write_tiff_stack(rec, fname=fname, overwrite=False)

        log.info(" *** reconstructions: %s" % fname)