Code example #1
def main(argv):          
	"""To do...


	"""
	lock = Lock()
	skip_flat = False
	skip_flat_after = True	

	# Get the index of the sinogram to process:
	sino_idx = int(argv[0])
	   
	# Get paths:
	infile  = argv[1]
	outpath = argv[2]

	# Essential reconstruction parameters::
	angles   = float(argv[3])
	offset   = float(argv[4])
	param1   = argv[5]	
	scale    = int(float(argv[6]))
	
	overpad  = True if argv[7] == "True" else False
	logtrsf  = True if argv[8] == "True" else False
	circle   = True if argv[9] == "True" else False
	
	# Parameters for on-the-fly pre-processing:
	preprocessing_required = True if argv[10] == "True" else False		
	flat_end = True if argv[11] == "True" else False		
	half_half = True if argv[12] == "True" else False
		
	half_half_line = int(argv[13])
		
	ext_fov = True if argv[14] == "True" else False
		
	norm_sx = int(argv[19])
	norm_dx = int(argv[20])	
		
	ext_fov_rot_right = argv[15]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0
		
	ext_fov_overlap = int(argv[16])

	ext_fov_normalize = True if argv[17] == "True" else False
	ext_fov_average = True if argv[18] == "True" else False
		
	skip_ringrem = True if argv[21] == "True" else False
	ringrem = argv[22]
	
	# Extra reconstruction parameters:
	zerone_mode = True if argv[23] == "True" else False		
	corr_offset = float(argv[24])
		
	reconmethod = argv[25]	
	
	decim_factor = int(argv[26])
	downsc_factor = int(argv[27])
	
	# Parameters for postprocessing:
	postprocess_required = True if argv[28] == "True" else False
	convert_opt = argv[29]
	crop_opt = argv[30]

	# Parameters for on-the-fly phase retrieval:
	phaseretrieval_required = True if argv[31] == "True" else False		
	phrtmethod = int(argv[32])
	phrt_param1 = double(argv[33])   # param1 (e.g. regParam or beta)
	phrt_param2 = double(argv[34])   # param2 (e.g. thresh or delta)
	energy = double(argv[35])
	distance = double(argv[36])
	pixsize = double(argv[37]) / 1000.0 # pixel size from micron to mm
	phrtpad = True if argv[38] == "True" else False
	approx_win = int(argv[39])	

	preprocessingplan_fromcache = True if argv[40] == "True" else False
	tmppath    = argv[41]	
	if not tmppath.endswith(sep): tmppath += sep

	nr_threads = int(argv[42])	
	angles_from = float(argv[43])
	angles_to   = float(argv[44])

	slice_prefix = argv[45]
		
	logfilename = argv[46]	

	if not exists(outpath):
		makedirs(outpath)
	
	if not outpath.endswith(sep): outpath += sep	


	# Log info:
	log = open(logfilename,"w")
	log.write(linesep + "\tInput dataset: %s" % (infile))	
	log.write(linesep + "\tOutput path: %s" % (outpath))		
	log.write(linesep + "\t--------------")		
	log.write(linesep + "\tLoading flat and dark images...")	
	log.close()	
			
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']	
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']				
	
	dset_min = -1
	dset_max = -1
	if (zerone_mode):
		if ('min' in dset.attrs):
			dset_min = float(dset.attrs['min'])								
		else:
			zerone_mode = False
			
		if ('max' in dset.attrs):
			dset_max = float(dset.attrs['max'])				
		else:
			zerone_mode = False	
		
	num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
	
	if (num_sinos == 0):	
		exit()		

	# Check extrema:
	if (sino_idx >= num_sinos / downsc_factor):
		sino_idx = num_sinos / downsc_factor - 1
	
	# Get correction plan and phase retrieval plan (if required):
	corrplan = 0	
	if (preprocessing_required):		
		# Load flat fielding plan either from cache (if required) or from TDF file and cache it for faster re-use:
		if (preprocessingplan_fromcache):
			try:
				corrplan = cache2plan(infile, tmppath)
			except Exception as e:
				#print "Error(s) when reading from cache"
				corrplan = extract_flatdark(f_in, flat_end, logfilename)
				plan2cache(corrplan, infile, tmppath)
		else:			
			corrplan = extract_flatdark(f_in, flat_end, logfilename)		
			plan2cache(corrplan, infile, tmppath)	

		# Downscale flat and dark images if necessary:
		if isinstance(corrplan['im_flat'], ndarray):
			corrplan['im_flat'] = corrplan['im_flat'][::downsc_factor,::downsc_factor]		
		if isinstance(corrplan['im_dark'], ndarray):
			corrplan['im_dark'] = corrplan['im_dark'][::downsc_factor,::downsc_factor]	
		if isinstance(corrplan['im_flat_after'], ndarray):
			corrplan['im_flat_after'] = corrplan['im_flat_after'][::downsc_factor,::downsc_factor]	
		if isinstance(corrplan['im_dark_after'], ndarray):
			corrplan['im_dark_after'] = corrplan['im_dark_after'][::downsc_factor,::downsc_factor]			

	f_in.close()	

	# Log info:
	log = open(logfilename,"a")
	log.write(linesep + "\tPerforming preprocessing...")			
	log.close()			

	# Run computation:	
	process( sino_idx, num_sinos, infile, outpath, preprocessing_required, corrplan, norm_sx, 
			norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, 
			ext_fov_normalize, ext_fov_average, ringrem, 
			phaseretrieval_required, phrtmethod, phrt_param1, phrt_param2, energy, distance, pixsize, phrtpad, 
			approx_win, angles, offset, logtrsf, param1, circle, scale, overpad, reconmethod, zerone_mode, 
			dset_min, dset_max, decim_factor, downsc_factor, corr_offset, postprocess_required, convert_opt, 
			crop_opt, nr_threads, angles_from, angles_to, logfilename, lock, slice_prefix )		
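Note: all of these scripts parse boolean flags from the command line with the `True if argv[i] == "True" else False` idiom, so only the exact string "True" enables an option. A minimal sketch of that behaviour in isolation (the helper name parse_bool is just an illustration, not part of the project):

def parse_bool(s):
    # Mirrors the parsing idiom used above: only the exact string "True" is truthy.
    return s == "True"

print(parse_bool("True"))   # True
print(parse_bool("true"))   # False: case matters
print(parse_bool("False"))  # False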
Code example #2
File: tools_guesscenter.py Project: decarlof/Pore3D
def main(argv):          
	"""Try to guess the center of rotation of the input CT dataset.

    Parameters
    ----------
    infile  : array_like
        HDF5 input dataset

    outfile : string
        Full path where the identified center of rotation will be written as output

	scale   : int
        If sub-pixel precision is interesting, use e.g. 2.0 to get a center of rotation 
		of .5 value. Use 1.0 if sub-pixel precision is not required

	angles  : int
        Total number of angles of the input dataset	

	method : string
		One of the following options: "registration"

	tmppath : string
        Temporary path where look for cached flat/dark files
       
    """ 	   
	# Get path:
	infile  = argv[0]          # The HDF5 file on the SSD
	outfile = argv[1]          # The txt file with the proposed center
	scale   = float(argv[2])
	angles  = float(argv[3])
	method  = argv[4]
	tmppath = argv[5]	
	if not tmppath.endswith(sep): tmppath += sep	

	pyfftw_cache_disable()
	pyfftw_cache_enable()
	pyfftw_set_keepalive_time(1800)	

	# Create a silly temporary log:
	tmplog  = tmppath + basename(infile) + str(time.time())
			
	# Open the HDF5 file (take into account also older TDF versions):
	f_in = getHDF5( infile, 'r' )
	if "/tomo" in f_in:
		dset = f_in['tomo']
	else: 
		dset = f_in['exchange/data']
	num_proj = tdf.get_nr_projs(dset)	
	num_sinos = tdf.get_nr_sinos(dset)	

	# Get flats and darks from cache or from file:
	try:
		corrplan = cache2plan(infile, tmppath)
	except Exception as e:
		#print "Error(s) when reading from cache"
		corrplan = extract_flatdark(f_in, True, tmplog)
		remove(tmplog)
		plan2cache(corrplan, infile, tmppath)

	# Get first and the 180 deg projections: 	
	im1 = tdf.read_tomo(dset,0).astype(float32)	

	idx = int(round(num_proj/angles * pi)) - 1
	im2 = tdf.read_tomo(dset,idx).astype(float32)		

	# Apply simple flat fielding (if applicable):
	if (isinstance(corrplan['im_flat_after'], ndarray) and isinstance(corrplan['im_flat'], ndarray) and
		isinstance(corrplan['im_dark'], ndarray) and isinstance(corrplan['im_dark_after'], ndarray)) :		
		im1 = ((abs(im1 - corrplan['im_dark'])) / (abs(corrplan['im_flat'] - corrplan['im_dark']) 
			+ finfo(float32).eps)).astype(float32)	
		im2 = ((abs(im2 - corrplan['im_dark_after'])) / (abs(corrplan['im_flat_after'] - corrplan['im_dark_after']) 
			+ finfo(float32).eps)).astype(float32)	

	# Scale projections (if required) to get subpixel estimation:
	if ( abs(scale - 1.0) > finfo(float32).eps ):	
		im1 = imresize(im1, (int(round(scale*im1.shape[0])), int(round(scale*im1.shape[1]))), interp='bicubic', mode='F');	
		im2 = imresize(im2, (int(round(scale*im2.shape[0])), int(round(scale*im2.shape[1]))), interp='bicubic', mode='F');	

	# Find the center (flipping left-right im2):
	cen = findcenter.usecorrelation(im1, im2[ :,::-1])
	cen = cen / scale
	
	# Print center to output file:
	text_file = open(outfile, "w")
	text_file.write(str(int(cen)))
	text_file.close()
	
	# Close input HDF5:
	f_in.close()
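The 180-degree companion projection above is located with `idx = int(round(num_proj / angles * pi)) - 1`, which reads naturally if `angles` is the total angular range in radians. A small numeric sketch under that assumption (values are illustrative):

from math import pi

num_proj = 1800   # projections in the dataset (illustrative)
angles = pi       # total scan range in radians, i.e. a 0-180 degree scan (assumption)

# Index of the projection closest to 180 degrees, as computed in the script above:
idx = int(round(num_proj / angles * pi)) - 1
print(idx)        # 1799, i.e. the last projection of the scan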
Code example #3
def main(argv):
    """To do...

	"""
    lock = Lock()

    skip_flat = True
    first_done = False
    pyfftw_cache_disable()
    pyfftw_cache_enable()
    pyfftw_set_keepalive_time(1800)

    # Get the index of the projection to process:
    idx = int(argv[0])

    # Get full paths of input TDF and output TDF:
    infile = argv[1]
    outfile = argv[2]

    # Get the phase retrieval parameters:
    method = int(argv[3])
    param1 = double(argv[4])  # param1 (e.g. regParam or beta)
    param2 = double(argv[5])  # param2 (e.g. thresh or delta)
    energy = double(argv[6])
    distance = double(argv[7])
    pixsize = double(argv[8]) / 1000.0  # pixel size from micron to mm
    pad = True if argv[9] == "True" else False

    # Tmp path and log file:
    tmppath = argv[10]
    if not tmppath.endswith(sep): tmppath += sep
    logfilename = argv[11]

    # Open the HDF5 file and check it contains flat files:
    skipflat = False
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
        if not "/flat" in f_in:
            skipflat = True
    else:
        dset = f_in['exchange/data']
        if not "/exchange/data_white" in f_in:
            skipflat = True
    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    # Check if the HDF5 makes sense:
    if (num_proj == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Get flats and darks from cache or from file:
    if not skipflat:
        try:
            corrplan = cache2plan(infile, tmppath)
        except Exception as e:
            #print "Error(s) when reading from cache"
            corrplan = extract_flatdark(f_in, True, logfilename)
            remove(logfilename)
            plan2cache(corrplan, infile, tmppath)

    # Read projection:
    im = tdf.read_tomo(dset, idx).astype(float32)
    f_in.close()

    # Apply simple flat fielding (if applicable):
    if not skipflat:
        if (isinstance(corrplan['im_flat_after'], ndarray)
                and isinstance(corrplan['im_flat'], ndarray)
                and isinstance(corrplan['im_dark'], ndarray)
                and isinstance(corrplan['im_dark_after'], ndarray)):
            if (idx < num_proj / 2):
                im = (im - corrplan['im_dark']) / (
                    abs(corrplan['im_flat'] - corrplan['im_dark']) +
                    finfo(float32).eps)
            else:
                im = (im - corrplan['im_dark_after']) / (
                    abs(corrplan['im_flat_after'] - corrplan['im_dark_after'])
                    + finfo(float32).eps)

    # Prepare plan:
    im = im.astype(float32)
    if (method == 0):
        # Paganin 2002:
        plan = tiehom_plan(im, param1, param2, energy, distance, pixsize, pad)
        im = tiehom(im, plan).astype(float32)
    elif (method == 1):
        # Paganin 2020:
        plan = tiehom_plan2020(im, param1, param2, energy, distance, pixsize,
                               pad)
        im = tiehom2020(im, plan).astype(float32)
    else:
        plan = phrt_plan(im, energy, distance, pixsize, param2, param1, method,
                         pad)
        im = phrt(im, plan, method).astype(float32)

    # Write down reconstructed preview file (file name modified with metadata):
    im = im.astype(float32)
    outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(
        im.shape[0]) + '_' + str(nanmin(im)) + '$' + str(nanmax(im))
    im.tofile(outfile)
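The flat fielding applied in these scripts reduces to dark subtraction followed by division by the dark-subtracted flat, with a small epsilon guarding against division by zero. A self-contained sketch of that normalization on synthetic data (not the project's flat_fielding routine, just the formula used above):

import numpy as np

def simple_flat_fielding(im, flat, dark):
    # (im - dark) / (|flat - dark| + eps), as in the examples above.
    eps = np.finfo(np.float32).eps
    return ((im - dark) / (np.abs(flat - dark) + eps)).astype(np.float32)

im = np.random.rand(4, 4).astype(np.float32) * 100.0    # synthetic projection
flat = np.full((4, 4), 90.0, dtype=np.float32)           # synthetic flat field
dark = np.full((4, 4), 5.0, dtype=np.float32)            # synthetic dark field
print(simple_flat_fielding(im, flat, dark))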
Code example #4
def main(argv):          
	"""Try to guess the amount of overlap in the case of extended FOV CT.

    Parameters
    ----------
    infile  : array_like
        HDF5 input dataset

    outfile : string
        Full path where the identified overlap will be written as output

	scale   : int
        If sub-pixel precision is interesting, use e.g. 2.0 to get an overlap 
		of .5 value. Use 1.0 if sub-pixel precision is not required

	tmppath : int
        Temporary path where look for cached flat/dark files
       
    """ 	
	   
	# Get path:
	infile  = argv[0]  # The HDF5 file on the SSD
	outfile  = argv[1]  # The txt file with the proposed overlap
	scale  = float(argv[2])
	tmppath = argv[3]	
	if not tmppath.endswith(sep): tmppath += sep	

	# Create a silly temporary log:
	tmplog  = tmppath + basename(infile) + str(time.time())	
			

	# Open the HDF5 file:
	f_in = getHDF5( infile, 'r' )
	if "/tomo" in f_in:
		dset = f_in['tomo']
	else: 
		dset = f_in['exchange/data']
	num_proj = tdf.get_nr_projs(dset)

	
	# Get first and 180 deg projections: 	
	im1 = tdf.read_tomo(dset,0).astype(float32)
	im2 = tdf.read_tomo(dset,num_proj/2).astype(float32)

	
	# Get flats and darks from cache or from file:
	try:
		corrplan = cache2plan(infile, tmppath)
	except Exception as e:
		#print "Error(s) when reading from cache"
		corrplan = extract_flatdark(f_in, True, tmplog)
		remove(tmplog)
		plan2cache(corrplan, infile, tmppath)

	# Apply simple flat fielding (if applicable):
	if (isinstance(corrplan['im_flat_after'], ndarray) and isinstance(corrplan['im_flat'], ndarray) and
		isinstance(corrplan['im_dark'], ndarray) and isinstance(corrplan['im_dark_after'], ndarray)) :		
		im1 = ((abs(im1 - corrplan['im_dark'])) / (abs(corrplan['im_flat'] - corrplan['im_dark'])  + finfo(float32).eps)).astype(float32)	
		im2 = ((abs(im2 - corrplan['im_dark_after'])) / (abs(corrplan['im_flat_after'] - corrplan['im_dark_after'])  + finfo(float32).eps)).astype(float32)		


	# Scale projections (if required) to get subpixel estimation:
	if ( abs(scale - 1.0) > finfo(float32).eps ):	
		im1 = imresize(im1, (int(round(scale*im1.shape[0])), int(round(scale*im1.shape[1]))), interp='bicubic', mode='F');	
		im2 = imresize(im2, (int(round(scale*im2.shape[0])), int(round(scale*im2.shape[1]))), interp='bicubic', mode='F');

			
	# Find the center (flipping left-right im2): DISTINGUISH BETWEEN AIR ON THE RIGHT AND ON THE LEFT??????
	cen = findcenter.usecorrelation(im1, im2[ :,::-1])
	cen = (cen / scale)*2.0	
	
	# Print center to output file:
	text_file = open(outfile, "w")
	text_file.write(str(int(abs(cen))))
	text_file.close()
	
	# Close input HDF5:
	f_in.close()
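The overlap estimate above is simply twice the correlation-based centre offset, rescaled back to the original pixel grid with `cen = (cen / scale) * 2.0`, then written as a positive integer. A sketch of that arithmetic with made-up numbers:

cen_scaled = 123.0   # offset returned by findcenter.usecorrelation on 2x-upscaled images (made up)
scale = 2.0          # upscaling factor used for sub-pixel precision

overlap = abs((cen_scaled / scale) * 2.0)
print(int(overlap))  # 123 -> the value written to the output text file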
Code example #5
File: exec_reconstruct.py Project: decarlof/Pore3D
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line shows an example invocation of this script:

	reconstruct 0 4 C:\Temp\Dullin_Aug_2012\sino_noflat C:\Temp\Dullin_Aug_2012\sino_noflat\output 
	9.0 10.0 0.0 0.0 0.0 true sino slice C:\Temp\Dullin_Aug_2012\sino_noflat\tomo_conv flat dark

	"""
	lock = Lock()
	skip_flat = False
	skip_flat_after = True	

	# Get the from and to number of files to process:
	int_from = int(argv[0])
	int_to = int(argv[1])
	   
	# Get paths:
	infile = argv[2]
	outpath = argv[3]

	# Essential reconstruction parameters::
	angles = float(argv[4])
	offset = float(argv[5])
	param1 = argv[6]	
	scale  = int(float(argv[7]))
	
	overpad = True if argv[8] == "True" else False
	logtrsf = True if argv[9] == "True" else False
	circle = True if argv[10] == "True" else False

	outprefix = argv[11]	
	
	# Parameters for on-the-fly pre-processing:
	preprocessing_required = True if argv[12] == "True" else False		
	flat_end = True if argv[13] == "True" else False		
	half_half = True if argv[14] == "True" else False
		
	half_half_line = int(argv[15])
		
	ext_fov = True if argv[16] == "True" else False
		
	norm_sx = int(argv[19])
	norm_dx = int(argv[20])	
		
	ext_fov_rot_right = argv[17]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0
		
	ext_fov_overlap = int(argv[18])
		
	skip_ringrem = True if argv[21] == "True" else False
	ringrem = argv[22]
	
	# Extra reconstruction parameters:
	zerone_mode = True if argv[23] == "True" else False		
	corr_offset = float(argv[24])
		
	reconmethod = argv[25]		
	
	decim_factor = int(argv[26])
	downsc_factor = int(argv[27])
	
	# Parameters for postprocessing:
	postprocess_required = True if argv[28] == "True" else False
	convert_opt = argv[29]
	crop_opt = argv[30]
	
	nr_threads = int(argv[31])	
	logfilename = argv[32]	
	process_id = int(logfilename[-6:-4])
	
	# Check prefixes and path:
	#if not infile.endswith(sep): infile += sep
	if not exists(outpath):
		makedirs(outpath)
	
	if not outpath.endswith(sep): outpath += sep
		
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']
		
		tomoprefix = 'tomo'
		flatprefix = 'flat'
		darkprefix = 'dark'
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']		
			
			tomoprefix = prov_dset.attrs['tomo_prefix']
			flatprefix = prov_dset.attrs['flat_prefix']
			darkprefix = prov_dset.attrs['dark_prefix']
	
	dset_min = -1
	dset_max = -1
	if (zerone_mode):
		if ('min' in dset.attrs):
			dset_min = float(dset.attrs['min'])								
		else:
			zerone_mode = False
			
		if ('max' in dset.attrs):
			dset_max = float(dset.attrs['max'])				
		else:
			zerone_mode = False	
		
	num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
	
	if (num_sinos == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		

	# Check extrema (int_to == -1 means all files):
	if ((int_to >= num_sinos) or (int_to == -1)):
		int_to = num_sinos - 1
		
	# Log info:
	log = open(logfilename,"w")
	log.write(linesep + "\tInput file: %s" % (infile))	
	log.write(linesep + "\tOutput path: %s" % (outpath))		
	log.write(linesep + "\t--------------")		
	log.write(linesep + "\tPreparing the working plan...")	
	log.close()	
	
	# Get correction plan and phase retrieval plan (if required):
	corrplan = -1
	phrtplan = -1
	
	if (preprocessing_required):		
		corrplan = extract_flatdark(f_in, flat_end, logfilename)
	
	f_in.close()
		
	# Log info:
	log = open(logfilename,"a")
	log.write(linesep + "\tWorking plan prepared correctly.")	
	log.write(linesep + "\t-------")
	log.write(linesep + "\tPerforming reconstruction...")			
	log.close()	

	# Run several threads for independent computation without waiting for threads completion:
	for num in range(nr_threads):
		start = ( (int_to - int_from + 1) / nr_threads)*num + int_from
		if (num == nr_threads - 1):
			end = int_to
		else:
			end = ( (int_to - int_from + 1) / nr_threads)*(num + 1) + int_from - 1
		if (reconmethod == 'GRIDREC'):
			Process(target=process_gridrec, args=(lock, start, end, num_sinos, infile, outpath, preprocessing_required, corrplan, norm_sx, 
						norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, ringrem, 
						angles, offset, logtrsf, param1, circle, scale, overpad, 
						zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, 
						postprocess_required, convert_opt, crop_opt, outprefix, logfilename )).start()
		else:
			Process(target=process, args=(lock, start, end, num_sinos, infile, outpath, preprocessing_required, corrplan, norm_sx, 
						norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, ringrem, 
						angles, offset, logtrsf, param1, circle, scale, overpad, 
						reconmethod, zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, 
						postprocess_required, convert_opt, crop_opt, outprefix, logfilename )).start()
Code example #6
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line processes the first ten TIFF files of input path 
	"/home/in" and saves the processed files to "/home/out" with the 
	application of the Boin and Haibel filter with smoothing via a Butterworth
	filter of order 4 and cutoff frequency 0.01:

	destripe /home/in /home/out 1 10 1 0.01 4    

	"""
	lock = Lock()

	skip_ringrem = False
	skip_flat = False
	skip_flat_after = True
	first_done = False	

	# Get the index of the sinogram to pre-process:
	idx = int(argv[0])
	   
	# Get paths:
	infile = argv[1]
	outfile = argv[2]
	
	# Normalization parameters:
	norm_sx = int(argv[3])
	norm_dx = int(argv[4])
	
	# Params for flat fielding with post flats/darks:
	flat_end = True if argv[5] == "True" else False
	half_half = True if argv[6] == "True" else False
	half_half_line = int(argv[7])
		
	# Params for extended FOV:
	ext_fov = True if argv[8] == "True" else False
	ext_fov_rot_right = argv[9]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0		
	ext_fov_overlap = int(argv[10])
		
	# Method and parameters coded into a string:
	ringrem = argv[11]	
	
	# Tmp path and log file:
	tmppath = argv[12]	
	if not tmppath.endswith(sep): tmppath += sep		
	logfilename = argv[13]		

	
	# Open the HDF5 file:	
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']		
	else: 
		dset = f_in['exchange/data']
		prov_dset = f_in['provenance/detector_output']			
		
	num_proj = tdf.get_nr_projs(dset)
	num_sinos = tdf.get_nr_sinos(dset)
	
	# Check if the HDF5 makes sense:
	if (num_sinos == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		

	# Get flat and darks from cache or from file:
	try:
		corrplan = cache2plan(infile, tmppath)
	except Exception as e:
		#print "Error(s) when reading from cache"
		corrplan = extract_flatdark(f_in, flat_end, logfilename)
		plan2cache(corrplan, infile, tmppath)		

	# Read input image:
	im = tdf.read_sino(dset,idx).astype(float32)		
	f_in.close()	

	# Perform pre-processing (flat fielding, extended FOV, ring removal):	
	im = flat_fielding(im, idx, corrplan, flat_end, half_half, half_half_line, norm_sx, norm_dx)			
	im = extfov_correction(im, ext_fov, ext_fov_rot_right, ext_fov_overlap)
	im = ring_correction (im, ringrem, flat_end, corrplan['skip_flat_after'], half_half, half_half_line, ext_fov)							
	
	# Write down reconstructed preview file (file name modified with metadata):		
	im = im.astype(float32)
	outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + '_' + str( nanmin(im)) + '$' + str( nanmax(im) )	
	im.tofile(outfile)
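The preview written above is a raw float32 dump whose file name encodes the image width, height, minimum and maximum (`<prefix>_<width>x<height>_<min>$<max>`). A sketch of how such a file could be read back with NumPy, assuming that naming scheme; the read_preview helper and the path in the usage comment are illustrations, not part of the project:

import numpy as np
from os.path import basename

def read_preview(path):
    # Assumes a name like "<prefix>_<width>x<height>_<min>$<max>" as written above.
    parts = basename(path).split('_')
    width, height = (int(v) for v in parts[-2].split('x'))
    return np.fromfile(path, dtype=np.float32).reshape(height, width)

# Example (hypothetical path): im = read_preview('/tmp/sino_2048x100_0.0$1.5')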
Code example #7
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line shows an example invocation of this script:

	reconstruct 0 4 C:\Temp\Dullin_Aug_2012\sino_noflat C:\Temp\Dullin_Aug_2012\sino_noflat\output 
	9.0 10.0 0.0 0.0 0.0 true sino slice C:\Temp\Dullin_Aug_2012\sino_noflat\tomo_conv flat dark

	"""
	skip_flat = False
	skip_flat_after = True	

	# Get the index of the sinogram to process:
	sino_idx = int(argv[0])
	   
	# Get paths:
	infile = argv[1]
	outfile = argv[2]

	# Essential reconstruction parameters::
	angles = float(argv[3])
	offset = float(argv[4])
	param1 = argv[5]	
	scale  = int(float(argv[6]))
	
	overpad = True if argv[7] == "True" else False
	logtrsf = True if argv[8] == "True" else False
	circle = True if argv[9] == "True" else False
	
	# Parameters for on-the-fly pre-processing:
	preprocessing_required = True if argv[10] == "True" else False		
	flat_end = True if argv[11] == "True" else False		
	half_half = True if argv[12] == "True" else False
		
	half_half_line = int(argv[13])
		
	ext_fov = True if argv[14] == "True" else False
		
	norm_sx = int(argv[17])
	norm_dx = int(argv[18])	
		
	ext_fov_rot_right = argv[15]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0
		
	ext_fov_overlap = int(argv[16])
		
	skip_ringrem = True if argv[19] == "True" else False
	ringrem = argv[20]
	
	# Extra reconstruction parameters:
	zerone_mode = True if argv[21] == "True" else False		
	corr_offset = float(argv[22])
		
	reconmethod = argv[23]	
	
	decim_factor = int(argv[24])
	downsc_factor = int(argv[25])
	
	# Parameters for postprocessing:
	postprocess_required = True if argv[26] == "True" else False
	convert_opt = argv[27]
	crop_opt = argv[28]

	# Parameters for on-the-fly phase retrieval:
	phaseretrieval_required = True if argv[29] == "True" else False		
	beta = double(argv[30])   # param1 (e.g. regParam or beta)
	delta = double(argv[31])   # param2 (e.g. thresh or delta)
	energy = double(argv[32])
	distance = double(argv[33])
	pixsize = double(argv[34]) / 1000.0 # pixel size from micron to mm
	phrtpad = True if argv[35] == "True" else False
	approx_win = int(argv[36])	

	preprocessingplan_fromcache = True if argv[37] == "True" else False
	
	nr_threads = int(argv[38])	
	tmppath = argv[39]	
	if not tmppath.endswith(sep): tmppath += sep
		
	logfilename = argv[40]		
			
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']	
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']				
	
	dset_min = -1
	dset_max = -1
	if (zerone_mode):
		if ('min' in dset.attrs):
			dset_min = float(dset.attrs['min'])								
		else:
			zerone_mode = False
			
		if ('max' in dset.attrs):
			dset_max = float(dset.attrs['max'])				
		else:
			zerone_mode = False	
		
	num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
	
	if (num_sinos == 0):	
		exit()		

	# Check extrema:
	if (sino_idx >= num_sinos):
		sino_idx = num_sinos - 1
	
	# Get correction plan and phase retrieval plan (if required):
	corrplan = 0	
	if (preprocessing_required):		
		# Load flat fielding plan either from cache (if required) or from TDF file and cache it for faster re-use:
		if (preprocessingplan_fromcache):
			try:
				corrplan = cache2plan(infile, tmppath)
			except Exception as e:
				#print "Error(s) when reading from cache"
				corrplan = extract_flatdark(f_in, flat_end, logfilename)
				plan2cache(corrplan, infile, tmppath)
		else:			
			corrplan = extract_flatdark(f_in, flat_end, logfilename)		
			plan2cache(corrplan, infile, tmppath)	

		# Downscale flat and dark images if necessary:
		if isinstance(corrplan['im_flat'], ndarray):
			corrplan['im_flat'] = corrplan['im_flat'][::downsc_factor,::downsc_factor]		
		if isinstance(corrplan['im_dark'], ndarray):
			corrplan['im_dark'] = corrplan['im_dark'][::downsc_factor,::downsc_factor]	
		if isinstance(corrplan['im_flat_after'], ndarray):
			corrplan['im_flat_after'] = corrplan['im_flat_after'][::downsc_factor,::downsc_factor]	
		if isinstance(corrplan['im_dark_after'], ndarray):
			corrplan['im_dark_after'] = corrplan['im_dark_after'][::downsc_factor,::downsc_factor]			

	f_in.close()			

	# Run computation:	
	process( sino_idx, num_sinos, infile, outfile, preprocessing_required, corrplan, norm_sx, 
				norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, ringrem, 
				phaseretrieval_required, beta, delta, energy, distance, pixsize, phrtpad, approx_win, angles, offset, 
				logtrsf, param1, circle, scale, overpad, reconmethod, zerone_mode, dset_min, dset_max, decim_factor, 
				downsc_factor, corr_offset, postprocess_required, convert_opt, crop_opt, nr_threads, logfilename )		
Code example #8
def main(argv):
    """To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line processes the first ten TIFF files of input path 
	"/home/in" and saves the processed files to "/home/out" with the 
	application of the Boin and Haibel filter with smoothing via a Butterworth
	filter of order 4 and cutoff frequency 0.01:

	destripe /home/in /home/out 1 10 1 0.01 4    

	"""
    lock = Lock()
    rescale_factor = 10000.0  # For 16-bit floating point

    # Get the from and to number of files to process:
    int_from = int(argv[0])
    int_to = int(argv[1])

    # Get paths:
    infile = argv[2]
    outfile = argv[3]

    # Params for flat fielding with post flats/darks:
    flat_end = True if argv[4] == "True" else False
    half_half = True if argv[5] == "True" else False
    half_half_line = int(argv[6])

    # Flat fielding method (conventional or dynamic):
    dynamic_ff = True if argv[7] == "True" else False

    # Parameters for rotation:
    rotation = float(argv[8])
    interp = argv[9]
    border = argv[10]

    # Nr of threads and log file:
    nr_threads = int(argv[11])
    logfilename = argv[12]

    # Log input parameters:
    log = open(logfilename, "w")
    log.write(linesep + "\tInput TDF file: %s" % (infile))
    log.write(linesep + "\tOutput TDF file: %s" % (outfile))
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tOpening input dataset...")
    log.close()

    # Remove a previous copy of output:
    if exists(outfile):
        remove(outfile)

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')

    if "/tomo" in f_in:
        dset = f_in['tomo']

        tomoprefix = 'tomo'
        flatprefix = 'flat'
        darkprefix = 'dark'
    else:
        dset = f_in['exchange/data']
        if "/provenance/detector_output" in f_in:
            prov_dset = f_in['provenance/detector_output']

            tomoprefix = prov_dset.attrs['tomo_prefix']
            flatprefix = prov_dset.attrs['flat_prefix']
            darkprefix = prov_dset.attrs['dark_prefix']

    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    if (num_sinos == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Check extrema (int_to == -1 means all files):
    if ((int_to >= num_proj) or (int_to == -1)):
        int_to = num_proj - 1

    # Prepare the work plan for flat and dark images:
    log = open(logfilename, "a")
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPreparing the work plan...")
    log.close()

    # Extract flat and darks:
    skipflat = False
    skipdark = False

    # Following variables make sense only for dynamic flat fielding:
    EFF = -1
    filtEFF = -1
    im_dark = -1

    # Following variable makes sense only for conventional flat fielding:
    plan = -1

    if not dynamic_ff:
        plan = extract_flatdark(f_in, flat_end, logfilename)
        if (isscalar(plan['im_flat']) and isscalar(plan['im_flat_after'])):
            skipflat = True
        else:
            skipflat = False
    else:
        # Dynamic flat fielding:
        if "/tomo" in f_in:
            if "/flat" in f_in:
                flat_dset = f_in['flat']
                if "/dark" in f_in:
                    im_dark = _medianize(f_in['dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in:
                flat_dset = f_in['/exchange/data_white']
                if "/exchange/data_dark" in f_in:
                    im_dark = _medianize(f_in['/exchange/data_dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat:
            EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

    # Get the corrected outshape (in this case it's easy):
    im = tdf.read_tomo(dset, 0).astype(float32)
    outshape = tdf.get_dset_shape(im.shape[1], im.shape[0], num_proj)

    # Create the output HDF5 file:
    f_out = getHDF5(outfile, 'w')
    #f_out_dset = f_out.create_dataset('exchange/data', outshape, im.dtype)
    f_out_dset = f_out.create_dataset('exchange/data', outshape, float16)
    f_out_dset.attrs['min'] = str(amin(im[:]))
    f_out_dset.attrs['max'] = str(amax(im[:]))
    f_out_dset.attrs['version'] = '1.0'
    f_out_dset.attrs['axes'] = "y:theta:x"
    f_out_dset.attrs['rescale_factor'] = str(rescale_factor)

    f_out.close()
    f_in.close()

    # Log info:
    log = open(logfilename, "a")
    log.write(linesep + "\tWork plan prepared correctly.")
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPerforming pre processing...")
    log.close()

    # Run several threads for independent computation without waiting for threads
    # completion:
    for num in range(nr_threads):
        start = (num_proj / nr_threads) * num
        if (num == nr_threads - 1):
            end = num_proj - 1
        else:
            end = (num_proj / nr_threads) * (num + 1) - 1
        Process(target=_process,
                args=(lock, start, end, infile, outfile, outshape, float16,
                      skipflat, plan, flat_end, half_half, half_half_line,
                      dynamic_ff, EFF, filtEFF, im_dark, rotation, interp,
                      border, rescale_factor, logfilename)).start()
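The projection range is split across nr_threads worker processes with plain integer arithmetic, the last worker taking the remainder. A standalone sketch of the same partitioning (integer division spelled with // so it also runs under Python 3; numbers are illustrative):

num_proj = 10
nr_threads = 3

for num in range(nr_threads):
    start = (num_proj // nr_threads) * num
    if num == nr_threads - 1:
        end = num_proj - 1
    else:
        end = (num_proj // nr_threads) * (num + 1) - 1
    print(num, start, end)
# 0 0 2
# 1 3 5
# 2 6 9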
Code example #9
File: exec_gdei.py Project: ElettraSciComp/STP-Core
def main(argv):
    """To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------    

	"""
    lock = Lock()

    # Get the from and to number of files to process:
    int_from = int(argv[0])
    int_to = int(argv[1])

    # Get paths:
    infile_1 = argv[2]
    infile_2 = argv[3]
    infile_3 = argv[4]

    outfile_abs = argv[5]
    outfile_ref = argv[6]
    outfile_sca = argv[7]

    # Normalization parameters:
    norm_sx = int(argv[8])
    norm_dx = int(argv[9])

    # Params for flat fielding with post flats/darks:
    flat_end = True if argv[10] == "True" else False
    half_half = True if argv[11] == "True" else False
    half_half_line = int(argv[12])

    # Params for extended FOV:
    ext_fov = True if argv[13] == "True" else False
    ext_fov_rot_right = argv[14]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        if (ext_fov):
            norm_dx = 0
    ext_fov_overlap = int(argv[15])

    ext_fov_normalize = True if argv[16] == "True" else False
    ext_fov_average = True if argv[17] == "True" else False

    # Method and parameters coded into a string:
    ringrem = argv[18]

    # Flat fielding method (conventional or dynamic):
    dynamic_ff = True if argv[19] == "True" else False

    # Shift parameters:
    shiftVert_1 = int(argv[20])
    shiftHoriz_1 = int(argv[21])
    shiftVert_2 = int(argv[22])
    shiftHoriz_2 = int(argv[23])
    shiftVert_3 = int(argv[24])
    shiftHoriz_3 = int(argv[25])

    # DEI coefficients:
    r1 = float(argv[26])
    r2 = float(argv[27])
    r3 = float(argv[28])
    d1 = float(argv[29])
    d2 = float(argv[30])
    d3 = float(argv[31])
    dd1 = float(argv[32])
    dd2 = float(argv[33])
    dd3 = float(argv[34])

    # Nr of threads and log file:
    nr_threads = int(argv[35])
    logfilename = argv[36]

    # Log input parameters:
    log = open(logfilename, "w")
    log.write(linesep + "\tInput TDF file #1: %s" % (infile_1))
    log.write(linesep + "\tInput TDF file #2: %s" % (infile_2))
    log.write(linesep + "\tInput TDF file #3: %s" % (infile_3))
    log.write(linesep + "\tOutput TDF file for Absorption: %s" % (outfile_abs))
    log.write(linesep + "\tOutput TDF file for Refraction: %s" % (outfile_ref))
    log.write(linesep + "\tOutput TDF file for Scattering: %s" % (outfile_sca))
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tOpening input dataset...")
    log.close()

    # Remove a previous copy of output:
    #if exists(outfile):
    #	remove(outfile)

    # Open the HDF5 files:
    f_in_1 = getHDF5(infile_1, 'r')
    f_in_2 = getHDF5(infile_2, 'r')
    f_in_3 = getHDF5(infile_3, 'r')

    if "/tomo" in f_in_1:
        dset_1 = f_in_1['tomo']

        tomoprefix_1 = 'tomo'
        flatprefix_1 = 'flat'
        darkprefix_1 = 'dark'
    else:
        dset_1 = f_in_1['exchange/data']
        if "/provenance/detector_output" in f_in_1:
            prov_dset_1 = f_in_1['provenance/detector_output']

            tomoprefix_1 = prov_dset_1.attrs['tomo_prefix']
            flatprefix_1 = prov_dset_1.attrs['flat_prefix']
            darkprefix_1 = prov_dset_1.attrs['dark_prefix']

    if "/tomo" in f_in_2:
        dset_2 = f_in_2['tomo']

        tomoprefix_2 = 'tomo'
        flatprefix_2 = 'flat'
        darkprefix_2 = 'dark'
    else:
        dset_2 = f_in_2['exchange/data']
        if "/provenance/detector_output" in f_in_2:
            prov_dset_2 = f_in_2['provenance/detector_output']

            tomoprefix_2 = prov_dset_2.attrs['tomo_prefix']
            flatprefix_2 = prov_dset_2.attrs['flat_prefix']
            darkprefix_2 = prov_dset_2.attrs['dark_prefix']

    if "/tomo" in f_in_3:
        dset_3 = f_in_3['tomo']

        tomoprefix_3 = 'tomo'
        flatprefix_3 = 'flat'
        darkprefix_3 = 'dark'
    else:
        dset_3 = f_in_3['exchange/data']
        if "/provenance/detector_output" in f_in_3:
            prov_dset_3 = f_in_3['provenance/detector_output']

            tomoprefix_3 = prov_dset_3.attrs['tomo_prefix']
            flatprefix_3 = prov_dset_3.attrs['flat_prefix']
            darkprefix_3 = prov_dset_3.attrs['dark_prefix']

    # Assuming that what works for the dataset #1 works for the other two:
    num_proj = tdf.get_nr_projs(dset_1)
    num_sinos = tdf.get_nr_sinos(dset_1)

    if (num_sinos == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Check extrema (int_to == -1 means all files):
    if ((int_to >= num_sinos) or (int_to == -1)):
        int_to = num_sinos - 1

    # Prepare the work plan for flat and dark images:
    log = open(logfilename, "a")
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPreparing the work plan...")
    log.close()

    # Extract flat and darks:
    skipflat_1 = False
    skipdark_1 = False
    skipflat_2 = False
    skipdark_2 = False
    skipflat_3 = False
    skipdark_3 = False

    # Following variables make sense only for dynamic flat fielding:
    EFF_1 = -1
    filtEFF_1 = -1
    im_dark_1 = -1

    EFF_2 = -1
    filtEFF_2 = -1
    im_dark_2 = -1

    EFF_3 = -1
    filtEFF_3 = -1
    im_dark_3 = -1

    # Following variable makes sense only for conventional flat fielding:
    plan_1 = -1
    plan_2 = -1
    plan_3 = -1

    if not dynamic_ff:
        plan_1 = extract_flatdark(f_in_1, flat_end, logfilename)
        if (isscalar(plan_1['im_flat']) and isscalar(plan_1['im_flat_after'])):
            skipflat_1 = True
        else:
            skipflat_1 = False

        plan_2 = extract_flatdark(f_in_2, flat_end, logfilename)
        if (isscalar(plan_2['im_flat']) and isscalar(plan_2['im_flat_after'])):
            skipflat_2 = True
        else:
            skipflat_2 = False

        plan_3 = extract_flatdark(f_in_3, flat_end, logfilename)
        if (isscalar(plan_3['im_flat']) and isscalar(plan_3['im_flat_after'])):
            skipflat_3 = True
        else:
            skipflat_3 = False

    else:
        # Dynamic flat fielding:
        if "/tomo" in f_in_1:
            if "/flat" in f_in_1:
                flat_dset_1 = f_in_1['flat']
                if "/dark" in f_in_1:
                    im_dark_1 = _medianize(f_in_1['dark'])
                else:
                    skipdark_1 = True
            else:
                skipflat_1 = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in_1:
                flat_dset_1 = f_in_1['/exchange/data_white']
                if "/exchange/data_dark" in f_in_1:
                    im_dark_1 = _medianize(f_in_1['/exchange/data_dark'])
                else:
                    skipdark_1 = True
            else:
                skipflat_1 = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat_1:
            EFF_1, filtEFF_1 = dff_prepare_plan(flat_dset_1, 16, im_dark_1)

        # Dynamic flat fielding:
        if "/tomo" in f_in_2:
            if "/flat" in f_in_2:
                flat_dset_2 = f_in_2['flat']
                if "/dark" in f_in_2:
                    im_dark_2 = _medianize(f_in_2['dark'])
                else:
                    skipdark_2 = True
            else:
                skipflat_2 = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in_2:
                flat_dset_2 = f_in_2['/exchange/data_white']
                if "/exchange/data_dark" in f_in_2:
                    im_dark_2 = _medianize(f_in_2['/exchange/data_dark'])
                else:
                    skipdark_2 = True
            else:
                skipflat_2 = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat_2:
            EFF_2, filtEFF_2 = dff_prepare_plan(flat_dset_2, 16, im_dark_2)

        # Dynamic flat fielding:
        if "/tomo" in f_in_3:
            if "/flat" in f_in_3:
                flat_dset_3 = f_in_3['flat']
                if "/dark" in f_in_3:
                    im_dark_3 = _medianize(f_in_3['dark'])
                else:
                    skipdark_3 = True
            else:
                skipflat_3 = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in_3:
                flat_dset_3 = f_in_3['/exchange/data_white']
                if "/exchange/data_dark" in f_in_3:
                    im_dark_3 = _medianize(f_in_3['/exchange/data_dark'])
                else:
                    skipdark_3 = True
            else:
                skipflat_3 = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat_3:
            EFF_3, filtEFF_3 = dff_prepare_plan(flat_dset_3, 16, im_dark_3)

    # Outfile shape can be determined only after first processing in ext FOV mode:
    if (ext_fov):

        # Read input sino:
        idx = num_sinos / 2
        im = tdf.read_sino(dset_1, idx).astype(float32)
        im = extfov_correction(im, ext_fov_rot_right, ext_fov_overlap,
                               ext_fov_normalize, ext_fov_average)

        # Get the corrected outshape:
        outshape = tdf.get_dset_shape(im.shape[1], num_sinos, im.shape[0])

    else:
        # Get the corrected outshape (in this case it's easy):
        im = tdf.read_tomo(dset_1, 0).astype(float32)
        outshape = tdf.get_dset_shape(im.shape[1], im.shape[0], num_proj)

    f_in_1.close()
    f_in_2.close()
    f_in_3.close()

    # Create the output HDF5 files:
    f_out_abs = getHDF5(outfile_abs, 'w')
    f_out_dset_abs = f_out_abs.create_dataset('exchange/data', outshape,
                                              float32)
    f_out_dset_abs.attrs['min'] = str(finfo(float32).max)
    f_out_dset_abs.attrs['max'] = str(finfo(float32).min)
    f_out_dset_abs.attrs['version'] = '1.0'
    f_out_dset_abs.attrs['axes'] = "y:theta:x"
    f_out_abs.close()

    f_out_ref = getHDF5(outfile_ref, 'w')
    f_out_dset_ref = f_out_ref.create_dataset('exchange/data', outshape,
                                              float32)
    f_out_dset_ref.attrs['min'] = str(finfo(float32).max)
    f_out_dset_ref.attrs['max'] = str(finfo(float32).min)
    f_out_dset_ref.attrs['version'] = '1.0'
    f_out_dset_ref.attrs['axes'] = "y:theta:x"
    f_out_ref.close()

    f_out_sca = getHDF5(outfile_sca, 'w')
    f_out_dset_sca = f_out_sca.create_dataset('exchange/data', outshape,
                                              float32)
    f_out_dset_sca.attrs['min'] = str(finfo(float32).max)
    f_out_dset_sca.attrs['max'] = str(finfo(float32).min)
    f_out_dset_sca.attrs['version'] = '1.0'
    f_out_dset_sca.attrs['axes'] = "y:theta:x"
    f_out_sca.close()

    # Log info:
    log = open(logfilename, "a")
    log.write(linesep + "\tWork plan prepared correctly.")
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPerforming GDEI...")
    log.close()

    # Run several threads for independent computation without waiting for threads
    # completion:
    for num in range(nr_threads):
        start = (num_sinos / nr_threads) * num
        if (num == nr_threads - 1):
            end = num_sinos - 1
        else:
            end = (num_sinos / nr_threads) * (num + 1) - 1
        Process(
            target=_process,
            args=(lock, start, end, num_sinos, infile_1, infile_2, infile_3,
                  outfile_abs, outfile_ref, outfile_sca, r1, r2, r3, d1, d2,
                  d3, dd1, dd2, dd3, shiftVert_1, shiftHoriz_1, shiftVert_2,
                  shiftHoriz_2, shiftVert_3, shiftHoriz_3, outshape, float32,
                  skipflat_1, skipflat_2, skipflat_3, plan_1, plan_2, plan_3,
                  norm_sx, norm_dx, flat_end, half_half, half_half_line,
                  ext_fov, ext_fov_rot_right, ext_fov_overlap,
                  ext_fov_normalize, ext_fov_average, ringrem, dynamic_ff,
                  EFF_1, EFF_2, EFF_3, filtEFF_1, filtEFF_2, filtEFF_3,
                  im_dark_1, im_dark_2, im_dark_3, logfilename)).start()
Code example #10
def main(argv):
    """To do...


	"""
    # Get the zero-order index of the sinogram to pre-process:
    idx = int(argv[0])

    # Get paths:
    infile = argv[1]
    outfile = argv[2]

    # Normalization parameters:
    norm_sx = int(argv[3])
    norm_dx = int(argv[4])

    # Params for flat fielding with post flats/darks:
    flat_end = True if argv[5] == "True" else False
    half_half = True if argv[6] == "True" else False
    half_half_line = int(argv[7])

    # Flat fielding method (conventional or dynamic):
    dynamic_ff = True if argv[8] == "True" else False

    # Parameters for rotation:
    rotation = float(argv[9])
    interp = argv[10]
    border = argv[11]

    # Tmp path and log file:
    tmppath = argv[12]
    if not tmppath.endswith(sep): tmppath += sep
    logfilename = argv[13]

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')

    try:
        if "/tomo" in f_in:
            dset = f_in['tomo']
        else:
            dset = f_in['exchange/data']

    except:
        log = open(logfilename, "a")
        log.write(linesep + "\tError reading input dataset. Process will end.")
        log.close()
        exit()

    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    # Check if the HDF5 makes sense:
    if (num_sinos == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Get flat and darks from cache or from file:
    skipflat = False
    skipdark = False
    if not dynamic_ff:
        try:
            corrplan = cache2plan(infile, tmppath)
        except Exception as e:
            #print "Error(s) when reading from cache"
            corrplan = extract_flatdark(f_in, flat_end, logfilename)
            if (isscalar(corrplan['im_flat'])
                    and isscalar(corrplan['im_flat_after'])):
                skipflat = True
            else:
                plan2cache(corrplan, infile, tmppath)
    else:
        # Dynamic flat fielding:
        if "/tomo" in f_in:
            if "/flat" in f_in:
                flat_dset = f_in['flat']
                if "/dark" in f_in:
                    im_dark = _medianize(f_in['dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in:
                flat_dset = f_in['/exchange/data_white']
                if "/exchange/data_dark" in f_in:
                    im_dark = _medianize(f_in['/exchange/data_dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat:
            EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

    # Read input image:
    im = tdf.read_tomo(dset, idx).astype(float32)
    f_in.close()

    # Perform pre-processing (flat fielding, extended FOV, ring removal):
    if not skipflat:
        if dynamic_ff:
            # Dynamic flat fielding with downsampling = 2:
            im = dynamic_flat_fielding(im, EFF, filtEFF, 2, im_dark)
        else:
            im = flat_fielding(im, idx, corrplan, flat_end, half_half,
                               half_half_line, norm_sx, norm_dx)

    # Rotate:
    rows, cols = im.shape
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation, 1)

    if interp == 'nearest':
        interpflag = cv2.INTER_NEAREST
    elif interp == 'cubic':
        interpflag = cv2.INTER_CUBIC
    elif interp == 'lanczos':
        interpflag = cv2.INTER_LANCZOS4
    else:
        interpflag = cv2.INTER_LINEAR

    if border == 'constant':
        borderflag = cv2.BORDER_CONSTANT
    else:
        borderflag = cv2.BORDER_REPLICATE

    im = cv2.warpAffine(im,
                        M, (cols, rows),
                        flags=interpflag,
                        borderMode=borderflag)

    # Write down reconstructed preview file (file name modified with metadata):
    im = im.astype(float32)
    outfile2 = outfile + '_' + str(im.shape[1]) + 'x' + str(
        im.shape[0]) + '_' + str(nanmin(im)) + '$' + str(
            nanmax(im)) + '_after.raw'
    im.tofile(outfile2)
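The rotation step above builds an affine matrix around the image centre with OpenCV and warps with the selected interpolation and border mode. A minimal self-contained sketch of the same two OpenCV calls on synthetic data (angle and modes are illustrative):

import numpy as np
import cv2

im = np.random.rand(64, 64).astype(np.float32)   # synthetic projection
rotation = 1.5                                    # degrees (illustrative)

rows, cols = im.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation, 1)
rotated = cv2.warpAffine(im, M, (cols, rows),
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REPLICATE)
print(rotated.shape)                              # (64, 64)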
Code example #11
def main(argv):
    """To do...


	"""
    # Get the zero-order index of the sinogram to pre-process:
    idx = int(argv[0])

    # Get paths:
    infile = argv[1]
    outfile = argv[2]

    # Normalization parameters:
    norm_sx = int(argv[3])
    norm_dx = int(argv[4])

    # Params for flat fielding with post flats/darks:
    flat_end = True if argv[5] == "True" else False
    half_half = True if argv[6] == "True" else False
    half_half_line = int(argv[7])

    # Params for extended FOV:
    ext_fov = True if argv[8] == "True" else False
    ext_fov_rot_right = argv[9]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        if (ext_fov):
            norm_dx = 0
    ext_fov_overlap = int(argv[10])

    ext_fov_normalize = True if argv[11] == "True" else False
    ext_fov_average = True if argv[12] == "True" else False

    # Method and parameters coded into a string:
    ringrem = argv[13]

    # Flat fielding method (conventional or dynamic):
    dynamic_ff = True if argv[14] == "True" else False

    # Tmp path and log file:
    tmppath = argv[15]
    if not tmppath.endswith(sep): tmppath += sep
    logfilename = argv[16]

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')

    try:
        if "/tomo" in f_in:
            dset = f_in['tomo']
        else:
            dset = f_in['exchange/data']

    except:
        log = open(logfilename, "a")
        log.write(linesep + "\tError reading input dataset. Process will end.")
        log.close()
        exit()

    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    # Check if the HDF5 makes sense:
    if (num_sinos == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Get flat and darks from cache or from file:
    skipflat = False
    skipdark = False
    if not dynamic_ff:
        try:
            corrplan = cache2plan(infile, tmppath)
        except Exception as e:
            #print "Error(s) when reading from cache"
            corrplan = extract_flatdark(f_in, flat_end, logfilename)
            if (isscalar(corrplan['im_flat'])
                    and isscalar(corrplan['im_flat_after'])):
                skipflat = True
            else:
                plan2cache(corrplan, infile, tmppath)
    else:
        # Dynamic flat fielding:
        if "/tomo" in f_in:
            if "/flat" in f_in:
                flat_dset = f_in['flat']
                if "/dark" in f_in:
                    im_dark = _medianize(f_in['dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case
        else:
            if "/exchange/data_white" in f_in:
                flat_dset = f_in['/exchange/data_white']
                if "/exchange/data_dark" in f_in:
                    im_dark = _medianize(f_in['/exchange/data_dark'])
                else:
                    skipdark = True
            else:
                skipflat = True  # Nothing to do in this case

        # Prepare plan for dynamic flat fielding with 16 repetitions:
        if not skipflat:
            EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

    # Read input image:
    im = tdf.read_sino(dset, idx).astype(float32)
    f_in.close()

    # Perform pre-processing (flat fielding, extended FOV, ring removal):
    if not skipflat:
        if dynamic_ff:
            # Dynamic flat fielding with downsampling = 2:
            im = dynamic_flat_fielding(im, idx, EFF, filtEFF, 2, im_dark,
                                       norm_sx, norm_dx)
        else:
            im = flat_fielding(im, idx, corrplan, flat_end, half_half,
                               half_half_line, norm_sx, norm_dx)
    if ext_fov:
        im = extfov_correction(im, ext_fov_rot_right, ext_fov_overlap,
                               ext_fov_normalize, ext_fov_average)
    if not skipflat and not dynamic_ff:
        im = ring_correction(im, ringrem, flat_end,
                             corrplan['skip_flat_after'], half_half,
                             half_half_line, ext_fov)
    else:
        im = ring_correction(im, ringrem, False, False, half_half,
                             half_half_line, ext_fov)

    # Write down reconstructed preview file (file name modified with metadata):
    im = im.astype(float32)
    outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(
        im.shape[0]) + '_' + str(nanmin(im)) + '$' + str(nanmax(im))
    im.tofile(outfile)
Code example #12
def main(argv):
    """To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------


	"""
    # Get the index of the sinogram to process:
    sino_idx = int(argv[0])

    # Get paths:
    infile = argv[1]
    outfile = argv[2]

    # Essential reconstruction parameters:
    angles = float(argv[3])
    offset = float(argv[4])
    recpar = argv[5]
    scale = int(float(argv[6]))

    overpad = True if argv[7] == "True" else False
    logtrsf = True if argv[8] == "True" else False
    circle = True if argv[9] == "True" else False

    # Parameters for on-the-fly pre-processing:
    preprocessing_required = True if argv[10] == "True" else False
    flat_end = True if argv[11] == "True" else False
    half_half = True if argv[12] == "True" else False

    half_half_line = int(argv[13])

    ext_fov = True if argv[14] == "True" else False

    norm_sx = int(argv[19])
    norm_dx = int(argv[20])

    ext_fov_rot_right = argv[15]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        if (ext_fov):
            norm_dx = 0

    ext_fov_overlap = int(argv[16])

    ext_fov_normalize = True if argv[17] == "True" else False
    ext_fov_average = True if argv[18] == "True" else False

    skip_ringrem = True if argv[21] == "True" else False
    ringrem = argv[22]

    # Extra reconstruction parameters:
    zerone_mode = True if argv[23] == "True" else False
    corr_offset = float(argv[24])

    reconmethod = argv[25]
    # Force overpadding in case of GRIDREC for unknown reasons:
    if reconmethod == "GRIDREC":
        overpad = True

    decim_factor = int(argv[26])
    downsc_factor = int(argv[27])

    # Parameters for postprocessing:
    postprocess_required = True if argv[28] == "True" else False
    polarfilt_opt = argv[29]
    convert_opt = argv[30]
    crop_opt = argv[31]

    # Parameters for on-the-fly phase retrieval:
    phaseretrieval_required = True if argv[32] == "True" else False
    phrtmethod = int(argv[33])
    phrt_param1 = double(argv[34])  # param1 (e.g. regParam or beta)
    phrt_param2 = double(argv[35])  # param2 (e.g. thresh or delta)
    energy = double(argv[36])
    distance = double(argv[37])
    pixsize = double(argv[38]) / 1000.0  # pixel size from micron to mm
    phrtpad = True if argv[39] == "True" else False
    approx_win = int(argv[40])

    angles_projfrom = int(argv[41])
    angles_projto = int(argv[42])

    rolling = True if argv[43] == "True" else False
    roll_shift = int(int(argv[44]) / decim_factor)

    preprocessingplan_fromcache = True if argv[45] == "True" else False
    dynamic_ff = True if argv[46] == "True" else False

    nr_threads = int(argv[47])
    tmppath = argv[48]
    if not tmppath.endswith(sep): tmppath += sep

    logfilename = argv[49]

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
    else:
        dset = f_in['exchange/data']
        if "/provenance/detector_output" in f_in:
            prov_dset = f_in['provenance/detector_output']

    dset_min = -1
    dset_max = -1
    if (zerone_mode):
        if ('min' in dset.attrs):
            dset_min = float(dset.attrs['min'])
        else:
            zerone_mode = False

        if ('max' in dset.attrs):
            dset_max = float(dset.attrs['max'])
        else:
            zerone_mode = False

    num_sinos = tdf.get_nr_sinos(dset)  # Pay attention to the downscale factor

    if (num_sinos == 0):
        exit()

    # Check extrema:
    if (sino_idx >= num_sinos / downsc_factor):
        sino_idx = num_sinos / downsc_factor - 1

    # Get correction plan and phase retrieval plan (if required):
    skipflat = False

    corrplan = 0
    im_dark = 0
    EFF = 0
    filtEFF = 0
    if (preprocessing_required):
        if not dynamic_ff:
            # Load flat fielding plan either from cache (if required) or from TDF file
            # and cache it for faster re-use:
            if (preprocessingplan_fromcache):
                try:
                    corrplan = cache2plan(infile, tmppath)
                except Exception as e:
                    #print "Error(s) when reading from cache"
                    corrplan = extract_flatdark(f_in, flat_end, logfilename)
                    if (isscalar(corrplan['im_flat'])
                            and isscalar(corrplan['im_flat_after'])):
                        skipflat = True
                    else:
                        plan2cache(corrplan, infile, tmppath)
            else:
                corrplan = extract_flatdark(f_in, flat_end, logfilename)
                if (isscalar(corrplan['im_flat'])
                        and isscalar(corrplan['im_flat_after'])):
                    skipflat = True
                else:
                    plan2cache(corrplan, infile, tmppath)

            # Downscale flat and dark images if necessary:
            if isinstance(corrplan['im_flat'], ndarray):
                corrplan['im_flat'] = corrplan[
                    'im_flat'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_dark'], ndarray):
                corrplan['im_dark'] = corrplan[
                    'im_dark'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_flat_after'], ndarray):
                corrplan['im_flat_after'] = corrplan[
                    'im_flat_after'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_dark_after'], ndarray):
                corrplan['im_dark_after'] = corrplan[
                    'im_dark_after'][::downsc_factor, ::downsc_factor]

        else:
            # Dynamic flat fielding:
            if "/tomo" in f_in:
                if "/flat" in f_in:
                    flat_dset = f_in['flat']
                    if "/dark" in f_in:
                        im_dark = _medianize(f_in['dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True  # Nothing to do in this case
            else:
                if "/exchange/data_white" in f_in:
                    flat_dset = f_in['/exchange/data_white']
                    if "/exchange/data_dark" in f_in:
                        im_dark = _medianize(f_in['/exchange/data_dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True  # Nothing to do in this case

            # Prepare plan for dynamic flat fielding with 16 repetitions:
            if not skipflat:
                EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

                # Downscale images if necessary:
                im_dark = im_dark[::downsc_factor, ::downsc_factor]
                EFF = EFF[::downsc_factor, ::downsc_factor, :]
                filtEFF = filtEFF[::downsc_factor, ::downsc_factor, :]

    f_in.close()

    # Run computation:
    process(sino_idx, num_sinos, infile, outfile, preprocessing_required,
            corrplan, skipflat, norm_sx, norm_dx, flat_end, half_half,
            half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap,
            ext_fov_normalize, ext_fov_average, ringrem,
            phaseretrieval_required, phrtmethod, phrt_param1, phrt_param2,
            energy, distance, pixsize, phrtpad, approx_win, angles,
            angles_projfrom, angles_projto, offset, logtrsf, recpar, circle,
            scale, overpad, reconmethod, rolling, roll_shift, zerone_mode,
            dset_min, dset_max, decim_factor, downsc_factor, corr_offset,
            postprocess_required, polarfilt_opt, convert_opt, crop_opt,
            dynamic_ff, EFF, filtEFF, im_dark, nr_threads, logfilename,
            tmppath)
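
Note: the argument parsing above converts each positional flag with the same "True if argv[i] == 'True' else False" idiom. A small hypothetical helper (not used by the original script) would keep that conversion in one place:

def str2bool(token):
    # Only the exact string "True" enables a flag, mirroring the idiom above.
    return token == "True"

# e.g. overpad = str2bool(argv[7]); dynamic_ff = str2bool(argv[46])
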
Code Example #13
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line processes the first ten TIFF files of input path 
	"/home/in" and saves the processed files to "/home/out" with the 
	application of the Boin and Haibel filter with smoothing via a Butterworth
	filter of order 4 and cutoff frequency 0.01:

	reconstruct 0 4 C:\Temp\Dullin_Aug_2012\sino_noflat C:\Temp\Dullin_Aug_2012\sino_noflat\output 
	9.0 10.0 0.0 0.0 0.0 true sino slice C:\Temp\Dullin_Aug_2012\sino_noflat\tomo_conv flat dark

	"""
	lock = Lock()
	skip_flat = False
	skip_flat_after = True	

	# Get the from and to number of files to process:
	int_from = int(argv[0])
	int_to = int(argv[1])
	   
	# Get paths:
	infile = argv[2]
	outpath = argv[3]

	# Essential reconstruction parameters:
	angles = float(argv[4])
	offset = float(argv[5])
	param1 = argv[6]	
	scale  = int(float(argv[7]))
	
	overpad = True if argv[8] == "True" else False
	logtrsf = True if argv[9] == "True" else False
	circle = True if argv[10] == "True" else False

	outprefix = argv[11]	
	
	# Parameters for on-the-fly pre-processing:
	preprocessing_required = True if argv[12] == "True" else False		
	flat_end = True if argv[13] == "True" else False		
	half_half = True if argv[14] == "True" else False
		
	half_half_line = int(argv[15])
		
	ext_fov = True if argv[16] == "True" else False
		
	norm_sx = int(argv[21])
	norm_dx = int(argv[22])	
		
	ext_fov_rot_right = argv[17]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0
		
	ext_fov_overlap = int(argv[18])

	ext_fov_normalize = True if argv[19] == "True" else False
	ext_fov_average = True if argv[20] == "True" else False
		
	skip_ringrem = True if argv[23] == "True" else False
	ringrem = argv[24]
	
	# Extra reconstruction parameters:
	zerone_mode = True if argv[25] == "True" else False		
	corr_offset = float(argv[26])
		
	reconmethod = argv[27]	
	# Force overpadding in case of GRIDREC for unknown reasons:
	if reconmethod == "GRIDREC":
		overpad = True	
	
	decim_factor = int(argv[28])
	downsc_factor = int(argv[29])
	
	# Parameters for postprocessing:
	postprocess_required = True if argv[30] == "True" else False
	polarfilt_opt = argv[31]
	convert_opt = argv[32]
	crop_opt = argv[33]

	angles_projfrom = int(argv[34])	
	angles_projto = int(argv[35])

	rolling = True if argv[36] == "True" else False
	roll_shift = int(int(argv[37]) / decim_factor)

	dynamic_ff 	= True if argv[38] == "True" else False
	
	nr_threads = int(argv[39])	
	logfilename = argv[40]	
	process_id = int(logfilename[-6:-4])
	
	# Check prefixes and path:
	#if not infile.endswith(sep): infile += sep
	if not exists(outpath):
		makedirs(outpath)
	
	if not outpath.endswith(sep): outpath += sep
		
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']
		
		tomoprefix = 'tomo'
		flatprefix = 'flat'
		darkprefix = 'dark'
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']		
			
			tomoprefix = prov_dset.attrs['tomo_prefix']
			flatprefix = prov_dset.attrs['flat_prefix']
			darkprefix = prov_dset.attrs['dark_prefix']
	
	dset_min = -1
	dset_max = -1
	if (zerone_mode):
		if ('min' in dset.attrs):
			dset_min = float(dset.attrs['min'])								
		else:
			zerone_mode = False
			
		if ('max' in dset.attrs):
			dset_max = float(dset.attrs['max'])				
		else:
			zerone_mode = False	
		
	num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
	
	if (num_sinos == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		

	# Check extrema (int_to == -1 means all files):
	if ((int_to >= num_sinos / downsc_factor) or (int_to == -1)):
		int_to = num_sinos / downsc_factor - 1
		
	# Log info:
	log = open(logfilename,"w")
	log.write(linesep + "\tInput file: %s" % (infile))	
	log.write(linesep + "\tOutput path: %s" % (outpath))		
	log.write(linesep + "\t--------------")		
	log.write(linesep + "\tPreparing the work plan...")	
	log.close()	
	
	# Get correction plan and phase retrieval plan (if required):
	corrplan = -1
	phrtplan = -1
	
	skipflat = False	

	im_dark = -1
	EFF = -1
	filtEFF = -1
	if (preprocessing_required):
		if not dynamic_ff:
			# Load the flat fielding plan from the TDF file:
			corrplan = extract_flatdark(f_in, flat_end, logfilename)
			if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after']) ):
				skipflat = True
			
			# Downscale flat and dark images if necessary:
			if isinstance(corrplan['im_flat'], ndarray):
				corrplan['im_flat'] = corrplan['im_flat'][::downsc_factor,::downsc_factor]		
			if isinstance(corrplan['im_dark'], ndarray):
				corrplan['im_dark'] = corrplan['im_dark'][::downsc_factor,::downsc_factor]	
			if isinstance(corrplan['im_flat_after'], ndarray):
				corrplan['im_flat_after'] = corrplan['im_flat_after'][::downsc_factor,::downsc_factor]	
			if isinstance(corrplan['im_dark_after'], ndarray):
				corrplan['im_dark_after'] = corrplan['im_dark_after'][::downsc_factor,::downsc_factor]			

		else:
			# Dynamic flat fielding:
			if "/tomo" in f_in:				
				if "/flat" in f_in:
					flat_dset = f_in['flat']
					if "/dark" in f_in:
						im_dark = _medianize(f_in['dark'])
					else:										
						skipdark = True
				else:
					skipflat = True # Nothing to do in this case			
			else: 
				if "/exchange/data_white" in f_in:
					flat_dset = f_in['/exchange/data_white']
					if "/exchange/data_dark" in f_in:
						im_dark = _medianize(f_in['/exchange/data_dark'])	
					else:					
						skipdark = True
				else:
					skipflat = True # Nothing to do in this case
	
			# Prepare plan for dynamic flat fielding with 16 repetitions:		
			if not skipflat:
				EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

				# Downscale images if necessary:
				im_dark = im_dark[::downsc_factor,::downsc_factor]
				EFF = EFF[::downsc_factor,::downsc_factor,:]	
				filtEFF = filtEFF[::downsc_factor,::downsc_factor,:]	
			
	f_in.close()			
		
	# Log info:
	log = open(logfilename,"a")
	log.write(linesep + "\tWork plan prepared correctly.")	
	log.write(linesep + "\t--------------")
	log.write(linesep + "\tPerforming reconstruction...")			
	log.close()	

	# Run several threads for independent computation without waiting for threads completion:
	for num in range(nr_threads):
		start = ( (int_to - int_from + 1) / nr_threads)*num + int_from
		if (num == nr_threads - 1):
			end = int_to
		else:
			end = ( (int_to - int_from + 1) / nr_threads)*(num + 1) + int_from - 1
		if (reconmethod == 'GRIDREC'):
			Process(target=process_gridrec, args=(lock, start, end, num_sinos, infile, outpath, preprocessing_required, skipflat, 
						corrplan, norm_sx, norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, 
						ext_fov_overlap, ext_fov_normalize, ext_fov_average, ringrem, angles, angles_projfrom, angles_projto, 
						offset, logtrsf, param1, circle, scale, overpad, rolling, roll_shift,
						zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, 
						postprocess_required, polarfilt_opt, convert_opt, crop_opt, dynamic_ff, EFF, filtEFF, im_dark, outprefix, 
						logfilename )).start()
		else:
			Process(target=process, args=(lock, start, end, num_sinos, infile, outpath, preprocessing_required, skipflat, 
						corrplan, norm_sx, norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, 
						ext_fov_overlap, ext_fov_normalize, ext_fov_average, ringrem, angles, angles_projfrom, angles_projto, 
						offset, logtrsf, param1, circle, scale, overpad, reconmethod, rolling, roll_shift,
						zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, 
						postprocess_required, polarfilt_opt, convert_opt, crop_opt, dynamic_ff, EFF, filtEFF, im_dark, outprefix, 
						logfilename )).start()
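
Note: the loop above slices the inclusive range [int_from, int_to] into nr_threads contiguous chunks and hands the remainder to the last process. A standalone sketch of the same chunking, written with explicit integer division (the original arithmetic relies on Python 2 integer division of ints):

def chunk_ranges(int_from, int_to, nr_threads):
    # Mirror the start/end arithmetic used above; the last chunk takes the rest.
    size = (int_to - int_from + 1) // nr_threads
    ranges = []
    for num in range(nr_threads):
        start = size * num + int_from
        end = int_to if num == nr_threads - 1 else size * (num + 1) + int_from - 1
        ranges.append((start, end))
    return ranges

# chunk_ranges(0, 9, 3) -> [(0, 2), (3, 5), (6, 9)]
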
Code Example #14
def main(argv):
	"""To do...

	"""
	lock = Lock()

	skip_flat = True
	first_done = False	
	pyfftw_cache_disable()
	pyfftw_cache_enable()
	pyfftw_set_keepalive_time(1800)	

	# Get the from and to number of files to process:
	idx = int(argv[0])
	   
	# Get full paths of input TDF and output TDF:
	infile = argv[1]
	outfile = argv[2]
	
	# Get the phase retrieval parameters:
	beta = double(argv[3])   # param1 (e.g. regParam or beta)
	delta = double(argv[4])   # param2 (e.g. thresh or delta)
	energy = double(argv[5])
	distance = double(argv[6])
	pixsize = double(argv[7]) / 1000.0 # pixel size from micron to mm
	pad = True if argv[8] == "True" else False
	
	# Tmp path and log file:
	tmppath = argv[9]	
	if not tmppath.endswith(sep): tmppath += sep		
	logfilename = argv[10]		

	
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']
	else: 
		dset = f_in['exchange/data']
	num_proj = tdf.get_nr_projs(dset)
	num_sinos = tdf.get_nr_sinos(dset)
	
	# Check if the HDF5 makes sense:
	if (num_proj == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		


	# Get flats and darks from cache or from file:
	try:
		corrplan = cache2plan(infile, tmppath)
	except Exception as e:
		#print "Error(s) when reading from cache"
		corrplan = extract_flatdark(f_in, True, logfilename)
		remove(logfilename)
		plan2cache(corrplan, infile, tmppath)

	# Read projection:
	im = tdf.read_tomo(dset,idx).astype(float32)		
	f_in.close()

	# Apply simple flat fielding (if applicable):
	if (isinstance(corrplan['im_flat_after'], ndarray) and isinstance(corrplan['im_flat'], ndarray) and
		isinstance(corrplan['im_dark'], ndarray) and isinstance(corrplan['im_dark_after'], ndarray)) :	
		if (idx < num_proj/2):
			im = (im - corrplan['im_dark']) / (abs(corrplan['im_flat'] - corrplan['im_dark']) + finfo(float32).eps)
		else:
			im = (im - corrplan['im_dark_after']) / (abs(corrplan['im_flat_after'] - corrplan['im_dark_after']) 
				+ finfo(float32).eps)	
					
	# Prepare plan:
	im = im.astype(float32)
	plan = prepare_plan (im, beta, delta, energy, distance, pixsize, padding=pad)

	# Perform phase retrieval (first time also PyFFTW prepares a plan):		
	im = phase_retrieval(im, plan)
	
	# Write down reconstructed preview file (file name modified with metadata):		
	im = im.astype(float32)
	outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + '_' + str( nanmin(im)) + '$' + str( nanmax(im) )	
	im.tofile(outfile)		
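
Note: before phase retrieval the projection is normalized with the classic flat-fielding expression (im - dark) / (flat - dark), using the pre-scan or post-scan flat depending on the projection index. A tiny self-contained illustration with synthetic numbers (not project data):

from numpy import array, float32, finfo

im   = array([[110., 60.], [35., 210.]], dtype=float32)    # raw projection
flat = array([[200., 200.], [200., 200.]], dtype=float32)  # open-beam (flat) image
dark = array([[10., 10.], [10., 10.]], dtype=float32)      # detector offset (dark) image

transmission = (im - dark) / (abs(flat - dark) + finfo(float32).eps)
# -> roughly [[0.53, 0.26], [0.13, 1.05]]; values near 1.0 mean little absorption.
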
Code Example #15
def main(argv):          
	"""To do...


	"""
	# Get the zero-order index of the sinogram to pre-process:
	idx = int(argv[0])
	   
	# Get paths:
	infile = argv[1]
	outfile = argv[2]
	
	# Normalization parameters:
	norm_sx = int(argv[3])
	norm_dx = int(argv[4])
	
	# Params for flat fielding with post flats/darks:
	flat_end = True if argv[5] == "True" else False
	half_half = True if argv[6] == "True" else False
	half_half_line = int(argv[7])
		
	# Params for extended FOV:
	ext_fov = True if argv[8] == "True" else False
	ext_fov_rot_right = argv[9]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0		
	ext_fov_overlap = int(argv[10])

	ext_fov_normalize = True if argv[11] == "True" else False
	ext_fov_average = True if argv[12] == "True" else False
		
	# Method and parameters coded into a string:
	ringrem = argv[13]	
	
	# Flat fielding method (conventional or dynamic):
	dynamic_ff = True if argv[14] == "True" else False
	
	# Tmp path and log file:
	tmppath = argv[15]	
	if not tmppath.endswith(sep): tmppath += sep		
	logfilename = argv[16]		

	
	# Open the HDF5 file:	
	f_in = getHDF5(infile, 'r')
	
	try:
		if "/tomo" in f_in:
			dset = f_in['tomo']		
		else: 
			dset = f_in['exchange/data']		
	
	except:
		log = open(logfilename,"a")
		log.write(linesep + "\tError reading input dataset. Process will end.")		
		log.close()			
		exit()
		
	num_proj = tdf.get_nr_projs(dset)
	num_sinos = tdf.get_nr_sinos(dset)
	
	# Check if the HDF5 makes sense:
	if (num_sinos == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		

	# Get flats and darks from cache or from file:
	skipflat = False
	skipdark = False
	if not dynamic_ff:
		try:
			corrplan = cache2plan(infile, tmppath)
		except Exception as e:
			#print "Error(s) when reading from cache"
			corrplan = extract_flatdark(f_in, flat_end, logfilename)
			if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after']) ):
				skipflat = True
			else:
				plan2cache(corrplan, infile, tmppath)					
	else:
		# Dynamic flat fielding:
		if "/tomo" in f_in:				
			if "/flat" in f_in:
				flat_dset = f_in['flat']
				if "/dark" in f_in:
					im_dark = _medianize(f_in['dark'])
				else:										
					skipdark = True
			else:
				skipflat = True # Nothing to do in this case			
		else: 
			if "/exchange/data_white" in f_in:
				flat_dset = f_in['/exchange/data_white']
				if "/exchange/data_dark" in f_in:
					im_dark = _medianize(f_in['/exchange/data_dark'])
				else:					
					skipdark = True
			else:
				skipflat = True # Nothing to do in this case
	
		# Prepare plan for dynamic flat fielding with 16 repetitions:
		if not skipflat:	
			EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

	# Read input image:
	im = tdf.read_sino(dset,idx).astype(float32)	
	f_in.close()	

	# Perform pre-processing (flat fielding, extended FOV, ring removal):	
	if not skipflat:
		if dynamic_ff:
			# Dynamic flat fielding with downsampling = 2:
			im = dynamic_flat_fielding(im, idx, EFF, filtEFF, 2, im_dark, norm_sx, norm_dx)
		else:
			im = flat_fielding(im, idx, corrplan, flat_end, half_half, half_half_line, norm_sx, norm_dx)	
	if ext_fov:			
		im = extfov_correction(im, ext_fov_rot_right, ext_fov_overlap, ext_fov_normalize, ext_fov_average)
	if not skipflat and not dynamic_ff:
		im = ring_correction (im, ringrem, flat_end, corrplan['skip_flat_after'], half_half, half_half_line, ext_fov)		
	else:
		im = ring_correction (im, ringrem, False, False, half_half, half_half_line, ext_fov)						

	# Write down reconstructed preview file (file name modified with metadata):		
	im = im.astype(float32)
	outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + '_' + str( nanmin(im)) + '$' + str( nanmax(im) )	
	im.tofile(outfile)
Code Example #16
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------


	"""
	# Get the from and to number of files to process:
	sino_idx = int(argv[0])
	   
	# Get paths:
	infile = argv[1]
	outfile = argv[2]

	# Essential reconstruction parameters:
	angles = float(argv[3])	
	offset = float(argv[4])
	recpar = argv[5]	
	scale = int(float(argv[6]))
	
	overpad = True if argv[7] == "True" else False
	logtrsf = True if argv[8] == "True" else False
	circle = True if argv[9] == "True" else False
	
	# Parameters for on-the-fly pre-processing:
	preprocessing_required = True if argv[10] == "True" else False		
	flat_end = True if argv[11] == "True" else False		
	half_half = True if argv[12] == "True" else False
		
	half_half_line = int(argv[13])
		
	ext_fov = True if argv[14] == "True" else False
		
	norm_sx = int(argv[19])
	norm_dx = int(argv[20])	
		
	ext_fov_rot_right = argv[15]
	if ext_fov_rot_right == "True":
		ext_fov_rot_right = True
		if (ext_fov):
			norm_sx = 0
	else:
		ext_fov_rot_right = False
		if (ext_fov):
			norm_dx = 0
		
	ext_fov_overlap = int(argv[16])
	
	ext_fov_normalize = True if argv[17] == "True" else False
	ext_fov_average = True if argv[18] == "True" else False
		
	skip_ringrem = True if argv[21] == "True" else False
	ringrem = argv[22]
	
	# Extra reconstruction parameters:
	zerone_mode = True if argv[23] == "True" else False		
	corr_offset = float(argv[24])
		
	reconmethod = argv[25]	
	# Force overpadding in case of GRIDREC for unknown reasons:
	if reconmethod == "GRIDREC":
		overpad = True
	
	decim_factor = int(argv[26])
	downsc_factor = int(argv[27])
	
	# Parameters for postprocessing:
	postprocess_required = True if argv[28] == "True" else False
	polarfilt_opt = argv[29]
	convert_opt = argv[30]
	crop_opt = argv[31]

	# Parameters for on-the-fly phase retrieval:
	phaseretrieval_required = True if argv[32] == "True" else False		
	phrtmethod = int(argv[33])
	phrt_param1 = double(argv[34])   # param1 (e.g. regParam or beta)
	phrt_param2 = double(argv[35])   # param2 (e.g. thresh or delta)
	energy = double(argv[36])
	distance = double(argv[37])
	pixsize = double(argv[38]) / 1000.0 # pixel size from micron to mm
	phrtpad = True if argv[39] == "True" else False
	approx_win = int(argv[40])	

	angles_projfrom = int(argv[41])	
	angles_projto = int(argv[42])	

	rolling = True if argv[43] == "True" else False
	roll_shift = int(int(argv[44]) / decim_factor)

	preprocessingplan_fromcache = True if argv[45] == "True" else False
	dynamic_ff = True if argv[46] == "True" else False

	nr_threads = int(argv[47])	
	tmppath = argv[48]	
	if not tmppath.endswith(sep): tmppath += sep
		
	logfilename = argv[49]		
			
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')
	if "/tomo" in f_in:
		dset = f_in['tomo']	
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']				
	
	dset_min = -1
	dset_max = -1
	if (zerone_mode):
		if ('min' in dset.attrs):
			dset_min = float(dset.attrs['min'])								
		else:
			zerone_mode = False
			
		if ('max' in dset.attrs):
			dset_max = float(dset.attrs['max'])				
		else:
			zerone_mode = False	
		
	num_sinos = tdf.get_nr_sinos(dset) # Pay attention to the downscale factor
	
	if (num_sinos == 0):	
		exit()		

	# Check extrema:
	if (sino_idx >= num_sinos / downsc_factor):
		sino_idx = num_sinos / downsc_factor - 1
	
	# Get correction plan and phase retrieval plan (if required):
	skipflat = False
	
	corrplan = 0	
	im_dark = 0
	EFF = 0
	filtEFF = 0
	if (preprocessing_required):
		if not dynamic_ff:
			# Load flat fielding plan either from cache (if required) or from TDF file
			# and cache it for faster re-use:
			if (preprocessingplan_fromcache):
				try:
					corrplan = cache2plan(infile, tmppath)
				except Exception as e:
					#print "Error(s) when reading from cache"
					corrplan = extract_flatdark(f_in, flat_end, logfilename)
					if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after'])):
						skipflat = True
					else:
						plan2cache(corrplan, infile, tmppath)		
			else:			
				corrplan = extract_flatdark(f_in, flat_end, logfilename)		
				if (isscalar(corrplan['im_flat']) and isscalar(corrplan['im_flat_after'])):
					skipflat = True
				else:
					plan2cache(corrplan, infile, tmppath)	

			# Downscale flat and dark images if necessary:
			if isinstance(corrplan['im_flat'], ndarray):
				corrplan['im_flat'] = corrplan['im_flat'][::downsc_factor,::downsc_factor]		
			if isinstance(corrplan['im_dark'], ndarray):
				corrplan['im_dark'] = corrplan['im_dark'][::downsc_factor,::downsc_factor]	
			if isinstance(corrplan['im_flat_after'], ndarray):
				corrplan['im_flat_after'] = corrplan['im_flat_after'][::downsc_factor,::downsc_factor]	
			if isinstance(corrplan['im_dark_after'], ndarray):
				corrplan['im_dark_after'] = corrplan['im_dark_after'][::downsc_factor,::downsc_factor]			

		else:
			# Dynamic flat fielding:
			if "/tomo" in f_in:				
				if "/flat" in f_in:
					flat_dset = f_in['flat']
					if "/dark" in f_in:
						im_dark = _medianize(f_in['dark'])
					else:										
						skipdark = True
				else:
					skipflat = True # Nothing to do in this case
			else: 
				if "/exchange/data_white" in f_in:
					flat_dset = f_in['/exchange/data_white']
					if "/exchange/data_dark" in f_in:
						im_dark = _medianize(f_in['/exchange/data_dark'])	
					else:					
						skipdark = True
				else:
					skipflat = True # Nothing to do in this case
	
			# Prepare plan for dynamic flat fielding with 16 repetitions:
			if not skipflat:
				EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

				# Downscale images if necessary:
				im_dark = im_dark[::downsc_factor,::downsc_factor]
				EFF = EFF[::downsc_factor,::downsc_factor,:]	
				filtEFF = filtEFF[::downsc_factor,::downsc_factor,:]	
			
	f_in.close()			

	# Run computation:
	process(sino_idx, num_sinos, infile, outfile, preprocessing_required, corrplan, skipflat, norm_sx, 
				norm_dx, flat_end, half_half, half_half_line, ext_fov, ext_fov_rot_right, ext_fov_overlap, 
				ext_fov_normalize, ext_fov_average, ringrem, phaseretrieval_required, phrtmethod, phrt_param1, 
				phrt_param2, energy, distance, pixsize, phrtpad, approx_win, angles, angles_projfrom, 
				angles_projto, offset, logtrsf, recpar, circle, scale, overpad, reconmethod, rolling, 
				roll_shift, zerone_mode, dset_min, dset_max, decim_factor, downsc_factor, corr_offset, 
				postprocess_required, polarfilt_opt, convert_opt, crop_opt, dynamic_ff, EFF, filtEFF, 
                im_dark, nr_threads, logfilename, tmppath)		
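
Note: these main(argv) entry points read their parameters starting at argv[0], so the caller is expected to pass the argument list without the program name. A typical launcher (an assumption, not shown in this excerpt) would therefore be:

import sys

if __name__ == "__main__":
    main(sys.argv[1:])
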
Code Example #17
def main(argv):
    """Try to guess the amount of overlap in the case of extended FOV CT.

    Parameters
    ----------
    infile  : string
        Path of the HDF5 input dataset

    outfile : string
        Full path where the identified overlap will be written as output

    scale   : float
        If sub-pixel precision is desired, use e.g. 2.0 to get an overlap
        with 0.5 precision. Use 1.0 if sub-pixel precision is not required

    tmppath : string
        Temporary path where cached flat/dark files are looked for
    """

    # Get path:
    infile = argv[0]  # The HDF5 file on the SSD
    outfile = argv[1]  # The txt file with the proposed center
    scale = float(argv[2])
    tmppath = argv[3]
    if not tmppath.endswith(sep): tmppath += sep

    # Create a silly temporary log:
    tmplog = tmppath + basename(infile) + str(time.time())

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
    else:
        dset = f_in['exchange/data']
    num_proj = tdf.get_nr_projs(dset)

    # Get first and 180 deg projections:
    im1 = tdf.read_tomo(dset, 0).astype(float32)
    im2 = tdf.read_tomo(dset, num_proj / 2).astype(float32)

    # Get flats and darks from cache or from file:
    try:
        corrplan = cache2plan(infile, tmppath)
    except Exception as e:
        #print "Error(s) when reading from cache"
        corrplan = extract_flatdark(f_in, True, tmplog)
        remove(tmplog)
        plan2cache(corrplan, infile, tmppath)

    # Apply simple flat fielding (if applicable):
    if (isinstance(corrplan['im_flat_after'], ndarray)
            and isinstance(corrplan['im_flat'], ndarray)
            and isinstance(corrplan['im_dark'], ndarray)
            and isinstance(corrplan['im_dark_after'], ndarray)):
        im1 = ((abs(im1 - corrplan['im_dark'])) /
               (abs(corrplan['im_flat'] - corrplan['im_dark']) +
                finfo(float32).eps)).astype(float32)
        im2 = ((abs(im2 - corrplan['im_dark_after'])) /
               (abs(corrplan['im_flat_after'] - corrplan['im_dark_after']) +
                finfo(float32).eps)).astype(float32)

    # Scale projections (if required) to get subpixel estimation:
    if (abs(scale - 1.0) > finfo(float32).eps):
        im1 = imresize(im1, (int(round(
            scale * im1.shape[0])), int(round(scale * im1.shape[1]))),
                       interp='bicubic',
                       mode='F')
        im2 = imresize(im2, (int(round(
            scale * im2.shape[0])), int(round(scale * im2.shape[1]))),
                       interp='bicubic',
                       mode='F')

    # Find the center (flipping im2 left-right). TODO: distinguish between air on the right and air on the left.
    cen = findcenter.usecorrelation(im1, im2[:, ::-1])
    cen = (cen / scale) * 2.0

    # Print center to output file:
    text_file = open(outfile, "w")
    text_file.write(str(int(abs(cen))))
    text_file.close()

    # Close input HDF5:
    f_in.close()
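
Note: findcenter.usecorrelation above compares the first projection with the horizontally flipped projection taken half a turn later; the detected shift is then rescaled by scale and doubled to express the overlap in pixels. The general idea of such a correlation search can be sketched as follows; this generic FFT cross-correlation only illustrates the principle and is not the actual implementation of findcenter.usecorrelation:

from numpy import argmax, conj, unravel_index
from numpy.fft import fft2, ifft2

def correlation_shift(im1, im2):
    # Circular cross-correlation via FFT; the peak position gives the shift.
    corr = ifft2(fft2(im1) * conj(fft2(im2))).real
    peak = unravel_index(argmax(corr), corr.shape)
    # Shifts beyond half the image size wrap around to negative values:
    return tuple(p - s if p > s // 2 else p for p, s in zip(peak, corr.shape))
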
Code Example #18
def main(argv):          
	"""To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line processes the first ten TIFF files of input path 
	"/home/in" and saves the processed files to "/home/out" with the 
	application of the Boin and Haibel filter with smoothing via a Butterworth
	filter of order 4 and cutoff frequency 0.01:

	destripe /home/in /home/out 1 10 1 0.01 4    

	"""
	lock = Lock()
	rescale_factor = 10000.0 # For 16-bit floating point

	# Get the from and to number of files to process:
	int_from = int(argv[0])
	int_to = int(argv[1])
	   
	# Get paths:
	infile = argv[2]
	outfile = argv[3]	
	
	# Params for flat fielding with post flats/darks:
	flat_end = True if argv[4] == "True" else False
	half_half = True if argv[5] == "True" else False
	half_half_line = int(argv[6])

	# Flat fielding method (conventional or dynamic):
	dynamic_ff = True if argv[7] == "True" else False

	# Parameters for rotation:
	rotation = float(argv[8])
	interp = argv[9]
	border = argv[10]
	
	# Nr of threads and log file:
	nr_threads = int(argv[11])
	logfilename = argv[12]		




	# Log input parameters:
	log = open(logfilename,"w")
	log.write(linesep + "\tInput TDF file: %s" % (infile))	
	log.write(linesep + "\tOutput TDF file: %s" % (outfile))		
	log.write(linesep + "\t--------------")	
	log.write(linesep + "\tOpening input dataset...")	
	log.close()
	
	# Remove a previous copy of output:
	if exists(outfile):
		remove(outfile)
	
	# Open the HDF5 file:
	f_in = getHDF5(infile, 'r')


	if "/tomo" in f_in:
		dset = f_in['tomo']

		tomoprefix = 'tomo'
		flatprefix = 'flat'
		darkprefix = 'dark'
	else: 
		dset = f_in['exchange/data']
		if "/provenance/detector_output" in f_in:
			prov_dset = f_in['provenance/detector_output']		
	
			tomoprefix = prov_dset.attrs['tomo_prefix']
			flatprefix = prov_dset.attrs['flat_prefix']
			darkprefix = prov_dset.attrs['dark_prefix']
			
	num_proj = tdf.get_nr_projs(dset)
	num_sinos = tdf.get_nr_sinos(dset)
	
	if (num_sinos == 0):
		log = open(logfilename,"a")
		log.write(linesep + "\tNo projections found. Process will end.")	
		log.close()			
		exit()		

	# Check extrema (int_to == -1 means all files):
	if ((int_to >= num_proj) or (int_to == -1)):
		int_to = num_proj - 1

	# Prepare the work plan for flat and dark images:
	log = open(logfilename,"a")
	log.write(linesep + "\t--------------")
	log.write(linesep + "\tPreparing the work plan...")				
	log.close()

	# Extract flat and darks:
	skipflat = False
	skipdark = False

	# Following variables make sense only for dynamic flat fielding:
	EFF = -1
	filtEFF = -1
	im_dark = -1
	
	# Following variable makes sense only for conventional flat fielding:
	plan = -1

	if not dynamic_ff:
		plan = extract_flatdark(f_in, flat_end, logfilename)
		if (isscalar(plan['im_flat']) and isscalar(plan['im_flat_after'])):
			skipflat = True
		else:
			skipflat = False		
	else:
		# Dynamic flat fielding:
		if "/tomo" in f_in:				
			if "/flat" in f_in:
				flat_dset = f_in['flat']
				if "/dark" in f_in:
					im_dark = _medianize(f_in['dark'])
				else:										
					skipdark = True
			else:
				skipflat = True # Nothing to do in this case
		else: 
			if "/exchange/data_white" in f_in:
				flat_dset = f_in['/exchange/data_white']
				if "/exchange/data_dark" in f_in:
					im_dark = _medianize(f_in['/exchange/data_dark'])
				else:					
					skipdark = True
			else:
				skipflat = True # Nothing to do in this case
	
		# Prepare plan for dynamic flat fielding with 16 repetitions:
		if not skipflat:	
			EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)
	
	# Get the corrected outshape (in this case it's easy):
	im = tdf.read_tomo(dset,0).astype(float32)	
	outshape = tdf.get_dset_shape(im.shape[1], im.shape[0], num_proj)			
	
	# Create the output HDF5 file:
	f_out = getHDF5(outfile, 'w')
	#f_out_dset = f_out.create_dataset('exchange/data', outshape, im.dtype)
	f_out_dset = f_out.create_dataset('exchange/data', outshape, float16) 
	f_out_dset.attrs['min'] = str(amin(im[:]))
	f_out_dset.attrs['max'] = str(amax(im[:]))
	f_out_dset.attrs['version'] = '1.0'
	f_out_dset.attrs['axes'] = "y:theta:x"
	f_out_dset.attrs['rescale_factor'] = str(rescale_factor)

	f_out.close()
	f_in.close()
		
	# Log info:
	log = open(logfilename,"a")
	log.write(linesep + "\tWork plan prepared correctly.")	
	log.write(linesep + "\t--------------")
	log.write(linesep + "\tPerforming pre processing...")			
	log.close()	

	# Run several threads for independent computation without waiting for threads
	# completion:
	for num in range(nr_threads):
		start = (num_proj / nr_threads) * num
		if (num == nr_threads - 1):
			end = num_proj - 1
		else:
			end = (num_proj / nr_threads) * (num + 1) - 1
		Process(target=_process, args=(lock, start, end, infile, outfile, outshape, float16, skipflat, plan, 
				   flat_end, half_half, half_half_line, dynamic_ff, EFF, filtEFF, im_dark, rotation, interp, border, 
				   rescale_factor, logfilename)).start()
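
Note: the output dataset above is created as float16 and tagged with a 'rescale_factor' attribute (10000.0). A plausible round-trip of that convention, shown with synthetic values (an assumption, since the actual scaling happens inside _process, which is not part of this excerpt):

from numpy import float16, float32, linspace

rescale_factor = 10000.0
data = linspace(0.0, 1.5, 5, dtype=float32)            # e.g. flat-fielded values

stored    = (data * rescale_factor).astype(float16)    # what would be written to disk
recovered = stored.astype(float32) / rescale_factor    # what a reader would undo
# recovered matches data within float16 precision (about 3 significant digits).
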
Code Example #19
def main(argv):
    """To do...

	Usage
	-----
	

	Parameters
	---------
		   
	Example
	--------------------------
	The following line processes the first ten TIFF files of input path 
	"/home/in" and saves the processed files to "/home/out" with the 
	application of the Boin and Haibel filter with smoothing via a Butterworth
	filter of order 4 and cutoff frequency 0.01:

	reconstruct 0 4 C:\Temp\Dullin_Aug_2012\sino_noflat C:\Temp\Dullin_Aug_2012\sino_noflat\output 
	9.0 10.0 0.0 0.0 0.0 true sino slice C:\Temp\Dullin_Aug_2012\sino_noflat\tomo_conv flat dark

	"""
    lock = Lock()
    skip_flat = False
    skip_flat_after = True

    # Get the from and to number of files to process:
    int_from = int(argv[0])
    int_to = int(argv[1])

    # Get paths:
    infile = argv[2]
    outpath = argv[3]

    # Essential reconstruction parameters:
    angles = float(argv[4])
    offset = float(argv[5])
    param1 = argv[6]
    scale = int(float(argv[7]))

    overpad = True if argv[8] == "True" else False
    logtrsf = True if argv[9] == "True" else False
    circle = True if argv[10] == "True" else False

    outprefix = argv[11]

    # Parameters for on-the-fly pre-processing:
    preprocessing_required = True if argv[12] == "True" else False
    flat_end = True if argv[13] == "True" else False
    half_half = True if argv[14] == "True" else False

    half_half_line = int(argv[15])

    ext_fov = True if argv[16] == "True" else False

    norm_sx = int(argv[21])
    norm_dx = int(argv[22])

    ext_fov_rot_right = argv[17]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        if (ext_fov):
            norm_dx = 0

    ext_fov_overlap = int(argv[18])

    ext_fov_normalize = True if argv[19] == "True" else False
    ext_fov_average = True if argv[20] == "True" else False

    skip_ringrem = True if argv[23] == "True" else False
    ringrem = argv[24]

    # Extra reconstruction parameters:
    zerone_mode = True if argv[25] == "True" else False
    corr_offset = float(argv[26])

    reconmethod = argv[27]
    # Force overpadding in case of GRIDREC for unknown reasons:
    if reconmethod == "GRIDREC":
        overpad = True

    decim_factor = int(argv[28])
    downsc_factor = int(argv[29])

    # Parameters for postprocessing:
    postprocess_required = True if argv[30] == "True" else False
    polarfilt_opt = argv[31]
    convert_opt = argv[32]
    crop_opt = argv[33]

    angles_projfrom = int(argv[34])
    angles_projto = int(argv[35])

    rolling = True if argv[36] == "True" else False
    roll_shift = int(int(argv[37]) / decim_factor)

    dynamic_ff = True if argv[38] == "True" else False

    nr_threads = int(argv[39])
    logfilename = argv[40]
    process_id = int(logfilename[-6:-4])
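    # Note (added comment): logfilename[-6:-4] picks the two characters just
    # before a four-character suffix; e.g. "recon_03.log"[-6:-4] == "03", so the
    # process id is assumed to be encoded as two digits in the log file name.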

    # Check prefixes and path:
    #if not infile.endswith(sep): infile += sep
    if not exists(outpath):
        makedirs(outpath)

    if not outpath.endswith(sep): outpath += sep

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']

        tomoprefix = 'tomo'
        flatprefix = 'flat'
        darkprefix = 'dark'
    else:
        dset = f_in['exchange/data']
        if "/provenance/detector_output" in f_in:
            prov_dset = f_in['provenance/detector_output']

            tomoprefix = prov_dset.attrs['tomo_prefix']
            flatprefix = prov_dset.attrs['flat_prefix']
            darkprefix = prov_dset.attrs['dark_prefix']

    dset_min = -1
    dset_max = -1
    if (zerone_mode):
        if ('min' in dset.attrs):
            dset_min = float(dset.attrs['min'])
        else:
            zerone_mode = False

        if ('max' in dset.attrs):
            dset_max = float(dset.attrs['max'])
        else:
            zerone_mode = False

    num_sinos = tdf.get_nr_sinos(dset)  # Pay attention to the downscale factor

    if (num_sinos == 0):
        log = open(logfilename, "a")
        log.write(linesep + "\tNo projections found. Process will end.")
        log.close()
        exit()

    # Check extrema (int_to == -1 means all files):
    if ((int_to >= num_sinos / downsc_factor) or (int_to == -1)):
        int_to = num_sinos / downsc_factor - 1

    # Log info:
    log = open(logfilename, "w")
    log.write(linesep + "\tInput file: %s" % (infile))
    log.write(linesep + "\tOutput path: %s" % (outpath))
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPreparing the work plan...")
    log.close()

    # Get correction plan and phase retrieval plan (if required):
    corrplan = -1
    phrtplan = -1

    skipflat = False

    im_dark = -1
    EFF = -1
    filtEFF = -1
    if (preprocessing_required):
        if not dynamic_ff:
            # Load the flat fielding plan from the TDF file:
            corrplan = extract_flatdark(f_in, flat_end, logfilename)
            if (isscalar(corrplan['im_flat'])
                    and isscalar(corrplan['im_flat_after'])):
                skipflat = True

            # Downscale flat and dark images if necessary:
            if isinstance(corrplan['im_flat'], ndarray):
                corrplan['im_flat'] = corrplan[
                    'im_flat'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_dark'], ndarray):
                corrplan['im_dark'] = corrplan[
                    'im_dark'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_flat_after'], ndarray):
                corrplan['im_flat_after'] = corrplan[
                    'im_flat_after'][::downsc_factor, ::downsc_factor]
            if isinstance(corrplan['im_dark_after'], ndarray):
                corrplan['im_dark_after'] = corrplan[
                    'im_dark_after'][::downsc_factor, ::downsc_factor]

        else:
            # Dynamic flat fielding:
            if "/tomo" in f_in:
                if "/flat" in f_in:
                    flat_dset = f_in['flat']
                    if "/dark" in f_in:
                        im_dark = _medianize(f_in['dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True  # Nothing to do in this case
            else:
                if "/exchange/data_white" in f_in:
                    flat_dset = f_in['/exchange/data_white']
                    if "/exchange/data_dark" in f_in:
                        im_dark = _medianize(f_in['/exchange/data_dark'])
                    else:
                        skipdark = True
                else:
                    skipflat = True  # Nothing to do in this case

            # Prepare plan for dynamic flat fielding with 16 repetitions:
            if not skipflat:
                EFF, filtEFF = dff_prepare_plan(flat_dset, 16, im_dark)

                # Downscale images if necessary:
                im_dark = im_dark[::downsc_factor, ::downsc_factor]
                EFF = EFF[::downsc_factor, ::downsc_factor, :]
                filtEFF = filtEFF[::downsc_factor, ::downsc_factor, :]

    f_in.close()

    # Log info:
    log = open(logfilename, "a")
    log.write(linesep + "\tWork plan prepared correctly.")
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tPerforming reconstruction...")
    log.close()

    # Run several threads for independent computation without waiting for threads
    # completion:
    for num in range(nr_threads):
        start = ((int_to - int_from + 1) / nr_threads) * num + int_from
        if (num == nr_threads - 1):
            end = int_to
        else:
            end = ((int_to - int_from + 1) / nr_threads) * (num +
                                                            1) + int_from - 1
        if (reconmethod == 'GRIDREC'):
            Process(
                target=process_gridrec,
                args=(lock, start, end, num_sinos, infile, outpath,
                      preprocessing_required, skipflat, corrplan, norm_sx,
                      norm_dx, flat_end, half_half, half_half_line, ext_fov,
                      ext_fov_rot_right, ext_fov_overlap, ext_fov_normalize,
                      ext_fov_average, ringrem, angles, angles_projfrom,
                      angles_projto, offset, logtrsf, param1, circle, scale,
                      overpad, rolling, roll_shift, zerone_mode, dset_min,
                      dset_max, decim_factor, downsc_factor, corr_offset,
                      postprocess_required, polarfilt_opt, convert_opt,
                      crop_opt, dynamic_ff, EFF, filtEFF, im_dark, outprefix,
                      logfilename)).start()
        else:
            Process(
                target=process,
                args=(lock, start, end, num_sinos, infile, outpath,
                      preprocessing_required, skipflat, corrplan, norm_sx,
                      norm_dx, flat_end, half_half, half_half_line, ext_fov,
                      ext_fov_rot_right, ext_fov_overlap, ext_fov_normalize,
                      ext_fov_average, ringrem, angles, angles_projfrom,
                      angles_projto, offset, logtrsf, param1, circle, scale,
                      overpad, reconmethod, rolling, roll_shift, zerone_mode,
                      dset_min, dset_max, decim_factor, downsc_factor,
                      corr_offset, postprocess_required, polarfilt_opt,
                      convert_opt, crop_opt, dynamic_ff, EFF, filtEFF, im_dark,
                      outprefix, logfilename)).start()
Code Example #20
def main(argv):
    """To do...



	"""
    lock = Lock()
    skip_flat = False
    skip_flat_after = True

    # Get the from and to number of files to process:
    sino_idx = int(argv[0])

    # Get paths:
    infile = argv[1]
    outpath = argv[2]

    # Essential reconstruction parameters::
    angles = float(argv[3])
    off_step = float(argv[4])
    param1 = argv[5]
    scale = int(float(argv[6]))

    overpad = True if argv[7] == "True" else False
    logtrsf = True if argv[8] == "True" else False
    circle = True if argv[9] == "True" else False

    # Parameters for on-the-fly pre-processing:
    preprocessing_required = True if argv[10] == "True" else False
    flat_end = True if argv[11] == "True" else False
    half_half = True if argv[12] == "True" else False

    half_half_line = int(argv[13])

    ext_fov = True if argv[14] == "True" else False

    norm_sx = int(argv[19])
    norm_dx = int(argv[20])

    ext_fov_rot_right = argv[15]
    if ext_fov_rot_right == "True":
        ext_fov_rot_right = True
        if (ext_fov):
            norm_sx = 0
    else:
        ext_fov_rot_right = False
        if (ext_fov):
            norm_dx = 0

    ext_fov_overlap = int(argv[16])

    ext_fov_normalize = True if argv[17] == "True" else False
    ext_fov_average = True if argv[18] == "True" else False

    skip_ringrem = True if argv[21] == "True" else False
    ringrem = argv[22]

    # Extra reconstruction parameters:
    zerone_mode = True if argv[23] == "True" else False
    corr_offset = float(argv[24])

    reconmethod = argv[25]

    decim_factor = int(argv[26])
    downsc_factor = int(argv[27])

    # Parameters for postprocessing:
    postprocess_required = True if argv[28] == "True" else False
    convert_opt = argv[29]
    crop_opt = argv[30]

    # Parameters for on-the-fly phase retrieval:
    phaseretrieval_required = True if argv[31] == "True" else False
    phrtmethod = int(argv[32])
    phrt_param1 = double(argv[33])  # param1 (e.g. regParam or beta)
    phrt_param2 = double(argv[34])  # param2 (e.g. thresh or delta)
    energy = double(argv[35])
    distance = double(argv[36])
    pixsize = double(argv[37]) / 1000.0  # pixel size from micron to mm
    phrtpad = True if argv[38] == "True" else False
    approx_win = int(argv[39])

    angles_projfrom = int(argv[40])
    angles_projto = int(argv[41])

    preprocessingplan_fromcache = True if argv[42] == "True" else False
    tmppath = argv[43]
    if not tmppath.endswith(sep): tmppath += sep

    nr_threads = int(argv[44])
    off_from = float(argv[45])
    off_to = float(argv[46])

    slice_prefix = argv[47]

    logfilename = argv[48]

    if not exists(outpath):
        makedirs(outpath)

    if not outpath.endswith(sep): outpath += sep

    # Log info:
    log = open(logfilename, "w")
    log.write(linesep + "\tInput dataset: %s" % (infile))
    log.write(linesep + "\tOutput path: %s" % (outpath))
    log.write(linesep + "\t--------------")
    log.write(linesep + "\tLoading flat and dark images...")
    log.close()

    # Open the HDF5 file:
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
    else:
        dset = f_in['exchange/data']
        if "/provenance/detector_output" in f_in:
            prov_dset = f_in['provenance/detector_output']

    dset_min = -1
    dset_max = -1
    if (zerone_mode):
        if ('min' in dset.attrs):
            dset_min = float(dset.attrs['min'])
        else:
            zerone_mode = False

        if ('max' in dset.attrs):
            dset_max = float(dset.attrs['max'])
        else:
            zerone_mode = False

    num_sinos = tdf.get_nr_sinos(dset)  # Pay attention to the downscale factor

    if (num_sinos == 0):
        exit()

    # Check extrema:
    if (sino_idx >= num_sinos / downsc_factor):
        sino_idx = num_sinos / downsc_factor - 1

    # Get correction plan and phase retrieval plan (if required):
    corrplan = 0
    if (preprocessing_required):
        # Load flat fielding plan either from cache (if required) or from TDF file and cache it for faster re-use:
        if (preprocessingplan_fromcache):
            try:
                corrplan = cache2plan(infile, tmppath)
            except Exception as e:
                #print "Error(s) when reading from cache"
                corrplan = extract_flatdark(f_in, flat_end, logfilename)
                plan2cache(corrplan, infile, tmppath)
        else:
            corrplan = extract_flatdark(f_in, flat_end, logfilename)
            plan2cache(corrplan, infile, tmppath)

        # Downscale flat and dark images if necessary:
        if isinstance(corrplan['im_flat'], ndarray):
            corrplan['im_flat'] = corrplan[
                'im_flat'][::downsc_factor, ::downsc_factor]
        if isinstance(corrplan['im_dark'], ndarray):
            corrplan['im_dark'] = corrplan[
                'im_dark'][::downsc_factor, ::downsc_factor]
        if isinstance(corrplan['im_flat_after'], ndarray):
            corrplan['im_flat_after'] = corrplan[
                'im_flat_after'][::downsc_factor, ::downsc_factor]
        if isinstance(corrplan['im_dark_after'], ndarray):
            corrplan['im_dark_after'] = corrplan[
                'im_dark_after'][::downsc_factor, ::downsc_factor]

    f_in.close()

    # Log info:
    log = open(logfilename, "a")
    log.write(linesep + "\tPerforming preprocessing...")
    log.close()

    # Run computation:
    process(sino_idx, num_sinos, infile, outpath, preprocessing_required,
            corrplan, norm_sx, norm_dx, flat_end, half_half, half_half_line,
            ext_fov, ext_fov_rot_right, ext_fov_overlap, ext_fov_normalize,
            ext_fov_average, ringrem, phaseretrieval_required, phrtmethod,
            phrt_param1, phrt_param2, energy, distance, pixsize, phrtpad,
            approx_win, angles, angles_projfrom, angles_projto, off_step,
            logtrsf, param1, circle, scale, overpad, reconmethod, zerone_mode,
            dset_min, dset_max, decim_factor, downsc_factor, corr_offset,
            postprocess_required, convert_opt, crop_opt, nr_threads, off_from,
            off_to, logfilename, lock, slice_prefix)
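
Note: this variant receives an offset range (off_from, off_to) and a step (off_step), presumably so that process() can reconstruct the same sinogram once per candidate center offset for visual inspection. A sketch of that enumeration (an assumption about the internals of process(), which is not part of this excerpt):

from numpy import arange

off_from, off_to, off_step = -10.0, 10.0, 2.5
candidate_offsets = arange(off_from, off_to + off_step / 2, off_step)
# -> -10.0, -7.5, -5.0, -2.5, 0.0, 2.5, 5.0, 7.5, 10.0
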
Code Example #21
def main(argv):
    """Try to guess the center of rotation of the input CT dataset.

    Parameters
    ----------
    infile  : string
        Path of the HDF5 input dataset

    outfile : string
        Full path where the identified center of rotation will be written as output

    scale   : float
        If sub-pixel precision is desired, use e.g. 2.0 to get a center of rotation
        with 0.5 precision. Use 1.0 if sub-pixel precision is not required

    angles  : float
        Total angular range of the input dataset, in radians (e.g. pi or 2*pi)

    proj_from : int
        Initial projection to consider for the assumed angles

    proj_to : int
        Final projection to consider for the assumed angles

    method : string
        (not implemented yet)

    tmppath : string
        Temporary path where cached flat/dark files are looked for
    """
    # Get path:
    infile = argv[0]  # The HDF5 file on the
    outfile = argv[1]  # The txt file with the proposed center
    scale = float(argv[2])
    angles = float(argv[3])
    proj_from = int(argv[4])
    proj_to = int(argv[5])
    method = argv[6]
    tmppath = argv[7]
    if not tmppath.endswith(sep): tmppath += sep

    pyfftw_cache_disable()
    pyfftw_cache_enable()
    pyfftw_set_keepalive_time(1800)

    # Create a silly temporary log:
    tmplog = tmppath + basename(infile) + str(time.time())

    # Open the HDF5 file (take into account also older TDF versions):
    f_in = getHDF5(infile, 'r')
    if "/tomo" in f_in:
        dset = f_in['tomo']
    else:
        dset = f_in['exchange/data']
    num_proj = tdf.get_nr_projs(dset)
    num_sinos = tdf.get_nr_sinos(dset)

    # Get flats and darks from cache or from file:
    try:
        corrplan = cache2plan(infile, tmppath)
    except Exception as e:
        #print "Error(s) when reading from cache"
        corrplan = extract_flatdark(f_in, True, tmplog)
        remove(tmplog)
        plan2cache(corrplan, infile, tmppath)

    # Get first and the 180 deg projections:
    im1 = tdf.read_tomo(dset, proj_from).astype(float32)

    idx = int(round((proj_to - proj_from) / angles * pi)) + proj_from
    im2 = tdf.read_tomo(dset, idx).astype(float32)

    # Apply simple flat fielding (if applicable):
    if (isinstance(corrplan['im_flat_after'], ndarray)
            and isinstance(corrplan['im_flat'], ndarray)
            and isinstance(corrplan['im_dark'], ndarray)
            and isinstance(corrplan['im_dark_after'], ndarray)):
        im1 = ((abs(im1 - corrplan['im_dark'])) /
               (abs(corrplan['im_flat'] - corrplan['im_dark']) +
                finfo(float32).eps)).astype(float32)
        im2 = ((abs(im2 - corrplan['im_dark_after'])) /
               (abs(corrplan['im_flat_after'] - corrplan['im_dark_after']) +
                finfo(float32).eps)).astype(float32)

    # Scale projections (if required) to get subpixel estimation:
    if (abs(scale - 1.0) > finfo(float32).eps):
        im1 = imresize(im1, (int(round(
            scale * im1.shape[0])), int(round(scale * im1.shape[1]))),
                       interp='bicubic',
                       mode='F')
        im2 = imresize(im2, (int(round(
            scale * im2.shape[0])), int(round(scale * im2.shape[1]))),
                       interp='bicubic',
                       mode='F')

    # Find the center (flipping left-right im2):
    cen = findcenter.usecorrelation(im1, im2[:, ::-1])
    cen = cen / scale

    # Print center to output file:
    text_file = open(outfile, "w")
    text_file.write(str(int(cen)))
    text_file.close()

    # Close input HDF5:
    f_in.close()
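
Note: the index of the projection acquired half a turn after proj_from is derived from the ratio pi / angles, which implies that angles holds the angular range of the scan in radians rather than a projection count. A worked example with synthetic numbers under that assumption:

from math import pi

angles, proj_from, proj_to = 2 * pi, 0, 1200   # full 360-degree scan spanning 1200 projections
idx = int(round((proj_to - proj_from) / angles * pi)) + proj_from
# idx == 600: the projection taken 180 degrees after proj_from.
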