def test_validkwargs_valid_kwargs(self):
    self.assertTrue(
        valid.valid_kwargs(
            kwargs={"test": 0, "red": None},
            allowed_kwargs={"test", "red"},
        )
    )

def reload_cdi_data(
    data,
    mask,
    scan_number,
    setup,
    normalize_method="skip",
    debugging=False,
    **kwargs,
):
    """
    Reload forward CDI data, apply optional threshold, normalization and binning.

    :param data: the 3D data array
    :param mask: the 3D mask array
    :param scan_number: the scan number to load
    :param setup: an instance of the class Setup
    :param normalize_method: 'skip' to skip, 'monitor' to normalize by the default
     monitor, 'sum_roi' to normalize by the integrated intensity in a defined region
     of interest
    :param debugging: set to True to see plots
    :param kwargs:
     - 'photon_threshold': float, photon threshold to apply before binning

    :return:
     - the updated 3D data and mask arrays
     - the monitor values used for the intensity normalization

    """
    valid.valid_ndarray(arrays=(data, mask), ndim=3)
    # check and load kwargs
    valid.valid_kwargs(
        kwargs=kwargs,
        allowed_kwargs={"photon_threshold"},
        name="kwargs",
    )
    photon_threshold = kwargs.get("photon_threshold", 0)
    valid.valid_item(
        photon_threshold,
        allowed_types=Real,
        min_included=0,
        name="photon_threshold",
    )
    nbz, nby, nbx = data.shape
    frames_logical = np.ones(nbz)

    print(
        (data < 0).sum(), " negative data points masked"
    )  # can happen when subtracting a background
    mask[data < 0] = 1
    data[data < 0] = 0

    # normalize by the incident X-ray beam intensity
    if normalize_method == "skip":
        print("Skip intensity normalization")
        monitor = []
    else:
        if normalize_method == "sum_roi":
            monitor = data[
                :,
                setup.detector.sum_roi[0] : setup.detector.sum_roi[1],
                setup.detector.sum_roi[2] : setup.detector.sum_roi[3],
            ].sum(axis=(1, 2))
        else:  # use the default monitor of the beamline
            monitor = setup.loader.read_monitor(
                scan_number=scan_number,
                setup=setup,
            )

        print("Intensity normalization using " + normalize_method)
        data, monitor = loader.normalize_dataset(
            array=data,
            monitor=monitor,
            norm_to_min=True,
            savedir=setup.detector.savedir,
            debugging=True,
        )

    # pad the data to the shape defined by the ROI
    if (
        setup.detector.roi[1] - setup.detector.roi[0] > nby
        or setup.detector.roi[3] - setup.detector.roi[2] > nbx
    ):
        start = (
            0,
            max(0, abs(setup.detector.roi[0])),
            max(0, abs(setup.detector.roi[2])),
        )
        print("Padding the data to the shape defined by the ROI")
        data = util.crop_pad(
            array=data,
            pad_start=start,
            output_shape=(
                data.shape[0],
                setup.detector.roi[1] - setup.detector.roi[0],
                setup.detector.roi[3] - setup.detector.roi[2],
            ),
        )
        mask = util.crop_pad(
            array=mask,
            pad_value=1,
            pad_start=start,
            output_shape=(
                mask.shape[0],
                setup.detector.roi[1] - setup.detector.roi[0],
                setup.detector.roi[3] - setup.detector.roi[2],
            ),
        )

    # apply optional photon threshold before binning
    if photon_threshold != 0:
        mask[data < photon_threshold] = 1
        data[data < photon_threshold] = 0
        print("Applying photon threshold before binning: < ", photon_threshold)

    # bin data and mask in the detector plane if needed
    # binning in the stacking dimension is done at the very end of the data processing
    if (setup.detector.binning[1] != 1) or (setup.detector.binning[2] != 1):
        print(
            "Binning the data: detector vertical axis by",
            setup.detector.binning[1],
            ", detector horizontal axis by",
            setup.detector.binning[2],
        )
        data = util.bin_data(
            data,
            (1, setup.detector.binning[1], setup.detector.binning[2]),
            debugging=debugging,
        )
        mask = util.bin_data(
            mask,
            (1, setup.detector.binning[1], setup.detector.binning[2]),
            debugging=debugging,
        )
        mask[np.nonzero(mask)] = 1

    return data, mask, frames_logical, monitor

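# Hedged usage sketch for reload_cdi_data, assuming an already instantiated Setup
# object ("setup") and 3D "data"/"mask" arrays coming from a previous preprocessing
# run; the variable names and the scan number below are illustrative, not defined
# in this module:
#
#     data, mask, frames_logical, monitor = reload_cdi_data(
#         data=data,
#         mask=mask,
#         scan_number=1,
#         setup=setup,
#         normalize_method="monitor",
#         photon_threshold=5,  # optional kwarg checked by valid_kwargs above
#     )
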
def load_cdi_data(
    scan_number,
    setup,
    bin_during_loading=False,
    flatfield=None,
    hotpixels=None,
    background=None,
    normalize="skip",
    debugging=False,
    **kwargs,
):
    """
    Load forward CDI data and preprocess it.

    It applies beam stop correction and an optional photon threshold, normalization
    and binning.

    :param scan_number: the scan number to load
    :param setup: an instance of the class Setup
    :param bin_during_loading: True to bin the data during loading (faster)
    :param flatfield: the 2D flatfield array
    :param hotpixels: the 2D hotpixels array. 1 for a hotpixel, 0 for normal pixels.
    :param background: the 2D background array to subtract from the data
    :param normalize: 'skip' to skip, 'monitor' to normalize by the default monitor,
     'sum_roi' to normalize by the integrated intensity in the region of interest
     defined by detector.sum_roi
    :param debugging: set to True to see plots
    :param kwargs:
     - 'photon_threshold': float, photon threshold to apply before binning
     - 'frames_pattern': 1D array of int, of length data.shape[0]. If frames_pattern
       is 0 at index, the frame at data[index] will be skipped, if 1 the frame will
       be added to the stack.

    :return:
     - the 3D data and mask arrays
     - frames_logical: array of initial length the number of measured frames.
       In case of padding the length changes. A frame whose index is set to 1 means
       that it is used, 0 means not used, -1 means padded (added) frame.
     - the monitor values used for the intensity normalization

    """
    valid.valid_item(bin_during_loading, allowed_types=bool, name="bin_during_loading")
    # check and load kwargs
    valid.valid_kwargs(
        kwargs=kwargs,
        allowed_kwargs={"photon_threshold", "frames_pattern"},
        name="kwargs",
    )
    photon_threshold = kwargs.get("photon_threshold", 0)
    valid.valid_item(
        photon_threshold,
        allowed_types=Real,
        min_included=0,
        name="photon_threshold",
    )
    frames_pattern = kwargs.get("frames_pattern")
    valid.valid_1d_array(
        frames_pattern,
        allow_none=True,
        allowed_values={0, 1},
        name="frames_pattern",
    )

    rawdata, rawmask, monitor, frames_logical = setup.loader.load_check_dataset(
        scan_number=scan_number,
        setup=setup,
        frames_pattern=frames_pattern,
        bin_during_loading=bin_during_loading,
        flatfield=flatfield,
        hotpixels=hotpixels,
        background=background,
        normalize=normalize,
        debugging=debugging,
    )

    #################################
    # apply the beamstop correction #
    #################################
    rawdata = beamstop_correction(data=rawdata, setup=setup, debugging=debugging)

    #####################################################
    # apply an optional photon threshold before binning #
    #####################################################
    if photon_threshold != 0:
        rawmask[rawdata < photon_threshold] = 1
        rawdata[rawdata < photon_threshold] = 0
        print("Applying photon threshold before binning: < ", photon_threshold)

    ####################################################################################
    # bin data and mask in the detector plane if not already done during loading       #
    # binning in the stacking dimension is done at the very end of the data processing #
    ####################################################################################
    if not bin_during_loading and (
        (setup.detector.binning[1] != 1) or (setup.detector.binning[2] != 1)
    ):
        print(
            "Binning the data: detector vertical axis by",
            setup.detector.binning[1],
            ", detector horizontal axis by",
            setup.detector.binning[2],
        )
        rawdata = util.bin_data(
            rawdata,
            (1, setup.detector.binning[1], setup.detector.binning[2]),
            debugging=False,
        )
        rawmask = util.bin_data(
            rawmask,
            (1, setup.detector.binning[1], setup.detector.binning[2]),
            debugging=False,
        )
        rawmask[np.nonzero(rawmask)] = 1

    ################################################
    # pad the data to the shape defined by the ROI #
    ################################################
    rawdata, rawmask = util.pad_from_roi(
        arrays=(rawdata, rawmask),
        roi=setup.detector.roi,
        binning=setup.detector.binning[1:],
        pad_value=(0, 1),
    )

    return rawdata, rawmask, frames_logical, monitor

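# Hedged usage sketch for load_cdi_data, assuming a configured Setup instance
# ("setup") whose loader knows how to read the scan; the flatfield/hotpixels arrays
# and the frames_pattern length are placeholders for illustration only:
#
#     import numpy as np
#
#     frames_pattern = np.ones(100, dtype=int)  # keep all measured frames
#     rawdata, rawmask, frames_logical, monitor = load_cdi_data(
#         scan_number=1,
#         setup=setup,
#         bin_during_loading=True,
#         flatfield=flatfield,
#         hotpixels=hotpixels,
#         normalize="sum_roi",
#         frames_pattern=frames_pattern,
#     )
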
def partial_coherence_rl(
    measured_intensity, coherent_intensity, iterations=20, debugging=False, **kwargs
):
    """
    Partial coherence deconvolution using Richardson-Lucy algorithm.

    See J.N. Clark et al., Nat. Comm. 3, 993 (2012).

    :param measured_intensity: measured object with partial coherent illumination
    :param coherent_intensity: estimate of the object measured by a fully coherent
     illumination
    :param iterations: number of iterations for the Richardson-Lucy algorithm
    :param debugging: True to see plots
    :param kwargs:
     - 'scale': scale for the plot, 'linear' or 'log'
     - 'reciprocal_space': True if the data is in reciprocal space, False otherwise.
     - 'is_orthogonal': set to True if the frame is orthogonal, False otherwise
       (detector frame). Used for plot labels.
     - 'vmin': lower boundary for the colorbar. Float or tuple of 3 floats
     - 'vmax': higher boundary for the colorbar. Float or tuple of 3 floats
     - 'guess': ndarray, initial guess for the psf, of the same shape as
       measured_intensity

    :return: the retrieved psf (ndarray), the error metric (1D ndarray of
     len=iterations)
    """
    validation_name = 'algorithms_utils.psf_rl'
    # check and load kwargs
    valid.valid_kwargs(
        kwargs=kwargs,
        allowed_kwargs={
            'scale', 'reciprocal_space', 'is_orthogonal', 'vmin', 'vmax', 'guess'
        },
        name=validation_name,
    )
    scale = kwargs.get('scale', 'log')
    if scale not in {'log', 'linear'}:
        raise ValueError('"scale" should be either "log" or "linear"')
    reciprocal_space = kwargs.get('reciprocal_space', True)
    if not isinstance(reciprocal_space, bool):
        raise TypeError('"reciprocal_space" should be a boolean')
    is_orthogonal = kwargs.get('is_orthogonal', True)
    if not isinstance(is_orthogonal, bool):
        raise TypeError('"is_orthogonal" should be a boolean')
    vmin = kwargs.get('vmin', np.nan)
    valid.valid_item(vmin, allowed_types=Real, name=validation_name)
    vmax = kwargs.get('vmax', np.nan)
    valid.valid_item(vmax, allowed_types=Real, name=validation_name)
    guess = kwargs.get('guess', None)
    if guess is not None:
        if not isinstance(guess, np.ndarray):
            raise TypeError(f"guess should be a ndarray, got {type(guess)}")
        if guess.shape != measured_intensity.shape:
            raise ValueError(
                'the guess array should have the same shape as measured_intensity'
            )

    # calculate the psf
    psf, error = richardson_lucy(
        image=measured_intensity,
        psf=coherent_intensity,
        iterations=iterations,
        clip=False,
        guess=guess,
    )

    # optional plot
    if debugging:
        gu.multislices_plot(
            psf,
            scale=scale,
            sum_frames=False,
            title='psf',
            vmin=vmin,
            vmax=vmax,
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
            plot_colorbar=True,
        )
        _, ax = plt.subplots(figsize=(12, 9))
        ax.plot(error, 'r.')
        ax.set_yscale('log')
        ax.set_xlabel('iteration number')
        ax.set_ylabel('difference between consecutive iterates')
    return psf, error

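# Hedged usage sketch for partial_coherence_rl, assuming "diffraction_pattern" is the
# measured intensity and "coherent_estimate" is e.g. the intensity computed from a
# reconstructed object under fully coherent illumination (both names are hypothetical):
#
#     psf, error = partial_coherence_rl(
#         measured_intensity=diffraction_pattern,
#         coherent_intensity=coherent_estimate,
#         iterations=20,
#         debugging=True,
#         scale="log",
#         guess=None,
#     )
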
def blind_deconvolution_rl(
    blurred_object,
    perfect_object,
    psf,
    nb_cycles=10,
    sub_iterations=10,
    update_psf_first=True,
    debugging=False,
    **kwargs,
):
    """
    Blind deconvolution using Richardson-Lucy algorithm.

    Estimates of the perfect object and psf have to be provided. See Figure 1 and
    equations (4) & (5) in D. A. Fish et al. J. Opt. Soc. Am. A, 12, 58 (1995).

    :param blurred_object: ndarray, measured object with partial coherent illumination
    :param perfect_object: ndarray, estimate of the object measured by a fully
     coherent illumination, same shape as blurred_object
    :param psf: ndarray, estimate of the psf, same shape as blurred_object
    :param nb_cycles: number of blind deconvolution iterations
    :param sub_iterations: number of iterations of the Richardson-Lucy algorithm
     during a single blind iteration
    :param update_psf_first: bool, if True the psf estimate is updated first and
     then the perfect object estimate
    :param debugging: True to see plots
    :param kwargs:
     - 'scale': tuple, scale for the plots, 'linear' or 'log'
     - 'reciprocal_space': bool, True if the data is in reciprocal space,
       False otherwise.
     - 'is_orthogonal': bool, True if the frame is orthogonal, False otherwise
       (detector frame). Used for plot labels.
     - 'vmin': tuple of two floats (np.nan to use default), lower boundary for the
       colorbars
     - 'vmax': tuple of two floats (np.nan to use default), higher boundary for the
       colorbars

    :return: the retrieved psf (ndarray)
    """
    validation_name = 'algorithms_utils.psf_rl'
    # check and load kwargs
    valid.valid_kwargs(
        kwargs=kwargs,
        allowed_kwargs={'scale', 'reciprocal_space', 'is_orthogonal', 'vmin', 'vmax'},
        name=validation_name,
    )
    scale = kwargs.get('scale', ('linear', 'log'))
    valid.valid_container(
        scale, container_types=(tuple, list), length=2, name=validation_name
    )
    if not all(val in {'log', 'linear'} for val in scale):
        raise ValueError('"scale" should be either "log" or "linear"')
    reciprocal_space = kwargs.get('reciprocal_space', True)
    if not isinstance(reciprocal_space, bool):
        raise TypeError('"reciprocal_space" should be a boolean')
    is_orthogonal = kwargs.get('is_orthogonal', True)
    if not isinstance(is_orthogonal, bool):
        raise TypeError('"is_orthogonal" should be a boolean')
    vmin = kwargs.get('vmin', (np.nan, np.nan))
    valid.valid_container(
        vmin, container_types=(tuple, list), item_types=Real, name=validation_name
    )
    vmax = kwargs.get('vmax', (np.nan, np.nan))
    valid.valid_container(
        vmax, container_types=(tuple, list), item_types=Real, name=validation_name
    )

    # check parameters
    if not isinstance(blurred_object, np.ndarray):
        raise TypeError(
            f"blurred_object should be a ndarray, got {type(blurred_object)}"
        )
    if not isinstance(perfect_object, np.ndarray):
        raise TypeError(
            f"perfect_object should be a ndarray, got {type(perfect_object)}"
        )
    if not isinstance(psf, np.ndarray):
        raise TypeError(f"psf should be a ndarray, got {type(psf)}")
    if not isinstance(debugging, bool):
        raise TypeError('"debugging" should be a boolean')
    if not isinstance(update_psf_first, bool):
        raise TypeError('"update_psf_first" should be a boolean')
    if (
        perfect_object.shape != blurred_object.shape
        or psf.shape != blurred_object.shape
    ):
        raise ValueError(
            'blurred_object, perfect_object and psf should have the same shape'
        )

    ########################
    # plot initial guesses #
    ########################
    if debugging:
        gu.multislices_plot(
            perfect_object,
            scale=scale[0],
            sum_frames=False,
            title='guessed perfect object',
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
            vmin=vmin[0],
            vmax=vmax[0],
            plot_colorbar=True,
        )
        gu.multislices_plot(
            psf,
            scale=scale[1],
            sum_frames=False,
            title='guessed psf',
            vmin=vmin[1],
            vmax=vmax[1],
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
            plot_colorbar=True,
        )

    ###########################################
    # loop over the blind deconvolution steps #
    ###########################################
    for cycle in range(nb_cycles):
        if update_psf_first:
            # update the estimate of the psf
            psf, _ = richardson_lucy(
                image=blurred_object,
                psf=perfect_object,
                iterations=sub_iterations,
                clip=False,
                guess=psf,
            )
            # update the estimate of the perfect object
            perfect_object, _ = richardson_lucy(
                image=blurred_object,
                psf=psf,
                iterations=sub_iterations,
                clip=True,
                guess=perfect_object,
            )
        else:
            # update the estimate of the perfect object
            perfect_object, _ = richardson_lucy(
                image=blurred_object,
                psf=psf,
                iterations=sub_iterations,
                clip=True,
                guess=perfect_object,
            )
            # update the estimate of the psf
            psf, _ = richardson_lucy(
                image=blurred_object,
                psf=perfect_object,
                iterations=sub_iterations,
                clip=False,
                guess=psf,
            )
        # normalize the psf estimate (np.float is removed in recent numpy, use float)
        psf = (np.abs(psf) / np.abs(psf).sum()).astype(float)

    ###############
    # plot result #
    ###############
    if debugging:
        gu.multislices_plot(
            perfect_object,
            scale=scale[0],
            sum_frames=False,
            title='retrieved perfect object',
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
            vmin=vmin[0],
            vmax=vmax[0],
            plot_colorbar=True,
        )
        gu.multislices_plot(
            psf,
            scale=scale[1],
            sum_frames=False,
            title='retrieved psf',
            vmin=vmin[1],
            vmax=vmax[1],
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
            plot_colorbar=True,
        )
    return psf

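# Hedged usage sketch for blind_deconvolution_rl, assuming "blurred", "sharp_guess"
# and "psf_guess" are ndarrays of identical shape prepared by the caller (names are
# illustrative only, not defined in this module):
#
#     psf = blind_deconvolution_rl(
#         blurred_object=blurred,
#         perfect_object=sharp_guess,
#         psf=psf_guess,
#         nb_cycles=10,
#         sub_iterations=10,
#         update_psf_first=True,
#         debugging=False,
#         scale=("linear", "log"),
#     )
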
def test_validkwargs_kwargs_type_dict(self):
    self.assertTrue(valid.valid_kwargs(kwargs={}, allowed_kwargs="test"))