def get_distortion_residuals(self, mask_radius, spread):
    """Obtain residuals for experimental data and distortion corrected data
    with respect to a simulated symmetric ring pattern.

    Parameters
    ----------
    mask_radius : int
        Radius, in pixels, for a mask over the direct beam disc.
    spread : float
        Gaussian spread of each ring in the simulated pattern.

    Returns
    -------
    ElectronDiffraction2D
        Two-pattern stack: the difference between the experimental data and
        the simulated symmetric ring pattern, followed by the difference
        between the distortion corrected data and the same simulation.
    """
    # Both the experimental pattern and the distortion matrix must have
    # been supplied/determined before residuals can be computed.
    experimental = self.calibration_data.au_x_grating_dp
    if experimental is None:
        raise ValueError(
            "This method requires an Au X-grating diffraction "
            "pattern to be provided. Please update the "
            "CalibrationDataLibrary."
        )
    if self.affine_matrix is None:
        raise ValueError(
            "This method requires a distortion matrix to have "
            "been determined. Use get_elliptical_distortion "
            "to determine this matrix."
        )
    params = self.ring_params
    size = experimental.data.shape[0]
    # Simulate the symmetric reference ring pattern (asymmetry fixed at 1,
    # spread taken from the argument rather than the fitted value).
    reference = generate_ring_pattern(
        image_size=size,
        mask=True,
        mask_radius=mask_radius,
        scale=params[0],
        amplitude=params[1],
        spread=spread,
        direct_beam_amplitude=params[3],
        asymmetry=1,
        rotation=params[5],
    )
    # Tile the experimental pattern 2x2 so the affine correction can be
    # applied, then pick one corrected tile for the comparison.
    tiled = stack_method([experimental] * 4)
    corrected = ElectronDiffraction2D(
        tiled.data.reshape((2, 2, size, size))
    )
    corrected.apply_affine_transformation(
        self.affine_matrix, preserve_range=True, inplace=True
    )
    # Residuals before and after the distortion correction.
    before = ElectronDiffraction2D(experimental.data - reference.data)
    after = ElectronDiffraction2D(
        corrected.inav[0, 0].data - reference.data
    )
    return ElectronDiffraction2D(stack_method([before, after]))
def plot_corrected_diffraction_pattern(self, reference_circle=True):
    """Plot the distortion corrected diffraction pattern with an optional
    reference circle.

    Parameters
    ----------
    reference_circle : bool
        If True a CircleROI widget is added to the plot for reference.

    Raises
    ------
    ValueError
        If the Au X-grating pattern or the distortion matrix has not been
        provided/determined.
    """
    # Check all required parameters are defined as attributes
    if self.calibration_data.au_x_grating_dp is None:
        raise ValueError("This method requires an Au X-grating diffraction "
                         "pattern to be provided. Please update the "
                         "CalibrationDataLibrary.")
    if self.affine_matrix is None:
        raise ValueError("This method requires a distortion matrix to have "
                         "been determined. Use get_elliptical_distortion "
                         "to determine this matrix.")
    # Set name for experimental data pattern
    dpeg = self.calibration_data.au_x_grating_dp
    # Apply distortion corrections to experimental data: tile the pattern
    # 2x2 so the affine transform can be applied, then average the tiles.
    size = dpeg.data.shape[0]
    dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
    dpegs = ElectronDiffraction2D(dpegs.data.reshape((2, 2, size, size)))
    dpegs.apply_affine_transformation(self.affine_matrix,
                                      preserve_range=True,
                                      inplace=True)
    dpegm = dpegs.mean((0, 1))
    # Plot distortion corrected data
    dpegm.plot(cmap='magma', vmax=0.1)
    # Add reference circle if specified. The centre was previously
    # hard-coded at (128, 128), which is only correct for a 256 x 256
    # pattern; derive it from the pattern size instead (assumes
    # uncalibrated pixel axes — behaviour for 256 x 256 data unchanged).
    if reference_circle:
        centre = size / 2
        circ = CircleROI(cx=centre, cy=centre, r=53.5, r_inner=0)
        circ.add_widget(dpegm)
def plot_calibrated_data(self, data_to_plot, *args, **kwargs):
    """Plot calibrated data for visual inspection.

    Parameters
    ----------
    data_to_plot : string
        Specify the calibrated data to be plotted. Valid options are:
        {'au_x_grating_dp', 'au_x_grating_im'}
    *args : arguments
        Arguments passed on to the signal plot method.
    **kwargs : keyword arguments
        Keyword arguments passed on to the signal plot method.

    Raises
    ------
    ValueError
        If ``data_to_plot`` is not a recognised option.
    """
    # Construct object containing user defined data to plot and set the
    # calibration checking that it is defined.
    if data_to_plot == 'au_x_grating_dp':
        dpeg = self.calibration_data.au_x_grating_dp
        size = dpeg.data.shape[0]
        # Tile the pattern 2x2 so the affine correction can be applied,
        # then average the corrected tiles back into a single pattern.
        dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
        dpegs = ElectronDiffraction2D(dpegs.data.reshape((2, 2, size,
                                                          size)))
        dpegs.apply_affine_transformation(self.affine_matrix,
                                          preserve_range=True,
                                          inplace=True)
        data = dpegs.mean((0, 1))
        data.set_diffraction_calibration(self.diffraction_calibration)
    elif data_to_plot == 'au_x_grating_im':
        data = self.calibration_data.au_x_grating_im
    else:
        # Previously an unrecognised option (including the 'moo3_dp' and
        # 'moo3_im' values the docstring advertised but never handled)
        # fell through and crashed with a NameError on ``data``.
        raise ValueError(
            "data_to_plot must be one of 'au_x_grating_dp' or "
            "'au_x_grating_im', not '{}'".format(data_to_plot))
    # Plot the data
    data.plot(*args, **kwargs)
def get_diffraction_calibration(self, mask_length, linewidth):
    """Determine the diffraction pattern pixel size calibration in units of
    reciprocal Angstroms per pixel.

    Parameters
    ----------
    mask_length : float
        Halfwidth of the region excluded from peak finding around the
        diffraction pattern center.
    linewidth : float
        Width of Line2DROI used to obtain line trace from distortion
        corrected diffraction pattern.

    Returns
    -------
    diff_cal : float
        Diffraction calibration in reciprocal Angstroms per pixel.
    """
    # Check that necessary calibration data is provided
    if self.calibration_data.au_x_grating_dp is None:
        raise ValueError("This method requires an Au X-grating diffraction "
                         "pattern to be provided. Please update the "
                         "CalibrationDataLibrary.")
    if self.affine_matrix is None:
        raise ValueError("This method requires a distortion matrix to have "
                         "been determined. Use get_elliptical_distortion "
                         "to determine this matrix.")
    dpeg = self.calibration_data.au_x_grating_dp
    size = dpeg.data.shape[0]
    # Tile the pattern 2x2 so the affine correction can be applied, then
    # average the corrected tiles back into a single corrected pattern.
    dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
    dpegs = ElectronDiffraction2D(dpegs.data.reshape((2, 2, size, size)))
    dpegs.apply_affine_transformation(self.affine_matrix,
                                      preserve_range=True,
                                      inplace=True)
    dpegm = dpegs.mean((0, 1))
    # Define line roi along which to take trace for calibration
    # NOTE(review): the endpoints (5, 5)-(250, 250) and the direct-beam
    # position below assume a 256 x 256 pattern — confirm for other sizes.
    line = Line2DROI(x1=5, y1=5, x2=250, y2=250, linewidth=linewidth)
    # Obtain line trace
    trace = line(dpegm)
    trace = trace.as_signal1D(0)
    # Find peaks in line trace either side of direct beam; ``db`` is the
    # distance along the diagonal trace from its start to the pattern
    # centre, in trace pixels.
    db = (np.sqrt(2) * 128) - (5 * np.sqrt(2))
    pka = trace.isig[db + mask_length:].find_peaks1D_ohaver()[0]['position']
    pkb = trace.isig[:db - mask_length].find_peaks1D_ohaver()[0]['position']
    # Determine predicted position of 022 peak of Au pattern d022=1.437
    # (ring_params[0] is the fitted ring scale; 1.437 A is the Au 022
    # lattice spacing).
    au_pre = db - (self.ring_params[0] / 1.437)
    au_post = db + (self.ring_params[0] / 1.437)
    # Calculate differences between predicted and measured positions
    prediff = np.abs(pkb - au_pre)
    postdiff = np.abs(pka - au_post)
    # Calculate new calibration value based on most accurate peak
    # positions, i.e. the measured peaks closest to the predictions on
    # each side of the direct beam.
    dc = (2 / 1.437) / (pka[postdiff == min(postdiff)] -
                        pkb[prediff == min(prediff)])
    # Store diffraction calibration value as attribute
    self.diffraction_calibration = dc[0]
    return dc[0]
def load(filenames=None, signal_type=None, stack=False, stack_axis=None,
         new_axis_name="stack_element", lazy=False, **kwds):
    """Load supported files into a pyxem structure.

    Supported formats: hspy (HDF5), Medipix (hdr + mib), blockfile, Gatan
    dm3, tif, msa, Bruker bcf, FEI ser and emi, SEMPER unf, EMD, EDAX
    spd/spc, tif, and a number of image formats.

    Additional keyword arguments are passed to the corresponding reader.
    For available options see their individual documentation, which may be
    found in either pyxem or hyperspy.

    Parameters
    ----------
    filenames : None, str or list of strings
        The filename to be loaded. If None, a window will open to select a
        file to load. If a valid filename is passed in that single file is
        loaded. If multiple file names are passed in a list, a list of
        objects or a single object containing the data of the individual
        files stacked are returned. This behaviour is controlled by the
        `stack` parameter (see below). Multiple files can be loaded by
        using simple shell-style wildcards, e.g. 'my_file*.msa' loads all
        the files that starts by 'my_file' and has the '.msa' extension.
    signal_type : {None, "electron_diffraction", "diffraction_profile",
                   "diffraction_vectors", "crystallographic_map", str}
        The acronym that identifies the signal type. The value provided may
        determine the Signal subclass assigned to the data. If None the
        value is read/guessed from the file. Any other value overrides the
        value stored in the file if any. For electron energy-loss
        spectroscopy use "EELS". For energy dispersive x-rays use "EDS_TEM"
        if acquired from an electron-transparent sample — as it is usually
        the case in a transmission electron microscope (TEM) —, "EDS_SEM"
        if acquired from a non electron-transparent sample — as it is
        usually the case in a scanning electron microscope (SEM) —. If ""
        (empty string) the value is not read from the file and is
        considered undefined.
    stack : bool
        If True and multiple filenames are passed in, stacking all the data
        into a single object is attempted. All files must match in shape.
        If each file contains multiple (N) signals, N stacks will be
        created, with the requirement that each file contains the same
        number of signals.
    stack_axis : {None, int, str}
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the signals are stacked over
        the axis given by its integer index or its name. The data must have
        the same shape, except in the dimension corresponding to `axis`.
    new_axis_name : string
        The name of the new axis when `axis` is None. If an axis with this
        name already exists it automatically append '-i', where `i` are
        integers, until it finds a name that is not yet in use.
    lazy : {None, bool}
        Open the data lazily - i.e. without actually reading the data from
        the disk until required. Allows opening arbitrary-sized datasets.
        default is `False`.
    print_info: bool
        For SEMPER unf- and EMD (Berkeley)-files, if True (default is
        False) additional information read during loading is printed for a
        quick overview.
    downsample : int (1–4095)
        For Bruker bcf files, if set to integer (>=2) (default 1) bcf is
        parsed into down-sampled size array by given integer factor,
        multiple values from original bcf pixels are summed forming
        downsampled pixel. This allows to improve signal and conserve the
        memory with the cost of lower resolution.
    cutoff_at_kV : {None, int, float}
        For Bruker bcf files, if set to numerical (default is None) bcf is
        parsed into array with depth cutoff at corresponding given energy.
        This allows to conserve the memory, with cutting-off unused
        spectra's tail, or force enlargement of the spectra size.
    select_type: {'spectrum', 'image', None}
        For Bruker bcf files, if one of 'spectrum' or 'image' (default is
        None) the loader returns either only hypermap or only SEM/TEM
        electron images.

    Returns
    -------
    Signal instance or list of signal instances

    Examples
    --------
    Loading a single file providing the signal type:

    >>> d = hs.load('file.dm3', signal_type="EDS_TEM")

    Loading multiple files:

    >>> d = hs.load('file1.dm3','file2.dm3')

    Loading multiple files matching the pattern:

    >>> d = hs.load('file*.dm3')

    Loading (potentially larger than the available memory) files lazily and
    stacking:

    >>> s = hs.load('file*.blo', lazy=True, stack=True)

    """
    # Map the deprecated lazy-loading keywords onto ``lazy=True``.
    deprecated = ['mmap_dir', 'load_to_memory']
    warn_str = "'{}' argument is deprecated, please use 'lazy' instead"
    for k in deprecated:
        if k in kwds:
            lazy = True
            warnings.warn(warn_str.format(k), VisibleDeprecationWarning)
            del kwds[k]
    kwds['signal_type'] = signal_type
    # No filenames given: open the interactive file-selection dialog.
    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey="load")
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise ValueError("No file provided to reader")
    # Expand a string pattern (shell-style wildcards) into a sorted list of
    # existing files.
    if isinstance(filenames, str):
        filenames = natsorted(
            [f for f in glob.glob(filenames) if os.path.isfile(f)])
        if not filenames:
            raise ValueError('No file name matches this pattern')
    elif not isinstance(filenames, (list, tuple)):
        raise ValueError(
            'The filenames parameter must be a list, tuple, string or None')
    if not filenames:
        raise ValueError('No file provided to reader.')
    else:
        if len(filenames) > 1:
            _logger.info('Loading individual files')
        if stack is True:
            # We are loading a stack!
            # Note that while each file might contain several signals, all
            # files are required to contain the same number of signals. We
            # therefore use the first file to determine the number of
            # signals.
            for i, filename in enumerate(filenames):
                obj = load_single_file(filename, lazy=lazy, **kwds)
                if i == 0:
                    # First iteration, determine number of signals, if
                    # several:
                    if isinstance(obj, (list, tuple)):
                        n = len(obj)
                    else:
                        n = 1
                    # Initialize signal 2D list:
                    signals = [[] for j in range(n)]
                else:
                    # Check that number of signals per file doesn't change
                    # for other files:
                    if isinstance(obj, (list, tuple)):
                        if n != len(obj):
                            raise ValueError(
                                "The number of sub-signals per file does not "
                                "match:\n" +
                                (f_error_fmt % (1, n, filenames[0])) +
                                (f_error_fmt % (i, len(obj), filename)))
                    elif n != 1:
                        raise ValueError(
                            "The number of sub-signals per file does not "
                            "match:\n" +
                            (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename)))
                # Append loaded signals to 2D list:
                if n == 1:
                    signals[0].append(obj)
                elif n > 1:
                    for j in range(n):
                        signals[j].append(obj[j])
            # Next, merge the signals in the `stack_axis` direction:
            # When each file had N signals, we create N stacks!
            objects = []
            for i in range(n):
                signal = signals[i]  # Sublist, with len = len(filenames)
                signal = stack_method(signal, axis=stack_axis,
                                      new_axis_name=new_axis_name,
                                      lazy=lazy)
                # Title the stack after the directory of the first file.
                signal.metadata.General.title = os.path.split(
                    os.path.split(os.path.abspath(filenames[0]))[0])[1]
                _logger.info('Individual files loaded correctly')
                _logger.info(signal._summary())
                objects.append(signal)
        else:
            # No stack, so simply we load all signals in all files
            # separately
            objects = [
                load_single_file(filename, lazy=lazy, **kwds)
                for filename in filenames
            ]
        # Unwrap a single result so callers get a signal, not a list.
        if len(objects) == 1:
            objects = objects[0]
    return objects
def load(filenames=None, signal_type=None, stack=False, stack_axis=None,
         new_axis_name="stack_element", lazy=False, convert_units=False,
         escape_square_brackets=False, **kwds):
    """Load potentially multiple supported files into HyperSpy.

    Supported formats: hspy (HDF5), msa, Gatan dm3, Ripple (rpl+raw),
    Bruker bcf and spx, FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc,
    tif, and a number of image formats.

    Depending on the number of datasets to load in the file, this function
    will return a HyperSpy signal instance or list of HyperSpy signal
    instances.

    Any extra keywords are passed to the corresponding reader. For
    available options, see their individual documentation.

    Parameters
    ----------
    filenames : None or str or list(str) or pathlib.Path or
                list(pathlib.Path)
        The filename to be loaded. If None, a window will open to select a
        file to load. If a valid filename is passed in that single file is
        loaded. If multiple file names are passed in a list, a list of
        objects or a single object containing the data of the individual
        files stacked are returned. This behaviour is controlled by the
        `stack` parameter (see below). Multiple files can be loaded by
        using simple shell-style wildcards, e.g. 'my_file*.msa' loads all
        the files that starts by 'my_file' and has the '.msa' extension.
    signal_type : {None, "EELS", "EDS_SEM", "EDS_TEM", "", str}
        The acronym that identifies the signal type. The value provided may
        determine the Signal subclass assigned to the data. If None the
        value is read/guessed from the file. Any other value overrides the
        value stored in the file if any. For electron energy-loss
        spectroscopy use "EELS". For energy dispersive x-rays use "EDS_TEM"
        if acquired from an electron-transparent sample — as it is usually
        the case in a transmission electron microscope (TEM) —, "EDS_SEM"
        if acquired from a non electron-transparent sample — as it is
        usually the case in a scanning electron microscope (SEM). If ""
        (empty string) the value is not read from the file and is
        considered undefined.
    stack : bool
        If True and multiple filenames are passed in, stacking all the data
        into a single object is attempted. All files must match in shape.
        If each file contains multiple (N) signals, N stacks will be
        created, with the requirement that each file contains the same
        number of signals.
    stack_axis : {None, int, str}
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the signals are stacked over
        the axis given by its integer index or its name. The data must have
        the same shape, except in the dimension corresponding to `axis`.
    new_axis_name : string
        The name of the new axis when `axis` is None. If an axis with this
        name already exists it automatically append '-i', where `i` are
        integers, until it finds a name that is not yet in use.
    lazy : {None, bool}
        Open the data lazily - i.e. without actually reading the data from
        the disk until required. Allows opening arbitrary-sized datasets.
        The default is `False`.
    convert_units : {bool}
        If True, convert the units using the `convert_to_units` method of
        the `axes_manager`. If False, does nothing. The default is False.
    escape_square_brackets : bool, default False
        If True, and ``filenames`` is a str containing square brackets,
        then square brackets are escaped before wildcard matching with
        ``glob.glob()``. If False, square brackets are used to represent
        character classes (e.g. ``[a-z]`` matches lowercase letters.
    reader : None or str or custom file reader object, default None
        Specify the file reader to use when loading the file(s). If None,
        will use the file extension to infer the file type and appropriate
        reader. If str, will select the appropriate file reader from the
        list of available readers in HyperSpy. If a custom reader object,
        it should implement the ``file_reader`` function, which returns a
        dictionary containing the data and metadata for conversion to a
        HyperSpy signal.
    print_info: bool, default False
        For SEMPER unf- and EMD (Berkeley)-files, if True additional
        information read during loading is printed for a quick overview.
    downsample : int (1–4095)
        For Bruker bcf files, if set to integer (>=2) (default 1) bcf is
        parsed into down-sampled size array by given integer factor,
        multiple values from original bcf pixels are summed forming
        downsampled pixel. This allows to improve signal and conserve the
        memory with the cost of lower resolution.
    cutoff_at_kV : {None, int, float}
        For Bruker bcf files, if set to numerical (default is None) bcf is
        parsed into array with depth cutoff at corresponding given energy.
        This allows to conserve the memory, with cutting-off unused
        spectra's tail, or force enlargement of the spectra size.
    select_type : {'spectrum_image', 'image', 'single_spectrum', None}
        If `None` (default), all data are loaded. For Bruker bcf and Velox
        emd files: if one of 'spectrum_image', 'image' or
        'single_spectrum', the loader returns either only the spectrum
        image, or only the images (including EDS map for Velox emd files),
        or only the single spectra (for Velox emd files).
    first_frame : int (default 0)
        Only for Velox emd files: load only the data acquired after the
        specified frame.
    last_frame : None or int (default None)
        Only for Velox emd files: load only the data acquired up to the
        specified frame. If None, load up the data to the end.
    sum_frames : bool (default is True)
        Only for Velox emd files: if False, load each EDS frame
        individually.
    sum_EDS_detectors : bool (default is True)
        Only for Velox emd files: if True, the signal from the different
        detector are summed. If False, a distinct signal is returned for
        each EDS detectors.
    rebin_energy : int, a multiple of the length of the energy dimension
                   (default 1)
        Only for Velox emd files: rebin the energy axis by the integer
        provided during loading in order to save memory space.
    SI_dtype : numpy.dtype
        Only for Velox emd files: set the dtype of the spectrum image data
        in order to save memory space. If None, the default dtype from the
        Velox emd file is used.
    load_SI_image_stack : bool (default False)
        Only for Velox emd files: if True, load the stack of STEM images
        acquired simultaneously as the EDS spectrum image.
    dataset_path : None, str or list of str, optional
        For filetypes which support several datasets in the same file, this
        will only load the specified dataset. Several datasets can be
        loaded by using a list of strings. Only for EMD (NCEM) and hdf5
        (USID) files.
    stack_group : bool, optional
        Only for EMD NCEM. Stack datasets of groups with common name.
        Relevant for emd file version >= 0.5 where groups can be named
        'group0000', 'group0001', etc.
    ignore_non_linear_dims : bool, default is True
        Only for HDF5 USID. If True, parameters that were varied
        non-linearly in the desired dataset will result in Exceptions.
        Else, all such non-linearly varied parameters will be treated as
        linearly varied parameters and a Signal object will be generated.
    only_valid_data : bool, optional
        Only for FEI emi/ser file in case of series or linescan with the
        acquisition stopped before the end: if True, load only the acquired
        data. If False, fill empty data with zeros. Default is False and
        this default value will change to True in version 2.0.

    Returns
    -------
    Signal instance or list of signal instances

    Examples
    --------
    Loading a single file providing the signal type:

    >>> d = hs.load('file.dm3', signal_type="EDS_TEM")

    Loading multiple files:

    >>> d = hs.load('file1.dm3','file2.dm3')

    Loading multiple files matching the pattern:

    >>> d = hs.load('file*.dm3')

    Loading multiple files containing square brackets:

    >>> d = hs.load('file[*].dm3', escape_square_brackets=True)

    Loading (potentially larger than the available memory) files lazily and
    stacking:

    >>> s = hs.load('file*.blo', lazy=True, stack=True)

    Specify the file reader to use

    >>> s = hs.load('a_nexus_file.h5', reader='nxs')

    """
    # Map the deprecated lazy-loading keywords onto ``lazy=True``.
    deprecated = ['mmap_dir', 'load_to_memory']
    warn_str = "'{}' argument is deprecated, please use 'lazy' instead"
    for k in deprecated:
        if k in kwds:
            lazy = True
            warnings.warn(warn_str.format(k), VisibleDeprecationWarning)
            del kwds[k]
    kwds['signal_type'] = signal_type
    kwds['convert_units'] = convert_units
    # No filenames given: open the interactive file-selection dialog.
    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey="hyperspy.load")
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise ValueError("No file provided to reader")
    # Normalise ``filenames`` to a non-empty list of existing files.
    if isinstance(filenames, str):
        pattern = filenames
        if escape_square_brackets:
            filenames = _escape_square_brackets(filenames)
        filenames = natsorted([f for f in glob.glob(filenames)
                               if os.path.isfile(f)])
        if not filenames:
            raise ValueError(f'No filename matches the pattern "{pattern}"')
    elif isinstance(filenames, Path):
        # Just convert to list for now, pathlib.Path not
        # fully supported in io_plugins
        filenames = [f for f in [filenames] if f.is_file()]
    elif isgenerator(filenames):
        filenames = list(filenames)
    elif not isinstance(filenames, (list, tuple)):
        raise ValueError(
            'The filenames parameter must be a list, tuple, '
            f'string or None, not {type(filenames)}'
        )
    if not filenames:
        raise ValueError('No file(s) provided to reader.')
    # pathlib.Path not fully supported in io_plugins,
    # so convert to str here to maintain compatibility
    filenames = [str(f) if isinstance(f, Path) else f for f in filenames]
    if len(filenames) > 1:
        _logger.info('Loading individual files')
    if stack is True:
        # We are loading a stack!
        # Note that while each file might contain several signals, all
        # files are required to contain the same number of signals. We
        # therefore use the first file to determine the number of signals.
        for i, filename in enumerate(filenames):
            obj = load_single_file(filename, lazy=lazy, **kwds)
            if i == 0:
                # First iteration, determine number of signals, if several:
                n = len(obj) if isinstance(obj, (list, tuple)) else 1
                # Initialize signal 2D list:
                signals = [[] for j in range(n)]
            else:
                # Check that number of signals per file doesn't change
                # for other files:
                if isinstance(obj, (list, tuple)):
                    if n != len(obj):
                        raise ValueError(
                            "The number of sub-signals per file does not "
                            "match:\n" +
                            (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename))
                        )
                elif n != 1:
                    raise ValueError(
                        "The number of sub-signals per file does not "
                        "match:\n" +
                        (f_error_fmt % (1, n, filenames[0])) +
                        (f_error_fmt % (i, len(obj), filename))
                    )
            # Append loaded signals to 2D list:
            if n == 1:
                signals[0].append(obj)
            elif n > 1:
                for j in range(n):
                    signals[j].append(obj[j])
        # Next, merge the signals in the `stack_axis` direction:
        # When each file had N signals, we create N stacks!
        objects = []
        for i in range(n):
            signal = signals[i]  # Sublist, with len = len(filenames)
            signal = stack_method(
                signal,
                axis=stack_axis,
                new_axis_name=new_axis_name,
                lazy=lazy,
            )
            # Title the stack after the directory of the first file.
            signal.metadata.General.title = Path(filenames[0]).parent.stem
            _logger.info('Individual files loaded correctly')
            _logger.info(signal._summary())
            objects.append(signal)
    else:
        # No stack, so simply we load all signals in all files separately
        objects = [load_single_file(filename, lazy=lazy, **kwds)
                   for filename in filenames]
    # Unwrap a single result so callers get a signal, not a list.
    if len(objects) == 1:
        objects = objects[0]
    return objects
def plot_calibrated_data(self, data_to_plot, line=None, *args,
                         **kwargs):  # pragma: no cover
    """Plot calibrated data for visual inspection.

    Parameters
    ----------
    data_to_plot : str
        Specify the calibrated data to be plotted. Valid options are:
        {'au_x_grating_dp', 'au_x_grating_im', 'moo3_dp', 'moo3_im',
        'rotation_overlay'}
    line : :obj:`hyperspy.roi.Line2DROI`
        An optional Line2DROI object, as detailed in HyperSpy, to be added
        as a widget to the calibration data plot and the trace plotted
        interactively.
    *args : arguments
        Arguments to be passed to the plot method.
    **kwargs : keyword arguments
        Keyword arguments to be passed to the plot method.

    Returns
    -------
    trace or None
        The interactive line trace if ``line`` is given, otherwise None.

    Raises
    ------
    ValueError
        If ``data_to_plot`` is not a recognised option.
    """
    # Construct object containing user defined data to plot and set the
    # calibration checking that it is defined.
    if data_to_plot == "au_x_grating_dp":
        dpeg = self.calibration_data.au_x_grating_dp
        size = dpeg.data.shape[0]
        if self.correction_matrix is None:
            self.get_correction_matrix()
        # Tile 2x2 so the correction can be applied, then average.
        dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
        dpegs = ElectronDiffraction2D(
            dpegs.data.reshape((2, 2, size, size)))
        dpegs.apply_affine_transformation(self.correction_matrix,
                                          preserve_range=True,
                                          inplace=True)
        data = dpegs.mean((0, 1))
        data.set_diffraction_calibration(self.diffraction_calibration)
        # Plot the calibrated diffraction data
        data.plot(*args, **kwargs)
    elif data_to_plot == "au_x_grating_im":
        data = self.calibration_data.au_x_grating_im
        # Plot the calibrated image data
        data.plot(*args, **kwargs)
    elif data_to_plot == "moo3_dp":
        dpeg = self.calibration_data.moo3_dp
        size = dpeg.data.shape[0]
        if self.correction_matrix is None:
            self.get_correction_matrix()
        # Tile 2x2 so the correction can be applied, then average.
        dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
        dpegs = ElectronDiffraction2D(
            dpegs.data.reshape((2, 2, size, size)))
        dpegs.apply_affine_transformation(self.correction_matrix,
                                          preserve_range=True,
                                          inplace=True)
        data = dpegs.mean((0, 1))
        data.set_diffraction_calibration(self.diffraction_calibration)
        # Plot the calibrated diffraction data
        data.plot(*args, **kwargs)
    elif data_to_plot == "moo3_im":
        data = self.calibration_data.moo3_im
        # Plot the calibrated image data
        data.plot(*args, **kwargs)
    elif data_to_plot == "rotation_overlay":
        dpeg = self.calibration_data.moo3_dp
        size = dpeg.data.shape[0]
        if self.correction_matrix is None:
            self.get_correction_matrix()
        dpegs = stack_method([dpeg, dpeg, dpeg, dpeg])
        dpegs = ElectronDiffraction2D(
            dpegs.data.reshape((2, 2, size, size)))
        dpegs.apply_affine_transformation(self.correction_matrix,
                                          preserve_range=True,
                                          inplace=True)
        dp = dpegs.mean((0, 1))
        im = self.calibration_data.moo3_im.rebin(dp.data.shape)
        # Overlay the corrected diffraction pattern (red channel) on the
        # rebinned image (blue channel) as an RGB array.
        stack1 = np.zeros((dp.data.shape[0], dp.data.shape[1], 3))
        stack1[:, :, 0] = dp.data / (0.05 * dp.data.max())
        stack1[:, :, 2] = im.data / im.data.max()
        plt.figure(1)
        plt.imshow(stack1)
    else:
        # Previously an unrecognised option fell through silently (or
        # crashed with a NameError on ``data`` if ``line`` was given).
        raise ValueError(
            "data_to_plot must be one of 'au_x_grating_dp', "
            "'au_x_grating_im', 'moo3_dp', 'moo3_im' or "
            "'rotation_overlay', not '{}'".format(data_to_plot))
    if line:
        # NOTE(review): the 'rotation_overlay' branch does not define
        # ``data``, so passing a line with that option raises NameError —
        # confirm intended behaviour before relying on it.
        line.add_widget(data, axes=data.axes_manager.signal_axes)
        trace = line.interactive(data, navigation_signal="same")
        trace.plot()
        return trace
def load(filenames=None, signal_type=None, stack=False, stack_axis=None,
         new_axis_name='stack_element', lazy=False, convert_units=False,
         **kwargs):
    """Load potentially multiple supported files into a KikuchiPy
    structure.

    Any extra keyword is passed to the corresponding reader. For available
    options see their individual documentation, which may be found in
    either KikuchiPy or HyperSpy.

    Parameters
    ----------
    filenames : {None, str or list of strings}, optional
        The filenames to be loaded. If None, a window will open to select
        a file to load. If a valid filename is passed in that single file
        is loaded. If multiple file names are passed in a list, a list of
        objects or a single object containing the data of the individual
        files stacked are returned. This behaviour is controlled by the
        `stack` parameter (see below). Multiple files can be loaded by
        using simple shell-style wildcards, e.g. 'my_file*.dat' loads all
        the files that starts by 'my_file' and has the '.dat' extension.
    signal_type : {None, 'EBSD', str}, optional
        The name or acronym that identifies the signal type. The value
        provided may determine the Signal subclass assigned to the data.
        If None the value is read/guessed from the file. Any other value
        overrides the value stored in the file if any. If '' (empty
        string) the value is not read from the file and is considered
        undefined.
    stack : bool, optional
        If True and multiple filenames are passed in, stacking all the
        data into a single object is attempted. All files must match in
        shape. If each file contains multiple (N) signals, N stacks will
        be created, with the requirement that each file contains the same
        number of signals.
    stack_axis : {None, int, str}, optional
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the signals are stacked over
        the axis given by its integer index or its name. The data must
        have the same shape, except in the dimension corresponding to
        `axis`.
    new_axis_name : str, optional
        The name of the new axis when `axis` is None. If an axis with this
        name already exists it automatically appends '-i', where `i` are
        integers, until it finds a name that is not yet in use.
    lazy : bool, optional
        Open the data lazily - i.e. without actually reading the data from
        the disk until required. Allows opening arbitrary-sized datasets.
        The default is `False`.
    convert_units : bool, optional
        If True, convert the units using the `convert_to_units` method of
        the `axes_manager`. If False (default), nothing is done.
    **kwargs :
        Keyword arguments passed to the corresponding reader.

    Returns
    -------
    objects : signal instance or list of signal instances

    Raises
    ------
    IOError
        If no file is provided via the file-selection dialog.
    ValueError
        If no file matches the pattern, the filenames parameter has the
        wrong type, an empty list of filenames is given, or files in a
        stack contain differing numbers of signals.

    Examples
    --------
    Loading a single file providing the signal type:

    >>> s = kp.load('Pattern.h5', signal_type='EBSD')

    Loading multiple scans from single file:

    >>> s1, s2 = kp.load('Pattern.h5', scans=[1, 2])
    """
    kwargs['signal_type'] = signal_type
    kwargs['convert_units'] = convert_units
    # No filenames given: open the interactive file-selection dialog.
    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey='load')
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise IOError("No file provided to reader.")
    # Expand a string pattern (shell-style wildcards) into a sorted list
    # of existing files.
    if isinstance(filenames, str):
        filenames = natsorted(
            [f for f in glob.glob(filenames) if os.path.isfile(f)])
        if not filenames:
            raise ValueError("No file name matches this pattern.")
    elif not isinstance(filenames, (list, tuple)):
        raise ValueError("The filenames parameter must be a list, tuple, "
                         "string or None.")
    # An empty list previously fell through to the stacking loop, where
    # ``n`` was never assigned (NameError), or silently returned [].
    if not filenames:
        raise ValueError("No file provided to reader.")
    if stack is True:
        # We are loading a stack!
        # Note that while each file might contain several signals, all
        # files are required to contain the same number of signals. We
        # therefore use the first file to determine the number of signals.
        for i, filename in enumerate(filenames):
            obj = load_single_file(filename, lazy=lazy, **kwargs)
            if i == 0:
                # First iteration, determine number of signals, if several
                n = 1
                if isinstance(obj, (list, tuple)):
                    n = len(obj)
                # Initialize signal 2D list
                signals = [[] for j in range(n)]
            else:
                # Check that number of signals per file doesn't change for
                # other files
                if isinstance(obj, (list, tuple)):
                    if n != len(obj):
                        raise ValueError(
                            "The number of signals per file does not "
                            "match:\n" +
                            (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename)))
                elif n != 1:
                    # A single-signal file following a multi-signal first
                    # file previously slipped through unchecked and
                    # crashed below when indexing ``obj[j]``.
                    raise ValueError(
                        "The number of signals per file does not "
                        "match:\n" +
                        (f_error_fmt % (1, n, filenames[0])) +
                        (f_error_fmt % (i, len(obj), filename)))
            # Append loaded signals to 2D list
            if n == 1:
                signals[0].append(obj)
            elif n > 1:
                for j in range(n):
                    signals[j].append(obj[j])
        # Next, merge the signals in the `stack_axis` direction.
        # When each file has N signals, we create N stacks.
        objects = []
        for i in range(n):
            signal = signals[i]  # Sublist, with len = len(filenames)
            signal = stack_method(signal, axis=stack_axis,
                                  new_axis_name=new_axis_name, lazy=lazy)
            # Title the stack after the directory of the first file.
            signal.metadata.General.title = os.path.split(
                os.path.split(os.path.abspath(filenames[0]))[0])[1]
            objects.append(signal)
    else:
        # No stack, so simply load all signals in all files separately
        objects = [
            load_single_file(filename, lazy=lazy, **kwargs)
            for filename in filenames
        ]
    # Unwrap a single result so callers get a signal, not a list.
    if len(objects) == 1:
        objects = objects[0]
    return objects