def test_stack_non_uniform_axis(self):
    """Stack signals along non-uniform / converted axes.

    Checks: overlapping non-uniform axes raise, stacking along a
    non-uniform axis concatenates the axis vectors, default stacking adds
    a new navigation axis, and reversed axis vectors still stack.
    """
    s = self.signal
    s2 = s.deepcopy()
    # Offset s2 so its axis-2 vector does not overlap with s's.
    s2.axes_manager[2].offset = 2.5
    s.axes_manager[1].convert_to_non_uniform_axis()
    s.axes_manager[2].convert_to_non_uniform_axis()
    s2.axes_manager[2].convert_to_non_uniform_axis()
    # test error for overlapping axes
    with pytest.raises(ValueError, match="Signals can only be stacked"):
        rs = utils.stack([s, s], axis=2)
    # test stacking along non-uniform axis
    rs = utils.stack([s, s2], axis=2)
    assert rs.axes_manager[2].axis.size == rs.data.shape[2]
    # Test stacking without specified axis
    rs = utils.stack([s, s])
    assert rs.axes_manager.shape == (2, 3, 2, 5)
    assert rs.axes_manager[0].axis.size == 2
    # Test stacking along uniform axis
    rs = utils.stack([s, s], axis=0)
    assert rs.axes_manager[0].axis.size == 4
    # Test stacking axes with inverse vectors
    s.axes_manager[2].axis = s.axes_manager[2].axis[::-1]
    s2.axes_manager[2].axis = s2.axes_manager[2].axis[::-1]
    rs = utils.stack([s2, s], axis=2)
    assert rs.axes_manager[2].axis.size == rs.data.shape[2]
def fem(self, version="omega"):
    """Calculate the FEM (fluctuation electron microscopy) variance.

    Parameters
    ----------
    version : str
        The name of the FEM equation to use. 'rings' calculates the
        mean of the variances of all the patterns at some k. 'omega'
        calculates the variance of the annular means for every value
        of k.

    Returns
    -------
    int_vs_k : signal
        Normalized variance vs. k; one curve per thickness bin when a
        HAADF-based thickness filter is available.
    """
    # Bug fix: string comparisons below used 'is' (object identity),
    # which relies on CPython string interning and emits a
    # SyntaxWarning on modern Python; '==' is the correct operator.
    if not self.metadata.has_item('HAADF'):
        print("No thickness filter applied...")
        if version == 'rings':
            var = self.nanmean(axis=-1)
            var.map(square)
            var = var.nanmean()
            center = self.nanmean(axis=-1).nanmean()
            center.map(square)
            int_vs_k = (var - center) / center
            print(int_vs_k.axes_manager)
        elif version == 'omega':
            var = self.map(square, show_progressbar=False,
                           inplace=False).nanmean().nanmean(axis=1)
            center = self.nanmean(axis=-1)
            center.map(square)
            center = center.nanmean()
            int_vs_k = (var - center) / center
            print(int_vs_k.axes_manager)
    else:
        # Bin patterns by thickness and compute one variance curve per bin.
        filt, thickness = self.thickness_filter()
        if version == 'rings':
            int_vs_k = []
            for i, th in enumerate(thickness):
                index = np.where(filt.transpose() == i + 1)
                index = tuple(zip(index[0], index[1]))
                var = stack([self.inav[ind] for ind in index])
                v = var.map(square, inplace=False).nanmean().nanmean(axis=-2)
                center = var.nanmean(axis=-2)
                center.map(square)
                center = center.nanmean()
                int_vs_k.append((v - center) / center)
        if version == 'omega':
            int_vs_k = []
            for i, th in enumerate(thickness):
                index = np.where(filt.transpose() == i + 1)
                index = tuple(zip(index[0], index[1]))
                var = stack([self.inav[ind] for ind in index])
                v = var.map(square, inplace=False).nanmean(axis=-2)
                center = var.nanmean(axis=-2)
                center.map(square)
                center = center.nanmean()
                int_vs_k.append(((v - center) / center).nanmean())
        int_vs_k = stack(int_vs_k)
        # Navigation axis encodes the thickness bins.
        int_vs_k.axes_manager.navigation_axes[0].offset = thickness[0]
        int_vs_k.axes_manager.navigation_axes[0].scale = \
            thickness[1] - thickness[0]
    return int_vs_k
def test_stack_of_stack(self):
    """Splitting a stack of stacks must recover the original signal."""
    original = self.signal
    inner = utils.stack([original] * 2)
    outer = utils.stack([inner] * 3)
    first_inner = outer.split()[0]
    recovered = first_inner.split()[0]
    np.testing.assert_array_almost_equal(recovered.data, original.data)
    # Stack bookkeeping must be cleaned up again after splitting.
    assert not hasattr(recovered.original_metadata, 'stack_elements')
    assert recovered.metadata.General.title == 'test'
def test_stack_of_stack(self):
    """Splitting a stack of stacks must recover the original signal."""
    s = self.signal
    s1 = utils.stack([s] * 2)
    s2 = utils.stack([s1] * 3)
    s3 = s2.split()[0]
    s4 = s3.split()[0]
    np.testing.assert_array_almost_equal(s4.data, s.data)
    # Modernized: plain asserts instead of the deprecated nose helpers
    # (nt.assert_false / nt.assert_equal); pytest reports these natively.
    assert not hasattr(s4.original_metadata, 'stack_elements')
    assert s4.metadata.General.title == 'test'
def test_stack_of_stack(self):
    """Splitting a stack of stacks must recover the original signal."""
    s = self.signal
    s1 = utils.stack([s] * 2)
    s2 = utils.stack([s1] * 3)
    s3 = s2.split()[0]
    s4 = s3.split()[0]
    # Modernized: plain asserts instead of deprecated nose assert_true;
    # also fixes the missing space in `hasattr(...)is False`.
    assert (s4.data == s.data).all()
    assert not hasattr(s4.original_metadata, 'stack_elements')
    assert s4.metadata.General.title == 'test'
def test_stack_of_stack(self):
    """Splitting a stack of stacks must recover the original signal."""
    s = self.signal
    s1 = utils.stack([s] * 2)
    s2 = utils.stack([s1] * 3)
    s3 = s2.split()[0]
    s4 = s3.split()[0]
    # Modernized: plain asserts instead of deprecated nose assert_true.
    assert (s4.data == s.data).all()
    assert not hasattr(s4.original_metadata, 'stack_elements')
    assert s4.metadata.General.title == 'test'
def test_stack_not_default(self):
    """Stacking along an existing axis by index and by name agree."""
    s = self.signal
    s1 = s.deepcopy() + 1
    s2 = s.deepcopy() * 4
    result_signal = utils.stack([s, s1, s2], axis=1)
    result_list = result_signal.split()
    # Modernized: plain asserts instead of deprecated nose assert_true.
    assert len(result_list) == 3
    assert (result_list[0].data == result_signal[::, 0].data).all()
    result_signal = utils.stack([s, s1, s2], axis='y')
    assert (result_list[0].data == result_signal[::, 0].data).all()
def test_stack_not_default(self):
    """Stacking along an existing axis by index and by name agree."""
    s = self.signal
    s1 = s.deepcopy() + 1
    s2 = s.deepcopy() * 4
    result_signal = utils.stack([s, s1, s2], axis=1)
    axis_size = s.axes_manager[1].size
    result_list = result_signal.split()
    # Modernized: plain assert instead of deprecated nose nt.assert_equal.
    assert len(result_list) == 3
    np.testing.assert_array_almost_equal(
        result_list[0].data, result_signal.inav[:, :axis_size].data)
    result_signal = utils.stack([s, s1, s2], axis='y')
    np.testing.assert_array_almost_equal(
        result_list[0].data, result_signal.inav[:, :axis_size].data)
def test_stack_functional_data_axis(self):
    """Stack signals whose axes are functional data axes."""
    s = self.signal
    s2 = s.deepcopy()
    # Test stacking of functional data axes with uniform x vector
    s.axes_manager[0].convert_to_functional_data_axis(expression='x')
    s2.axes_manager[0].offset = 2
    s2.axes_manager[0].convert_to_functional_data_axis(expression='x')
    rs = utils.stack([s, s2], axis=0)
    assert rs.axes_manager[0].axis.size == rs.data.shape[1]
    # Test stacking of functional data axes with non-uniform x vector
    # (comment fixed: the code below converts x to non-uniform)
    s.axes_manager[0].x.convert_to_non_uniform_axis()
    s2.axes_manager[0].x.convert_to_non_uniform_axis()
    rs = utils.stack([s, s2], axis=0)
    assert rs.axes_manager[0].axis.size == rs.data.shape[1]
def test_stack_stack_metadata_index(self):
    """stack_metadata=<int> selects the metadata of the signal at that index."""
    base = self.signal
    first = base.deepcopy() + 1
    first.metadata.General.title = 'first signal'
    first.original_metadata.om_title = 'first signal om'
    second = base.deepcopy() * 4
    second.metadata.General.title = 'second_signal'
    second.original_metadata.om_title = 'second signal om'
    stacked = utils.stack([first, second, base], stack_metadata=0)
    assert stacked.metadata.General.title == first.metadata.General.title
    stacked2 = utils.stack([first, second, base], stack_metadata=2)
    assert stacked2.metadata.General.title == base.metadata.General.title
def get_abs_corr_cross_section(
        composition, number_of_atoms, take_off_angle, probe_area):
    """Calculate absorption correction terms for the cross-section method.

    Parameters
    ----------
    composition : list of signal
        Composition in atomic percent, one signal per element.
    number_of_atoms : list of signal
        Stack of maps with number of atoms per pixel.
    take_off_angle : float
        X-ray take-off angle in degrees.
    probe_area : float
        Probe area; assumed to be in nm^2 — TODO confirm against caller.

    Returns
    -------
    numpy.ndarray
        Absorption correction factor per element per pixel.
    """
    toa_rad = np.radians(take_off_angle)
    Av = constants.Avogadro
    elements = [
        intensity.metadata.Sample.elements[0]
        for intensity in number_of_atoms
    ]
    # (removed an unused list of X-ray line names that was built here)
    atomic_weights = np.array([
        elements_db[element]['General_properties']['atomic_weight']
        for element in elements
    ])
    number_of_atoms = utils.stack(number_of_atoms).data
    # Calculate the total mass per pixel (i.e. the mass thickness).
    total_mass = np.zeros_like(number_of_atoms[0], dtype='float')
    for i, weight in enumerate(atomic_weights):
        total_mass += (number_of_atoms[i] * weight / Av / probe_area / 1E-15)
    # Mass absorption coefficients, converted from cm^2/g to m^2/atom.
    mac = utils.stack(
        utils.material.mass_absorption_mixture(
            weight_percent=utils.material.atomic_to_weight(composition))) * 0.1
    acf = np.zeros_like(number_of_atoms)
    # NOTE(review): 1E-16 here vs 1E-15 in the mass-thickness loop above —
    # confirm the intended unit conversions are consistent.
    constant = 1 / (Av * math.sin(toa_rad) * probe_area * 1E-16)
    # Determine an absorption coefficient per element per pixel.
    for i, weight in enumerate(atomic_weights):
        expo = mac.data[i] * total_mass * constant
        # np.exp is the vectorized standard form of math.e ** (-x).
        acf[i] = expo / (1 - np.exp(-expo))
    return acf
def test_with_signals_examples(self):
    """Line intensities of the example EDS-SEM spectrum stack correctly."""
    from hyperspy.misc.example_signals_loading import \
        load_1D_EDS_SEM_spectrum as EDS_SEM_Spectrum
    spectrum = EDS_SEM_Spectrum()
    stacked = utils.stack(spectrum.get_lines_intensity())
    expected = np.array([84163, 89063, 96117, 96700, 99075])
    np.testing.assert_allclose(stacked.data.squeeze(), expected)
def test_with_signals_examples(self):
    """Line intensities of the example EDS-SEM spectrum stack correctly."""
    from hyperspy.misc.example_signals_loading import (
        load_1D_EDS_SEM_spectrum as EDS_SEM_Spectrum)
    intensities = EDS_SEM_Spectrum().get_lines_intensity()
    observed = utils.stack(intensities).data.squeeze()
    np.testing.assert_allclose(
        observed, np.array([84163, 89063, 96117, 96700, 99075]))
def get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle):
    """Calculate absorption correction terms for the zeta-factor method.

    Parameters
    ----------
    weight_percent : list of signal
        Composition in weight percent.
    mass_thickness : signal
        Density-thickness map in kg/m^2.
    take_off_angle : float
        X-ray take-off angle in degrees.
    """
    # Cosecant of the take-off angle.
    csc_toa = 1.0 / np.sin(np.radians(take_off_angle))
    # Mass absorption coefficients, converted from cm^2/g to m^2/kg.
    mac = utils.stack(
        utils.material.mass_absorption_mixture(
            weight_percent=weight_percent)) * 0.1
    acf = mac.data * mass_thickness.data * csc_toa
    return acf / (1.0 - np.exp(-acf))
def test_function_nd(lazy):
    """function_nd of an estimated SplitVoigt reproduces the stacked data."""
    sig = Signal1D(np.empty((200,)))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = .05
    ax.offset = -5
    params = dict(A=5, sigma1=0.3, sigma2=0.75, fraction=0.5, centre=1)
    sig.data = SplitVoigt(**params).function(ax.axis)
    stacked = stack([sig, sig])
    if lazy:
        stacked = stacked.as_lazy()
    comp = SplitVoigt()
    assert comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    # Pin each parameter map to the known ground-truth values.
    for name, value in params.items():
        getattr(comp, name).map['values'] = [value] * 2
    np.testing.assert_allclose(comp.function_nd(ax.axis), stacked.data)
def get_strain_maps( self, rot_matr ): """Obtain strain maps from the displacement gradient tensor at each navigation position in the small strain approximation. Arguments --------- rot_matr : DisplacementGradientMap Object containing information on the rotation of the grid Returns ------- strain_results : BaseSignal Signal of shape < 4 | , > , navigation order is e11,e22,e12,theta """ # This may need to be eliminated if an existing rotation matrix is used R, U = self.polar_decomposition() e11 = -U.isig[0, 0].T + 1 e12 = U.isig[0, 1].T e21 = U.isig[1, 0].T e22 = -U.isig[1, 1].T + 1 theta = rot_matr.map(_get_rotation_angle, inplace=False) theta.axes_manager.set_signal_dimension(2) strain_results = stack([e11, e22, e12, theta]) return StrainMap(strain_results)
def test_stack_bigger_than_ten(self):
    """Stacking more than ten signals still splits back correctly."""
    base = self.signal
    signals = [base] * 12
    signals.append(base.deepcopy() * 3)
    restored = utils.stack(signals).split()
    np.testing.assert_array_almost_equal(signals[-1].data, restored[-1].data)
    assert restored[-1].metadata.General.title == 'test'
def test_plot_xray_lines():
    """Plot X-ray lines explicitly via _plot_xray_lines.

    Should render the same image as test_plot_eds_lines.
    """
    spectrum = EDS_TEM_Spectrum()
    stacked = stack([spectrum, spectrum * 5])
    stacked.plot()
    stacked._plot_xray_lines(xray_lines=True)
    stacked.axes_manager.navigation_axes[0].index = 1
    return stacked._plot.signal_plot.figure
def fem(self, version="omega", indicies=None):
    """Calculate the FEM normalized variance among some images.

    Parameters
    ----------
    version : str
        The name of the FEM equation to use. 'rings' calculates the
        mean of the variances of all the patterns at some k. 'omega'
        calculates the variance of the annular means for every value
        of k.
    indicies : iterable of tuple, optional
        If given, compute the FEM pattern using only the patterns at
        these navigation indexes. (Misspelled name kept for backward
        compatibility with existing callers.)

    Returns
    -------
    int_vs_k : signal
        Normalized variance as a function of k.
    """
    # Bug fixes: removed leftover debug print("Here"); string comparisons
    # now use '==' instead of 'is' (identity comparison of literals is an
    # implementation detail and a SyntaxWarning on modern Python).
    if version == "omega":
        if indicies:
            var = stack([self.inav[ind] for ind in indicies])
            annular_mean = var.nanmean(axis=-2)
            annular_mean_squared = annular_mean.nanmean()**2
            v = (annular_mean**2).nanmean()
            # NOTE(review): this is mean^2 / <x^2> - 1; confirm the
            # intended normalized-variance convention.
            int_vs_k = (annular_mean_squared / v) - 1
        else:
            with self.unfolded(unfold_navigation=True, unfold_signal=False):
                annular_mean = self.nanmean(axis=-2)
                annular_mean_squared = annular_mean.nanmean()**2
                v = (annular_mean**2).nanmean()
                int_vs_k = (annular_mean_squared / v) - 1
                self.set_signal_type("PolarSignal")
    if version == 'rings':
        if indicies:
            s = stack([self.inav[ind] for ind in indicies])
            ring_squared_average = (s**2).nanmean(axis=-2)
            ring_squared = s.nanmean(axis=-2)**2
            int_vs_k = (ring_squared_average / ring_squared) - 1
        else:
            with self.unfolded(unfold_navigation=True, unfold_signal=False):
                ring_squared_average = (self**2).nanmean(axis=-2)
                ring_squared = self.nanmean(axis=-2)**2
                int_vs_k = (ring_squared_average / ring_squared) - 1
                self.set_signal_type("PolarSignal")
    int_vs_k.axes_manager[0].units = "$nm^{-1}$"
    int_vs_k.axes_manager[0].name = "k"
    return int_vs_k
def test_stack_bigger_than_ten(self):
    """Stacking more than ten signals still splits back correctly."""
    s = self.signal
    list_s = [s] * 12
    list_s.append(s.deepcopy() * 3)
    list_s[-1].metadata.General.title = 'test'
    s1 = utils.stack(list_s)
    res = s1.split()
    # Modernized: plain asserts instead of deprecated nose assert_true.
    assert (list_s[-1].data == res[-1].data).all()
    assert res[-1].metadata.General.title == 'test'
def test_stack_not_default(self):
    """Stacking along an existing axis concatenates unequal-sized signals."""
    base = self.signal
    plus_one = base.inav[:, :-1] + 1
    times_four = base.inav[:, ::2] * 4
    stacked = utils.stack([base, plus_one, times_four], axis=1)
    n0 = base.axes_manager[1].size
    n1 = plus_one.axes_manager[1].size
    parts = stacked.split()
    assert len(parts) == 3
    # The axis may be given by index or by name; both must agree.
    for rs in (stacked, utils.stack([base, plus_one, times_four], axis='y')):
        np.testing.assert_array_almost_equal(
            parts[0].data, rs.inav[:, :n0].data)
        np.testing.assert_array_almost_equal(
            base.data, rs.inav[:, :n0].data)
        np.testing.assert_array_almost_equal(
            plus_one.data, rs.inav[:, n0:n0 + n1].data)
        np.testing.assert_array_almost_equal(
            times_four.data, rs.inav[:, n0 + n1:].data)
def test_stack_bigger_than_ten(self):
    """Stacking more than ten signals still splits back correctly."""
    s = self.signal
    list_s = [s] * 12
    list_s.append(s.deepcopy() * 3)
    list_s[-1].metadata.General.title = 'test'
    s1 = utils.stack(list_s)
    res = s1.split()
    np.testing.assert_array_almost_equal(list_s[-1].data, res[-1].data)
    # Modernized: plain assert instead of deprecated nose nt.assert_equal.
    assert res[-1].metadata.General.title == 'test'
def test_stack_not_default(self):
    """Stacking along an existing axis concatenates unequal-sized signals."""
    s = self.signal
    s1 = s.inav[:, :-1] + 1
    s2 = s.inav[:, ::2] * 4
    result_signal = utils.stack([s, s1, s2], axis=1)
    axis_size = s.axes_manager[1].size
    axs1 = s1.axes_manager[1].size
    # Removed unused local `axs2` (assigned but never read).
    result_list = result_signal.split()
    assert len(result_list) == 3
    for rs in [result_signal, utils.stack([s, s1, s2], axis='y')]:
        np.testing.assert_array_almost_equal(
            result_list[0].data, rs.inav[:, :axis_size].data)
        np.testing.assert_array_almost_equal(
            s.data, rs.inav[:, :axis_size].data)
        np.testing.assert_array_almost_equal(
            s1.data, rs.inav[:, axis_size:axis_size + axs1].data)
        np.testing.assert_array_almost_equal(
            s2.data, rs.inav[:, axis_size + axs1:].data)
def test_stack_default(self):
    """Default stacking adds a new navigation axis; split() inverts it."""
    s = self.signal
    s1 = s.deepcopy() + 1
    s2 = s.deepcopy() * 4
    test_axis = s.axes_manager[0].index_in_array
    result_signal = utils.stack([s, s1, s2])
    result_list = result_signal.split()
    # Modernized: plain asserts instead of deprecated nose nt.assert_equal.
    assert test_axis == s.axes_manager[0].index_in_array
    assert len(result_list) == 3
    np.testing.assert_array_almost_equal(
        result_list[0].data, result_signal.inav[:, :, 0].data)
def test_stack_default(self):
    """Default stacking adds a new navigation axis; split() inverts it."""
    s = self.signal
    s1 = s.deepcopy() + 1
    s2 = s.deepcopy() * 4
    test_axis = s.axes_manager[0].index_in_array
    result_signal = utils.stack([s, s1, s2])
    result_list = result_signal.split()
    # Modernized: plain asserts instead of deprecated nose assert_true.
    assert test_axis == s.axes_manager[0].index_in_array
    assert len(result_list) == 3
    assert (result_list[0].data == result_signal[::, ::, 0].data).all()
def test_stack_default(self):
    """Default stacking adds a new navigation axis; split() inverts it."""
    base = self.signal
    shifted = base.deepcopy() + 1
    scaled = base.deepcopy() * 4
    axis_before = base.axes_manager[0].index_in_array
    stacked = utils.stack([base, shifted, scaled])
    parts = stacked.split()
    # Stacking must not rearrange the axes of the input signal.
    assert base.axes_manager[0].index_in_array == axis_before
    assert len(parts) == 3
    np.testing.assert_array_almost_equal(
        parts[0].data, stacked.inav[:, :, 0].data)
def test_stack_default(self):
    """Default stacking adds a new navigation axis; split() inverts it."""
    s = self.signal
    s1 = s.deepcopy() + 1
    s2 = s.deepcopy() * 4
    test_axis = s.axes_manager[0].index_in_array
    result_signal = utils.stack([s, s1, s2])
    result_list = result_signal.split()
    # Modernized: plain asserts instead of deprecated nose assert_true.
    assert test_axis == s.axes_manager[0].index_in_array
    assert len(result_list) == 3
    assert (result_list[0].data == result_signal.inav[::, ::, 0].data).all()
def test_stack_not_default(self): s = self.signal # Add variance to metadata to check that it also stacks correctly s.metadata.set_item("Signal.Noise_properties.variance", s.deepcopy()) def get_variance_data(s): return s.metadata.Signal.Noise_properties.variance.data s1 = s.inav[:, :-1] s1.data += 1 s2 = s.inav[:, ::2] s2.data *= 4 result_signal = utils.stack([s, s1, s2], axis=1) axis_size = s.axes_manager[1].size axs1 = s1.axes_manager[1].size result_list = result_signal.split() assert len(result_list) == 3 for rs in [result_signal, utils.stack([s, s1, s2], axis='y')]: np.testing.assert_array_almost_equal( result_list[0].data, rs.inav[:, :axis_size].data) np.testing.assert_array_almost_equal( s.data, rs.inav[:, :axis_size].data) np.testing.assert_array_almost_equal( s1.data, rs.inav[:, axis_size:axis_size + axs1].data) np.testing.assert_array_almost_equal( s2.data, rs.inav[:, axis_size + axs1:].data) np.testing.assert_array_almost_equal( get_variance_data(result_list[0]), get_variance_data(rs.inav[:, :axis_size])) np.testing.assert_array_almost_equal( get_variance_data(s), get_variance_data(rs.inav[:, :axis_size]) ) np.testing.assert_array_almost_equal( get_variance_data(s1), get_variance_data(rs.inav[:, axis_size:axis_size + axs1]) ) np.testing.assert_array_almost_equal( get_variance_data(s2), get_variance_data(rs.inav[:, axis_size + axs1:]) )
def test_function_nd(binned):
    """Gaussian.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((100,)))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 1
    ax.offset = -20
    sig.data = Gaussian(50015.156, 10 / sigma2fwhm, 10).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    comp = Gaussian()
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    # Binned signals estimate per-bin amplitudes, hence the scale factor.
    scale_factor = ax.scale if binned else 1
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data)
def test_function_nd(binned):
    """PowerLaw.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((100, )))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 0.02
    ax.offset = 1
    sig.data = PowerLaw(50015.156, 1.2).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    comp = PowerLaw()
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    # Binned signals estimate per-bin amplitudes, hence the scale factor.
    scale_factor = ax.scale if binned else 1
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data,
                    rtol=0.05)
def test_function_nd(binned):
    """GaussianHF.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((100,)))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 2.
    ax.offset = -30
    sig.data = GaussianHF(50015.156, 23, 10).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    comp = GaussianHF()
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    scale_factor = ax.scale if binned else 1
    # TODO: sort out why the rtol needs to be so high...
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data,
                    rtol=0.05)
def test_function_nd(binned, lazy):
    """SkewNormal.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((300,)))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 0.2
    ax.offset = -10
    sig.data = SkewNormal(A=2, x0=2, scale=10, shape=5).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    if lazy:
        stacked = stacked.as_lazy()
    comp = SkewNormal()
    scale_factor = ax.scale if binned else 1
    assert comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data,
                    0.06)
def test_function_nd(binned):
    """GaussianHF.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((100, )))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 2.
    ax.offset = -30
    sig.data = GaussianHF(50015.156, 23, 10).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    comp = GaussianHF()
    scale_factor = ax.scale if binned else 1
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    # TODO: sort out why the rtol needs to be so high...
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data,
                    rtol=0.05)
def test_function_nd(binned, lazy):
    """Voigt.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((200, )))
    sig.metadata.Signal.binned = binned
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = .05
    ax.offset = -5
    reference = Voigt(centre=1, area=5, gamma=0, sigma=0.5, legacy=False)
    sig.data = reference.function(ax.axis)
    stacked = stack([sig, sig])
    if lazy:
        stacked = stacked.as_lazy()
    comp = Voigt(legacy=False)
    scale_factor = ax.scale if binned else 1
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data)
def test_function_nd(binned, lazy):
    """Lorentzian.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((250,)))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = .2
    ax.offset = -15
    sig.data = Lorentzian(52342, 2, 10).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    if lazy:
        stacked = stacked.as_lazy()
    comp = Lorentzian()
    scale_factor = ax.scale if binned else 1
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    np.testing.assert_allclose(comp.function_nd(ax.axis) * scale_factor,
                               stacked.data, 0.16)
def test_stack_stack_metadata_value(self):
    """stack_metadata=True stores each input's full metadata as stack elements."""
    first = BaseSignal(1)
    first.metadata.General.title = 'title 1'
    first.original_metadata.set_item('a', 1)
    second = BaseSignal(2)
    second.metadata.General.title = 'title 2'
    second.original_metadata.set_item('a', 2)
    stacked = utils.stack([first, second], stack_metadata=True)
    elements = stacked.original_metadata.stack_elements
    for element, source in zip([elements.element0, elements.element1],
                               [first, second]):
        assert (element.original_metadata.as_dictionary()
                == source.original_metadata.as_dictionary())
        assert (element.metadata.as_dictionary()
                == source.metadata.as_dictionary())
def test_function_nd(binned, lazy):
    """Exponential.function_nd matches the stacked data it was estimated from."""
    sig = Signal1D(np.empty((100, )))
    ax = sig.axes_manager.signal_axes[0]
    ax.scale = 0.2
    ax.offset = 15
    sig.data = Exponential(A=10005.7, tau=214.3).function(ax.axis)
    sig.metadata.Signal.binned = binned
    stacked = stack([sig, sig])
    if lazy:
        stacked = stacked.as_lazy()
    comp = Exponential()
    scale_factor = ax.scale if binned else 1.
    comp.estimate_parameters(stacked, ax.low_value, ax.high_value, False)
    assert comp.binned == binned
    assert_allclose(comp.function_nd(ax.axis) * scale_factor, stacked.data,
                    rtol=0.05)
def test_stack_stack_metadata(self, stack_metadata):
    """original_metadata handling for each stack_metadata mode."""
    base = self.signal
    plus_one = base.deepcopy() + 1
    times_four = base.deepcopy() * 4
    axis_before = base.axes_manager[0].index_in_array
    stacked = utils.stack([base, plus_one, times_four],
                          stack_metadata=stack_metadata)
    parts = stacked.split()
    assert base.axes_manager[0].index_in_array == axis_before
    assert len(parts) == 3
    np.testing.assert_array_almost_equal(
        parts[0].data, stacked.inav[:, :, 0].data)
    if stack_metadata is True:
        om = stacked.original_metadata.stack_elements.element0.original_metadata
    elif stack_metadata in [0, 1]:
        # Note: False == 0, so stack_metadata=False also takes this branch
        # and `om` is always bound below.
        om = stacked.original_metadata
    if stack_metadata is False:
        assert om.as_dictionary() == {}
    else:
        assert om.as_dictionary() == base.original_metadata.as_dictionary()
def test_stack_broadcast_number(self):
    """A bare number broadcasts to the signal shape when stacked."""
    sig = self.signal
    stacked = utils.stack([5, sig])
    expected = 5 * np.ones((3, 2, 5))
    np.testing.assert_array_equal(stacked.inav[..., 0].data, expected)
def test_stack_broadcast_number_not_default(self):
    """A bare number broadcasts when stacking along a named signal axis."""
    sig = self.signal
    stacked = utils.stack([5, sig], axis='E')
    expected = 5 * np.ones((3, 2))
    np.testing.assert_array_equal(stacked.isig[0].data, expected)
def test_plot_eds_lines():
    """Plot a stacked EDS-TEM spectrum with its X-ray lines annotated."""
    spectrum = EDS_TEM_Spectrum()
    stacked = stack([spectrum, spectrum * 5])
    stacked.plot(True)
    stacked.axes_manager.navigation_axes[0].index = 1
    return stacked._plot.signal_plot.figure
def quantification(
    self,
    intensities,
    kfactors,
    composition_units="weight",
    navigation_mask=1.0,
    closing=True,
    plot_result=False,
    **kwargs
):
    """
    Quantification of intensities to return elemental composition

    Method: Cliff-Lorimer

    Parameters
    ----------
    intensities: list of signal
        the intensity for each X-ray lines.
    kfactors: list of float
        The list of kfactor in same order as intensities. Note that
        intensities provided by hyperspy are sorted by the alphabetical
        order of the X-ray lines. eg. kfactors =[0.982, 1.32, 1.60] for
        ['Al_Ka','Cr_Ka', 'Ni_Ka'].
    composition_units: 'weight' or 'atomic'
        Quantification returns weight percent. By choosing 'atomic', the
        return composition is in atomic percent.
    navigation_mask : None or float or signal
        The navigation locations marked as True are not used in the
        quantification. If int is given the vacuum_mask method is used to
        generate a mask with the int value as threshold. Else provides a
        signal with the navigation shape.
    closing: bool
        If true, applied a morphologic closing to the mask obtained by
        vacuum_mask.
    plot_result : bool
        If True, plot the calculated composition. If the current object is
        a single spectrum it prints the result instead.
    kwargs
        The extra keyword arguments are passed to plot.

    Returns
    -------
    A list of quantified elemental maps (signal) giving the composition of
    the sample in weight or atomic percent.

    Examples
    --------
    >>> s = utils.example_signals.EDS_TEM_Spectrum()
    >>> s.add_lines()
    >>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La
    >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
    >>> s.plot(background_windows=bw)
    >>> intensities = s.get_lines_intensity(background_windows=bw)
    >>> res = s.quantification(intensities, kfactors, plot_result=True,
    >>>                        composition_units='atomic')
    Fe (Fe_Ka): Composition = 15.41 atomic percent
    Pt (Pt_La): Composition = 84.59 atomic percent

    See also
    --------
    vacuum_mask
    """
    # A float mask threshold triggers automatic vacuum masking; a signal
    # mask is used as-is.
    if isinstance(navigation_mask, float):
        navigation_mask = self.vacuum_mask(navigation_mask, closing).data
    elif navigation_mask is not None:
        navigation_mask = navigation_mask.data
    xray_lines = self.metadata.Sample.xray_lines
    composition = utils.stack(intensities)
    composition.data = (
        utils_eds.quantification_cliff_lorimer(composition.data,
                                               kfactors=kfactors,
                                               mask=navigation_mask) * 100.0
    )
    composition = composition.split()
    if composition_units == "atomic":
        composition = utils.material.weight_to_atomic(composition)
    for i, xray_line in enumerate(xray_lines):
        element, line = utils_eds._get_element_and_line(xray_line)
        composition[i].metadata.General.title = composition_units + \
            " percent of " + element
        composition[i].metadata.set_item("Sample.elements", ([element]))
        composition[i].metadata.set_item("Sample.xray_lines", ([xray_line]))
        if plot_result and composition[i].axes_manager.signal_dimension == 0:
            print(
                "%s (%s): Composition = %.2f %s percent"
                % (element, xray_line, composition[i].data, composition_units)
            )
    # Relies on `i` keeping its last value from the loop above: the check
    # uses the final composition element's signal dimension.
    if plot_result and composition[i].axes_manager.signal_dimension != 0:
        utils.plot.plot_signals(composition, **kwargs)
    return composition
def quantification(self,
                   intensities,
                   method,
                   factors='auto',
                   composition_units='atomic',
                   navigation_mask=1.0,
                   closing=True,
                   plot_result=False,
                   **kwargs):
    """
    Quantification using Cliff-Lorimer, the zeta-factor method, or
    ionization cross sections.

    Parameters
    ----------
    intensities: list of signal
        the intensity for each X-ray lines.
    method: 'CL' or 'zeta' or 'cross_section'
        Set the quantification method: Cliff-Lorimer, zeta-factor, or
        ionization cross sections.
    factors: list of float
        The list of kfactors, zeta-factors or cross sections in same order
        as intensities. Note that intensities provided by Hyperspy are
        sorted by the alphabetical order of the X-ray lines.
        eg. factors =[0.982, 1.32, 1.60] for ['Al_Ka', 'Cr_Ka', 'Ni_Ka'].
    composition_units: 'weight' or 'atomic'
        The quantification returns the composition in atomic percent by
        default, but can also return weight percent if specified.
    navigation_mask : None or float or signal
        The navigation locations marked as True are not used in the
        quantification. If int is given the vacuum_mask method is used to
        generate a mask with the int value as threshold. Else provides a
        signal with the navigation shape.
    closing: bool
        If true, applied a morphologic closing to the mask obtained by
        vacuum_mask.
    plot_result : bool
        If True, plot the calculated composition. If the current object is
        a single spectrum it prints the result instead.
    kwargs
        The extra keyword arguments are passed to plot.

    Returns
    -------
    A list of quantified elemental maps (signal) giving the composition of
    the sample in weight or atomic percent.

    If the method is 'zeta' this function also returns the mass thickness
    profile for the data.

    If the method is 'cross_section' this function also returns the atom
    counts for each element.

    Examples
    --------
    >>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
    >>> s.add_lines()
    >>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La
    >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
    >>> s.plot(background_windows=bw)
    >>> intensities = s.get_lines_intensity(background_windows=bw)
    >>> res = s.quantification(intensities, kfactors, plot_result=True,
    >>>                        composition_units='atomic')
    Fe (Fe_Ka): Composition = 15.41 atomic percent
    Pt (Pt_La): Composition = 84.59 atomic percent

    See also
    --------
    vacuum_mask
    """
    # A float mask threshold triggers automatic vacuum masking; a signal
    # mask is used as-is.
    if isinstance(navigation_mask, float):
        navigation_mask = self.vacuum_mask(navigation_mask, closing).data
    elif navigation_mask is not None:
        navigation_mask = navigation_mask.data
    xray_lines = self.metadata.Sample.xray_lines
    composition = utils.stack(intensities)
    if method == 'CL':
        composition.data = utils_eds.quantification_cliff_lorimer(
            composition.data, kfactors=factors,
            mask=navigation_mask) * 100.
    elif method == 'zeta':
        results = utils_eds.quantification_zeta_factor(
            composition.data, zfactors=factors,
            dose=self._get_dose(method))
        composition.data = results[0] * 100.
        mass_thickness = intensities[0].deepcopy()
        mass_thickness.data = results[1]
        mass_thickness.metadata.General.title = 'Mass thickness'
    elif method == 'cross_section':
        results = utils_eds.quantification_cross_section(
            composition.data,
            cross_sections=factors,
            dose=self._get_dose(method))
        composition.data = results[0] * 100
        number_of_atoms = utils.stack(intensities)
        number_of_atoms.data = results[1]
        number_of_atoms = number_of_atoms.split()
    else:
        raise ValueError('Please specify method for quantification, '
                         'as \'CL\', \'zeta\' or \'cross_section\'')
    composition = composition.split()
    # cross_section quantification is natively atomic percent; the other
    # methods are natively weight percent, so convert as required.
    if composition_units == 'atomic':
        if method != 'cross_section':
            composition = utils.material.weight_to_atomic(composition)
    else:
        if method == 'cross_section':
            composition = utils.material.atomic_to_weight(composition)
    for i, xray_line in enumerate(xray_lines):
        element, line = utils_eds._get_element_and_line(xray_line)
        composition[i].metadata.General.title = composition_units + \
            ' percent of ' + element
        composition[i].metadata.set_item("Sample.elements", ([element]))
        composition[i].metadata.set_item(
            "Sample.xray_lines", ([xray_line]))
        if plot_result and \
                composition[i].axes_manager.signal_dimension == 0:
            print("%s (%s): Composition = %.2f %s percent"
                  % (element, xray_line, composition[i].data,
                     composition_units))
    if method == 'cross_section':
        for i, xray_line in enumerate(xray_lines):
            element, line = utils_eds._get_element_and_line(xray_line)
            number_of_atoms[i].metadata.General.title = \
                'atom counts of ' + element
            number_of_atoms[i].metadata.set_item("Sample.elements",
                                                 ([element]))
            number_of_atoms[i].metadata.set_item(
                "Sample.xray_lines", ([xray_line]))
    # Relies on `i` keeping its last value from the loop(s) above.
    if plot_result and composition[i].axes_manager.signal_dimension != 0:
        utils.plot.plot_signals(composition, **kwargs)
    if method == 'zeta':
        self.metadata.set_item("Sample.mass_thickness", mass_thickness)
        return composition, mass_thickness
    elif method == 'cross_section':
        return composition, number_of_atoms
    elif method == 'CL':
        return composition
    else:
        # NOTE(review): unreachable — an invalid method already raised in
        # the first if/elif chain above.
        raise ValueError('Please specify method for quantification, '
                         'as \'CL\', \'zeta\' or \'cross_section\'')