def setup_method(self, method):
    # Set the stack
    self.ref_holo = hs.stack([reference_hologram()] * 2)
    self.ref_holo = hs.stack([self.ref_holo] * 3)

    # Parameters measured using Gatan HoloWorks:
    self.REF_FRINGE_SPACING = 3.48604
    self.REF_FRINGE_SAMPLING = 3.7902

    # Measured using the definition of fringe contrast from the centre of image
    self.REF_FRINGE_CONTRAST = 0.0736

    # Prepare test data and derived statistical parameters
    self.ref_carrier_freq = 1.0 / self.REF_FRINGE_SAMPLING
    self.ref_carrier_freq_nm = 1.0 / self.REF_FRINGE_SPACING

    ht = self.ref_holo.metadata.Acquisition_instrument.TEM.beam_energy
    momentum = (
        2 * constants.m_e * constants.elementary_charge * ht * 1000
        * (
            1
            + constants.elementary_charge * ht * 1000
            / (2 * constants.m_e * constants.c ** 2)
        )
    )
    wavelength = constants.h / np.sqrt(momentum) * 1e9  # in nm
    self.ref_carrier_freq_mrad = self.ref_carrier_freq_nm * 1000 * wavelength
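# Minimal standalone sketch of the relativistic electron wavelength used above, for
# illustration only: lambda = h / sqrt(2*m_e*e*V * (1 + e*V / (2*m_e*c**2))), with V
# the accelerating voltage in volts (beam_energy in kV times 1000). The function name
# is hypothetical and not part of the test suite.
import numpy as np
from scipy import constants


def _electron_wavelength_nm(beam_energy_kv):
    volts = beam_energy_kv * 1000
    p_squared = 2 * constants.m_e * constants.elementary_charge * volts * (
        1 + constants.elementary_charge * volts
        / (2 * constants.m_e * constants.c ** 2))
    # roughly 0.00197 nm at 300 kV
    return constants.h / np.sqrt(p_squared) * 1e9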
def test_model2D_one_component(self, nav2d):
    mesh, x, y = self.mesh, self.x, self.y
    G1 = Gaussian2D(30, 5.0, 4.0, 0, 0)
    data = G1.function(*mesh)
    s = Signal2D(data)
    s.axes_manager[-2].offset = x[0]
    s.axes_manager[-1].offset = y[0]
    s.axes_manager[-2].scale = x[1] - x[0]
    s.axes_manager[-1].scale = y[1] - y[0]
    if nav2d:
        s = hs.stack([s] * 2)
        s = hs.stack([s] * 3)
    m = s.create_model()
    m.append(G1)
    G1.set_parameters_not_free()
    G1.A.free = True
    m.multifit(optimizer='lstsq', calculate_errors=True)
    diff = (s - m.as_signal(show_progressbar=False))
    np.testing.assert_allclose(diff.data, 0.0, atol=1E-7)
    np.testing.assert_allclose(m.p_std[0], 0.0, atol=1E-7)
def test_model2D_linear_many_gaussians(self, nav2d):
    mesh, x, y = self.mesh, self.x, self.y
    gausslow, gausshigh = -8, 8
    gauss_step = 8
    X, Y = mesh
    z = np.zeros(X.shape)
    g = Gaussian2D()
    for i in np.arange(gausslow, gausshigh + 1, gauss_step):
        for j in np.arange(gausslow, gausshigh + 1, gauss_step):
            g.centre_x.value = i
            g.centre_y.value = j
            g.A.value = 10
            z += g.function(X, Y)

    s = Signal2D(z)
    s.axes_manager[-2].offset = x[0]
    s.axes_manager[-1].offset = y[0]
    s.axes_manager[-2].scale = x[1] - x[0]
    s.axes_manager[-1].scale = y[1] - y[0]
    if nav2d:
        s = hs.stack([s] * 2)
        s = hs.stack([s] * 3)
    m = s.create_model()
    for i in np.arange(gausslow, gausshigh + 1, gauss_step):
        for j in np.arange(gausslow, gausshigh + 1, gauss_step):
            g = Gaussian2D(centre_x=i, centre_y=j)
            g.set_parameters_not_free()
            g.A.free = True
            m.append(g)
    m.fit(optimizer='lstsq')
    np.testing.assert_allclose(s.data, m.as_signal().data)
def test_multiple_linear_parameters_convolution(nav_dim):
    s_ref = hs.signals.Signal1D(np.ones(1000))

    # Create signal to convolve
    to_convolve_component = hs.model.components1D.Gaussian(A=1000, sigma=50, centre=100)
    to_convolve = hs.signals.Signal1D(
        to_convolve_component.function(np.arange(1000)))
    to_convolve.axes_manager[-1].offset = -to_convolve_component.centre.value

    l_ref1 = hs.model.components1D.Lorentzian(A=100, centre=200, gamma=10)
    l_ref2 = hs.model.components1D.Lorentzian(A=100, centre=600, gamma=20)

    m_ref = s_ref.create_model()
    m_ref.extend([l_ref1, l_ref2])
    m_ref.low_loss = to_convolve
    s = m_ref.as_signal()

    if nav_dim >= 1:
        s = hs.stack([s] * 2)
        to_convolve = hs.stack([to_convolve] * 2)
    if nav_dim == 2:
        s = hs.stack([s] * 3)
        to_convolve = hs.stack([to_convolve] * 3)

    m = s.create_model()
    l1 = hs.model.components1D.Lorentzian(centre=200, gamma=10)
    l2 = hs.model.components1D.Lorentzian(centre=600, gamma=20)
    m.extend([l1, l2])
    assert not m.convolved
    m.low_loss = to_convolve
    assert m.convolved
    m.set_parameters_not_free(only_nonlinear=True)
    with pytest.warns(UserWarning):
        m.multifit(optimizer='lstsq')

    np.testing.assert_allclose(l_ref1.A.value, l1.A.value)
    np.testing.assert_allclose(l_ref1.centre.value, l1.centre.value)
    np.testing.assert_allclose(l_ref1.gamma.value, l1.gamma.value)
    np.testing.assert_allclose(l_ref2.A.value, l2.A.value)
    np.testing.assert_allclose(l_ref2.centre.value, l2.centre.value)
    np.testing.assert_allclose(l_ref2.gamma.value, l2.gamma.value)
    np.testing.assert_allclose(m.as_signal().data, s.data)
    if nav_dim >= 1:
        np.testing.assert_allclose(l1.A.map['values'].mean(), l_ref1.A.value)
        np.testing.assert_allclose(l1.centre.map['values'].mean(), l_ref1.centre.value)
        np.testing.assert_allclose(l1.gamma.map['values'].mean(), l_ref1.gamma.value)
        np.testing.assert_allclose(l2.A.map['values'].mean(), l_ref2.A.value)
        np.testing.assert_allclose(l2.centre.map['values'].mean(), l_ref2.centre.value)
        np.testing.assert_allclose(l2.gamma.map['values'].mean(), l_ref2.gamma.value)
def test_expression_multiple_linear_parameter(nav_dim, convolve):
    """
    This test checks that linear fitting works with convolution with
    - single and multidimensional fit (warning raised)
    - multiple free parameters for the same component (different code path)
    """
    s_ref = hs.signals.Signal1D(np.ones(20))
    p_ref = hs.model.components1D.Polynomial(order=2, a0=25, a1=-50, a2=2.5,
                                             legacy=False)

    # Create signal to convolve
    to_convolve_component = hs.model.components1D.Gaussian(A=100, sigma=5, centre=10)
    to_convolve = hs.signals.Signal1D(
        to_convolve_component.function(np.arange(1000)))
    to_convolve.axes_manager[-1].offset = -to_convolve_component.centre.value

    m_ref = s_ref.create_model()
    m_ref.extend([p_ref])
    if convolve:
        m_ref.low_loss = to_convolve
    s = m_ref.as_signal()

    if nav_dim >= 1:
        s = hs.stack([s] * 2)
        if convolve:
            to_convolve = hs.stack([to_convolve] * 2)
    if nav_dim == 2:
        s = hs.stack([s] * 3)
        if convolve:
            to_convolve = hs.stack([to_convolve] * 3)

    m = s.create_model()
    p = hs.model.components1D.Polynomial(order=2, legacy=False)
    m.append(p)
    assert not m.convolved
    if convolve:
        m.low_loss = to_convolve
        with pytest.warns(UserWarning):
            m.multifit(optimizer='lstsq')
    else:
        m.multifit(optimizer='lstsq')

    np.testing.assert_allclose(p_ref.a0.value, p.a0.value)
    np.testing.assert_allclose(p_ref.a1.value, p.a1.value)
    np.testing.assert_allclose(p_ref.a2.value, p.a2.value)
    np.testing.assert_allclose(m.as_signal().data, s.data)
    if nav_dim >= 1:
        np.testing.assert_allclose(p.a0.map['values'].mean(), p_ref.a0.value)
        np.testing.assert_allclose(p.a1.map['values'].mean(), p_ref.a1.value)
        np.testing.assert_allclose(p.a2.map['values'].mean(), p_ref.a2.value)
def generate_bad_toy_data():
    """
    Use a deliberately bad dataset here, as per
    https://github.com/hyperspy/hyperspy/issues/784, which previously
    caused a MemoryError when using the Freedman-Diaconis rule.
    """
    ax1 = np.exp(-np.abs(np.arange(-30, 100, 0.02)))
    ax2 = np.exp(-np.abs(np.arange(-40, 90, 0.02)))
    s1 = hs.signals.EELSSpectrum(ax1)
    s2 = hs.signals.EELSSpectrum(ax2)
    s1 = hs.stack([s1] * 5)
    s2 = hs.stack([s2] * 5)
    s1.align_zero_loss_peak(also_align=[s2])
    return s1
def test_plot_integrated_intensity(self, stack, diffraction_pattern):
    if stack:
        diffraction_pattern = hs.stack([diffraction_pattern] * 3)
    roi = hs.roi.CircleROI(3, 3, 5)
    plt.ion()  # to make plotting non-blocking
    diffraction_pattern.plot_integrated_intensity(roi)
    plt.close("all")
def test_function_nd(self):
    s = self.m.as_signal()
    s = hs.stack([s] * 2)
    o = hs.model.components1D.Offset()
    o.estimate_parameters(s, None, None, only_current=False)
    axis = s.axes_manager.signal_axes[0]
    np.testing.assert_allclose(o.function_nd(axis.axis), s.data)
def test_function_nd(self):
    s = self.m.as_signal()
    s = hs.stack([s] * 2)
    p = hs.model.components1D.Polynomial(order=2, legacy=False)
    p.estimate_parameters(s, None, None, only_current=False)
    axis = s.axes_manager.signal_axes[0]
    np.testing.assert_allclose(p.function_nd(axis.axis), s.data)
def test_fit_component():
    np.random.seed(0)
    s = hs.signals.Signal1D(np.random.normal(size=1000, loc=1)).get_histogram()
    s = hs.stack([s, s], axis=0)
    m = s.create_model()
    m.extend([hs.model.components1D.Gaussian(),
              hs.model.components1D.Gaussian()])
    g1, g2 = m
    g1.centre.value = 0
    g2.centre.value = 8

    fc = ComponentFit(model=m, component=g1)
    fc.ss_left_value = -2
    fc.ss_right_value = 4
    fc.only_current = not fc.only_current
    wd = fc.gui(**KWARGS)["ipywidgets"]["wdict"]
    wd["fit_button"]._click_handlers(wd["fit_button"])  # Trigger it
    assert wd["only_current"].value == fc.only_current
    wd["only_current"].value = not fc.only_current
    assert wd["only_current"].value == fc.only_current
    assert g2.centre.value == 8
    np.testing.assert_allclose(g1.centre.value, 0.804, rtol=1E-2)
    np.testing.assert_allclose(g1.sigma.value, 0.965, rtol=1E-2)
    assert wd["iterpath"].disabled == True
    fc.only_current = False
    assert wd["iterpath"].disabled == False
    wd["close_button"]._click_handlers(wd["close_button"])  # Trigger it
def test_plot_integrated_intensity(self, stack, electron_diffraction1d):
    if stack:
        electron_diffraction1d = hs.stack([electron_diffraction1d] * 3)
    roi = hs.roi.SpanROI(left=1.0, right=2.0)
    plt.ion()  # to make plotting non-blocking
    electron_diffraction1d.plot_integrated_intensity(roi)
    plt.close("all")
def test_ragged_navigation_shape(self, ragged):
    s = hs.stack([self.im] * 3)
    out = s.map(lambda x: x, inplace=False, ragged=ragged)
    assert out.axes_manager.navigation_shape == s.axes_manager.navigation_shape
    assert out.data.shape[:2] == s.axes_manager.navigation_shape[::-1]
    assert out.ragged == ragged
    assert not s.ragged
def test_ma_lazify():
    s = hs.signals.BaseSignal(
        np.ma.masked_array(data=[1, 2, 3], mask=[0, 1, 0]))
    l = s.as_lazy()
    assert np.isnan(l.data[1].compute())
    ss = hs.stack([s, s])
    assert np.isnan(ss.data[:, 1]).all()
def test_expression_convolved(nav_dim, multiple_free_parameters):
    s_ref = hs.signals.Signal1D(np.ones(100))

    # Create signal to convolve
    to_convolve_component = hs.model.components1D.Gaussian(A=100, sigma=5, centre=10)
    to_convolve = hs.signals.Signal1D(
        to_convolve_component.function(np.arange(100)))
    to_convolve.axes_manager[-1].offset = -to_convolve_component.centre.value

    # Create reference signal from model with convolution
    l_ref = hs.model.components1D.Lorentzian(A=100, centre=20, gamma=4)
    m_ref = s_ref.create_model()
    m_ref.append(l_ref)
    m_ref.low_loss = to_convolve
    s = m_ref.as_signal()

    if nav_dim >= 1:
        s = hs.stack([s] * 2)
        to_convolve = hs.stack([to_convolve] * 2)
    if nav_dim == 2:
        s = hs.stack([s] * 3)
        to_convolve = hs.stack([to_convolve] * 3)

    m = s.create_model()
    l = hs.model.components1D.Lorentzian(centre=20, gamma=4)
    m.append(l)
    assert not m.convolved
    m.low_loss = to_convolve
    assert m.convolved
    m.set_parameters_not_free(only_nonlinear=True)
    with pytest.warns(UserWarning):
        m.multifit(optimizer='lstsq')

    np.testing.assert_allclose(l_ref.A.value, l.A.value)
    np.testing.assert_allclose(l_ref.centre.value, l.centre.value)
    np.testing.assert_allclose(l_ref.gamma.value, l.gamma.value)
    np.testing.assert_allclose(m.as_signal().data, s.data)
    if nav_dim in (1, 2):
        np.testing.assert_allclose(l.A.map['values'].mean(), l_ref.A.value)
        np.testing.assert_allclose(l.centre.map['values'].mean(), l_ref.centre.value)
        np.testing.assert_allclose(l.gamma.map['values'].mean(), l_ref.gamma.value)
def test_background_remove_navigation(self):
    # Check that it calculates the chisq
    s2 = hs.stack([self.signal] * 2)
    (s, model) = s2.remove_background(signal_range=(None, None),
                                      background_type='Gaussian',
                                      fast=True,
                                      return_model=True)
    assert np.allclose(model.chisq.data, np.array([0.0, 0.0]))
    assert np.allclose(model.as_signal().data, s2.data)
    assert np.allclose(s.data, np.zeros_like(s.data))
def test_function_nd(self, binned, uniform):
    self.m.signal.axes_manager[-1].is_binned = binned
    s = self.m.as_signal()
    s = hs.stack([s] * 2)
    o = hs.model.components1D.Offset()
    o.estimate_parameters(s, None, None, only_current=False)
    assert o._axes_manager[-1].is_binned == binned
    axis = s.axes_manager.signal_axes[0]
    factor = axis.scale if binned else 1
    np.testing.assert_allclose(o.function_nd(axis.axis) * factor, s.data)
def test_function_nd(self, binned):
    self.m.signal.metadata.Signal.binned = binned
    s = self.m.as_signal()
    s2 = hs.stack([s] * 2)
    g = hs.model.components1D.Gaussian()
    g.estimate_parameters(s2, None, None, only_current=False)
    assert g.binned == binned
    axis = s.axes_manager.signal_axes[0]
    factor = axis.scale if binned else 1
    np.testing.assert_allclose(g.function_nd(axis.axis) * factor, s2.data)
def test_get_integrated_intensity_out_signal_axes(self, diffraction_pattern):
    s = hs.stack([diffraction_pattern] * 3)
    roi = hs.roi.CircleROI(3, 3, 5)
    vi = s.get_integrated_intensity(roi, out_signal_axes=(0, 1, 2))
    assert vi.axes_manager.signal_dimension == 3
    assert vi.axes_manager.navigation_dimension == 0
    assert vi.metadata.General.title == "Integrated intensity"
    assert (vi.metadata.Diffraction.intergrated_range
            == "CircleROI(cx=3, cy=3, r=5) of Stack of ")
def test_function_nd(self, binned):
    self.m.signal.metadata.Signal.binned = binned
    s = self.m.as_signal(show_progressbar=None, parallel=False)
    s2 = hs.stack([s] * 2)
    g = hs.model.components1D.Gaussian()
    g.estimate_parameters(s2, None, None, only_current=False)
    assert g.binned == binned
    axis = s.axes_manager.signal_axes[0]
    factor = axis.scale if binned else 1
    assert_allclose(g.function_nd(axis.axis) * factor, s2.data)
def generate_bad_toy_data():
    """
    Use a deliberately bad dataset here, as per
    https://github.com/hyperspy/hyperspy/issues/784, which previously
    caused a MemoryError when using the Freedman-Diaconis rule.
    """
    ax1 = np.exp(-np.abs(np.arange(-30, 100, 0.05)))
    s1 = hs.signals.EELSSpectrum(ax1)
    s1 = hs.stack([s1] * 2)
    return s1
def test_function_nd(self):
    s = self.s
    s1 = self.pattern
    fp = hs.model.components1D.ScalableFixedPattern(s1)
    s_multi = hs.stack([s] * 3)
    m = s_multi.create_model()
    m.append(fp)
    fp.yscale.map['values'] = [1.0, 0.5, 1.0]
    fp.xscale.map['values'] = [1.0, 1.0, 0.75]
    results = fp.function_nd(s.axes_manager[0].axis)
    expected = np.array([s1.data * v for v in [1, 0.5, 0.75]])
    np.testing.assert_allclose(results, expected)
def _get_virtual_images(self, roi_list, normalize, new_axis_dict,
                        out_signal_axes=None):
    """
    Obtain the intensity scattered at each navigation position in a
    Diffraction2D signal by summation over the ROIs defined by the
    ``roi_list`` parameter.

    Parameters
    ----------
    roi_list : list of hyperspy ROI or list of `hyperspy.roi.CircleROI` arguments
        List of ROIs, or the arguments required to initialise a CircleROI.
    %s
    %s

    Returns
    -------
    virtual_images : VDFImage
        VDFImage object containing the virtual images.
    """
    if isinstance(roi_list[0], hs.roi.CircleROI):
        self.roi_list = roi_list
    else:
        self.roi_list = [hs.roi.CircleROI(*r) for r in roi_list]

    vdfs = [
        self.signal.get_integrated_intensity(roi, out_signal_axes)
        for roi in self.roi_list
    ]
    vdfim = hs.stack(vdfs,
                     new_axis_name=new_axis_dict["name"],
                     show_progressbar=False)

    vdfim.set_signal_type("virtual_dark_field")

    if vdfim.metadata.has_item("Diffraction.integrated_range"):
        del vdfim.metadata.Diffraction.integrated_range
    vdfim.metadata.set_item("Diffraction.roi_list",
                            [f"{roi}" for roi in self.roi_list])

    # Set new axis properties
    new_axis = vdfim.axes_manager[new_axis_dict["name"]]
    for k, v in new_axis_dict.items():
        setattr(new_axis, k, v)

    if normalize:
        vdfim.map(normalize_virtual_images, show_progressbar=False)

    return vdfim
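# Hypothetical usage sketch for _get_virtual_images: `generator` stands in for an
# instance of the surrounding class (which holds `self.signal`); the CircleROI
# arguments (cx, cy, r) and the new_axis_dict values are illustrative only.
def _example_virtual_images(generator):
    return generator._get_virtual_images(
        roi_list=[(1.0, 1.0, 0.2), (-1.0, 0.5, 0.2)],
        normalize=True,
        new_axis_dict={"name": "ROI index", "units": "", "offset": 0, "scale": 1})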
def test_estimate_parameters_unbinned(self):
    self.m.signal.metadata.Signal.binned = False
    s = self.m.as_signal(show_progressbar=None, parallel=False)
    s.metadata.Signal.binned = False
    g = hs.model.components1D.PowerLaw()
    g.estimate_parameters(s, None, None, only_current=False)
    assert_allclose(g.A.value, 10.064378823244837)
    assert_allclose(g.r.value, 4.0017522876514304)

    # Test that it all works when calling it with a different signal
    s2 = hs.stack((s, s))
    g.estimate_parameters(s2, None, None, only_current=False)
    assert_allclose(g.A.map["values"][1], 10.064378823244837)
    assert_allclose(g.r.map["values"][0], 4.0017522876514304)
def test_model2D_polyexpression(self, nav2d):
    poly = "a*x**2 + b*x - c*y**2 + d*y + e"
    P = Expression(poly, 'poly')
    P.a.value = 6
    P.b.value = 5
    P.c.value = 4
    P.d.value = 3
    P.e.value = 2
    data = P.function(*self.mesh)
    s = Signal2D(data)
    if nav2d:
        s = hs.stack([s] * 2)
        s = hs.stack([s] * 3)
    m = s.create_model()
    m.append(P)
    m.fit(optimizer='lstsq')
    diff = (s - m.as_signal(show_progressbar=False))
    np.testing.assert_allclose(diff.data, 0.0, atol=1E-7)
    np.testing.assert_allclose(m.p_std, 0.0, atol=1E-7)
def get_diffraction_variance(self, dqe, set_data_type=None):
    """Calculates the variance in scattered intensity as a function of
    scattering vector.

    Parameters
    ----------
    dqe : float
        Detective quantum efficiency of the detector for Poisson noise
        correction.
    set_data_type : numpy data type.
        For numpy data types, see
        https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html.
        Squaring the data in meansq_dp produces values considerably larger
        than those in the original array, which can cause an overflow error
        that is difficult to distinguish. The data can therefore be converted
        to a different data type to accommodate this.

    Returns
    -------
    vardps : DiffractionVariance2D
        A DiffractionVariance2D object containing the mean DP, mean
        squared DP, and variance DP.
    """
    dp = self.signal
    mean_dp = dp.mean((0, 1))
    if set_data_type is None:
        meansq_dp = Signal2D(np.square(dp.data)).mean((0, 1))
    else:
        meansq_dp = Signal2D(np.square(
            dp.data.astype(set_data_type))).mean((0, 1))

    normvar = (meansq_dp.data / np.square(mean_dp.data)) - 1.
    var_dp = Signal2D(normvar)
    corr_var_array = var_dp.data - np.divide(dqe, mean_dp.data)
    corr_var_array[np.isinf(corr_var_array)] = 0
    corr_var_array[np.isnan(corr_var_array)] = 0
    corr_var = Signal2D(corr_var_array)

    vardps = stack((mean_dp, meansq_dp, var_dp, corr_var))
    sig_x = vardps.data.shape[1]
    sig_y = vardps.data.shape[2]

    dv = DiffractionVariance2D(vardps.data.reshape((2, 2, sig_x, sig_y)))
    dv = transfer_signal_axes(dv, self.signal)

    return dv
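# Hedged, self-contained illustration (pure NumPy) of the normalised variance computed
# above: V = <I^2> / <I>^2 - 1, corrected by dqe / <I> for Poisson noise. The synthetic
# scan, dqe value and function name are made up for the example.
def _example_normalised_variance(dqe=0.8):
    import numpy as np
    rng = np.random.default_rng(0)
    # Synthetic 4 x 4 scan of 32 x 32 Poisson-noise diffraction patterns
    patterns = rng.poisson(lam=50, size=(4, 4, 32, 32)).astype(float)
    mean_dp = patterns.mean(axis=(0, 1))
    meansq_dp = np.square(patterns).mean(axis=(0, 1))
    var_dp = meansq_dp / np.square(mean_dp) - 1.0
    return var_dp - dqe / mean_dp  # Poisson-noise correction, as above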
def get_diffraction_variance(self):
    """Calculates the variance associated with each diffraction pixel.

    Returns
    -------
    ElectronDiffraction
        A two-dimensional signal containing the mean, mean squared,
        and variance.
    """
    mean = self.mean(axis=self.axes_manager.navigation_axes)
    square = np.square(self)
    meansquare = square.mean(axis=square.axes_manager.navigation_axes)
    variance = meansquare / np.square(mean) - 1
    return stack((mean, meansquare, variance))
def gpa_tool_stack(strain_values):
    def get_interface_image(strain=0.1):
        _s = gpa.datasets.get_atomic_resolution_interface(
            size=512, spacing=14, strain=-strain)
        _s.add_gaussian_noise(100)
        return _s

    strain_values = [0.05, 0.075, 0.1]
    s = hs.stack([get_interface_image(strain) for strain in strain_values],
                 show_progressbar=False)
    s.set_signal_type('atomic_resolution')
    gpa_tool = gpa.GeometricalPhaseAnalysisTool(s)
    gpa_tool.set_fft()
    return gpa_tool
def test_get_integrated_intensity_stack(self, diffraction_pattern, out_signal_axes):
    s = hs.stack([diffraction_pattern] * 3)
    s.axes_manager.navigation_axes[0].name = "x"
    s.axes_manager.navigation_axes[1].name = "y"
    roi = hs.roi.CircleROI(3, 3, 5)
    vi = s.get_integrated_intensity(roi, out_signal_axes)
    assert vi.axes_manager.signal_dimension == 2
    assert vi.axes_manager.navigation_dimension == 1
    if out_signal_axes == (1, 2):
        assert vi.data.shape == (2, 3, 2)
        assert vi.axes_manager.navigation_size == 2
        assert vi.axes_manager.signal_shape == (2, 3)
    else:
        assert vi.data.shape == (3, 2, 2)
        assert vi.axes_manager.navigation_size == 3
        assert vi.axes_manager.signal_shape == (2, 2)
def test_estimate_parameters(self, only_current, binned):
    self.m.signal.metadata.Signal.binned = binned
    s = self.m.as_signal()
    assert s.metadata.Signal.binned == binned
    g = hs.model.components1D.PowerLaw()
    g.estimate_parameters(s, None, None, only_current=only_current)
    A_value = 1008.4913 if binned else 1006.4378
    r_value = 4.001768 if binned else 4.001752
    np.testing.assert_allclose(g.A.value, A_value)
    np.testing.assert_allclose(g.r.value, r_value)

    if only_current:
        A_value, r_value = 0, 0
    # Test that it all works when calling it with a different signal
    s2 = hs.stack((s, s))
    g.estimate_parameters(s2, None, None, only_current=only_current)
    np.testing.assert_allclose(g.A.map["values"][1], A_value)
    np.testing.assert_allclose(g.r.map["values"][1], r_value)
def test_fit_component():
    np.random.seed(0)
    s = hs.signals.Signal1D(np.random.normal(size=1000, loc=1)).get_histogram()
    s = hs.stack([s, s])
    m = s.create_model()
    m.extend([hs.model.components1D.Gaussian(),
              hs.model.components1D.Gaussian()])
    g1, g2 = m
    g1.centre.value = 0
    g2.centre.value = 8

    fc = ComponentFit(model=m, component=g1)
    fc.only_current = True
    fc.gui(**KWARGS)
    fc.ss_left_value = -2
    fc.ss_right_value = 4
    fc.only_current = False
    fc.iterpath = 'serpentine'
    fc.iterpath = 'flyback'
def test_estimate_parameters(self, only_current, binned):
    self.m.signal.metadata.Signal.binned = binned
    s = self.m.as_signal(show_progressbar=None, parallel=False)
    assert s.metadata.Signal.binned == binned
    g = hs.model.components1D.PowerLaw()
    g.estimate_parameters(s, None, None, only_current=only_current)
    A_value = 1008.4913 if binned else 1006.4378
    r_value = 4.001768 if binned else 4.001752
    assert_allclose(g.A.value, A_value)
    assert_allclose(g.r.value, r_value)

    if only_current:
        A_value, r_value = 0, 0
    # Test that it all works when calling it with a different signal
    s2 = hs.stack((s, s))
    g.estimate_parameters(s2, None, None, only_current=only_current)
    assert_allclose(g.A.map["values"][1], A_value)
    assert_allclose(g.r.map["values"][1], r_value)
def test_get_integrated_intensity_stack(self, electron_diffraction1d, out_signal_axes):
    s = hs.stack([electron_diffraction1d] * 3)
    s.axes_manager.navigation_axes[0].name = "x"
    s.axes_manager.navigation_axes[1].name = "y"
    roi = hs.roi.SpanROI(left=1.0, right=2.0)
    vi = s.get_integrated_intensity(roi, out_signal_axes)
    assert vi.axes_manager.signal_dimension == 2
    assert vi.axes_manager.navigation_dimension == 1
    if out_signal_axes == (1, 2):
        assert vi.data.shape == (2, 3, 2)
        assert vi.axes_manager.navigation_size == 2
        assert vi.axes_manager.signal_shape == (2, 3)
    else:
        assert vi.data.shape == (3, 2, 2)
        assert vi.axes_manager.navigation_size == 3
        assert vi.axes_manager.signal_shape == (2, 2)
def shift_area_eels(eels_s, shifts, crop_scan=True, reset_origin=True):
    """
    Shift an EELS SI signal to have a straight interface, returning a new
    HyperSpy signal with the same calibration as the original. Only shifts
    scans in the x-direction (vertical interface).

    Parameters
    ----------
    eels_s: ~hyperspy.signal.Signal
        An EELS spectrum image (with signal dimension > 1) loaded into
        HyperSpy that will be shifted
    shifts: list
        list of shifts to use. Can be obtained from
        `get_shifts_from_area_stem`
    crop_scan: bool
        Whether or not the resulting image should be cropped to lose the
        blank pixels resulting from shifting
    reset_origin: bool
        Whether or not to reset the origin of the x-axis to zero after
        cropping

    Returns
    -------
    shifted_s: hyperspy.signal.Signal
        shifted EELS SI signal
    """
    lines = [eels_s.inav[:, i] for i in range(eels_s.data.shape[0])]
    shifted_lines = shift_lines(lines, shifts,
                                progress_label='Shifting EELS line scans:')

    # Copy signal so calibration is right
    shifted_s = eels_s.deepcopy()
    shifted_s.data = hs.stack(shifted_lines).data
    shifted_s.metadata.General.title += ' shifted'

    if crop_scan:
        shifted_s = crop_area_scan(shifted_s, shifts)
        if reset_origin:
            shifted_s.axes_manager[0].offset = 0

    return shifted_s
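# Hedged usage sketch for shift_area_eels: `stem_s` and `eels_s` are assumed to be a
# matching HAADF STEM image and EELS SI loaded elsewhere; get_shifts_from_area_stem is
# assumed to return (stem_list, shifts) as it is used in shift_area_stem below.
def _example_shift_eels(stem_s, eels_s):
    _, shifts = get_shifts_from_area_stem(stem_s)
    return shift_area_eels(eels_s, shifts, crop_scan=True, reset_origin=True)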
def test_no_function_nd_signal(self):
    g = hs.model.components2D.Expression(
        GAUSSIAN2D_EXPR,
        name="gaussian2d",
        add_rotation=False,
        position=("x0", "y0"),
    )
    g.sy.value = .1
    g.sx.value = 0.5
    g.sy.value = 1
    g.x0.value = 1
    g.y0.value = 1

    l = np.arange(0, 3)
    x, y = np.meshgrid(l, l)
    s = hs.signals.Signal2D(g.function(x, y))
    s2 = hs.stack([s] * 2)
    m = s2.create_model()
    m.append(g)
    m.multifit()
    res = g.function_nd(x, y)
    assert res.shape == (2, 3, 3)
    assert_allclose(res, s2.data)
def shift_area_stem(stem_s, stem_list=None, shifts=None, crop_scan=True,
                    do_smoothing=True, reset_origin=True,
                    smoothing_parameter=0.05):
    """
    Shift a HAADF STEM signal to have a straight interface, returning a new
    HyperSpy signal with the same calibration as the original. Only shifts
    scans in the x-direction (vertical interface). If needed, get the shifts
    required from the area scan.

    Parameters
    ----------
    stem_s: ~hyperspy.signal.Signal
        A HAADF STEM image (with signal dimension > 1) loaded into HyperSpy
        that will be shifted
    stem_list: list
        list of STEM signals (as output by
        :py:meth:`get_shifts_from_area_stem`). Providing this will save a
        little time extracting the scans again, but is honestly not very
        necessary
    shifts: list or None
        list of shifts to use. If None, they will be determined automatically
    crop_scan: bool
        Whether or not the resulting image should be cropped to lose the
        blank pixels resulting from shifting
    reset_origin: bool
        Whether or not to reset the origin of the x-axis to zero after
        cropping
    do_smoothing: bool
        Whether to smooth the signal before finding shifts
    smoothing_parameter: float
        parameter supplied to :py:meth:`determine_shifts` to define how much
        to smooth the data while finding the shift values

    Returns
    -------
    shifted_s: hyperspy.signal.Signal
        shifted STEM signal
    """
    if shifts is None:
        stem_list, shifts = get_shifts_from_area_stem(
            stem_s,
            do_smoothing=do_smoothing,
            smoothing_parameter=smoothing_parameter)
    if stem_list is None:
        stem_list = [stem_s.isig[:, i] for i in range(stem_s.data.shape[0])]
    shifted_stem_list = shift_lines(stem_list, shifts)

    # Copy signal so calibration is right
    shifted_s = stem_s.deepcopy()
    shifted_s.data = hs.stack(shifted_stem_list).data
    shifted_s.metadata.General.title += ' shifted'

    if crop_scan:
        shifted_s = crop_area_scan(shifted_s, shifts)
        if reset_origin:
            shifted_s.axes_manager[0].offset = 0

    return shifted_s
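# Hedged usage sketch for shift_area_stem: shifts are determined automatically when
# not supplied. `stem_s` is an assumed HAADF STEM image loaded elsewhere.
def _example_shift_stem(stem_s):
    return shift_area_stem(stem_s, crop_scan=True, do_smoothing=True,
                           smoothing_parameter=0.05)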
def load_shift_and_build_area(c_to_o_stem=None, c_to_o_eels=None,
                              o_to_c_stem=None, o_to_c_eels=None,
                              shifts=None, smoothing_parm=0.05,
                              return_unshifted=False, return_uncropped=False,
                              debug=False):
    """
    Load a number of STEM signals and EELS line scans in order to build
    useful area scans out of them for decomposition and other analysis.
    A hedged usage sketch follows this function.

    If no filenames are supplied, four file chooser dialogs will be opened.
    The files should be chosen in the order of SiC to SiO2 STEM, SiC to SiO2
    EELS, SiO2 to SiC STEM, and then SiO2 to SiC EELS. If there are no
    reversed line scans to analyze (i.e. the scans were acquired just in one
    direction), then select them in the appropriate window, and press
    'Cancel' on the file selection for the ones that are not relevant.

    Note: all line scans must have the same dimensions, or there will be an
    error.

    Parameters
    ----------
    c_to_o_stem: list of str
        If supplied as keyword arguments, this method will not bring up a
        dialog in order to get the file names, and will just use those that
        are in the lists instead. This can be useful when combined with
        :py:meth:`get_scans_and_eels_fnames` so the function can be run
        multiple times without having to click through all the dialogs.
    c_to_o_eels: list of str
        See ``c_to_o_stem``
    o_to_c_stem: list of str
        See ``c_to_o_stem``
    o_to_c_eels: list of str
        See ``c_to_o_stem``
    shifts: list of float
        list of shift amounts to use. Allows one to supply custom shifts for
        each line, which will be applied to both the EELS and STEM scans.
        If None, the method will try to figure it out itself.
    smoothing_parm: float or 'ask'
        This is the parameter passed to :py:meth:`determine_shifts` in order
        to figure out how much to smooth the STEM signals before doing all
        the derivative work. Lower values are less smoothing, which will be
        more accurate, but more susceptible to noise. Typical values are on
        the order of [0.03, 0.1], depending on the signal.
    return_unshifted: bool
        switch whether or not to return the unshifted data (good for
        comparison)
    return_uncropped: bool
        switch whether or not to return the uncropped data (good for
        comparison)
    debug: bool
        switch whether debugging information is printed out to see the shift
        values and everything

    Returns
    -------
    res: tuple
        the results tuple will have the following signals, in the following
        order:

        area_stem: :py:class:`~hyperspy.signal.Signal`
            Hyperspy signal containing shifted and cropped STEM signals as an
            image, rather than a list of profiles
        area_eels: :py:class:`~hyperspy.signal.Signal`
            Hyperspy signal containing the shifted and cropped EELS line
            scans as an area scan, rather than a list of single line scans
        file_list: list
            List of the files that were processed
        area_stem_nocrop: :py:class:`~hyperspy.signal.Signal`
            (Optional) Hyperspy signal containing shifted but not cropped
            STEM signals as an image, rather than a list of profiles
        area_eels_nocrop: :py:class:`~hyperspy.signal.Signal`
            (Optional) Hyperspy signal containing the shifted but not cropped
            EELS line scans as an area scan, rather than a list of single
            line scans
        area_stem_unshifted: :py:class:`~hyperspy.signal.Signal`
            (Optional) Hyperspy signal containing the unshifted STEM signals
            as an image, rather than a list of profiles
        area_eels_unshifted: :py:class:`~hyperspy.signal.Signal`
            (Optional) Hyperspy signal containing the unshifted EELS line
            scans as an area scan, rather than a list of single line scans
    """
    def _check_list_equal(iterator):
        # will return whether all items in list are the same or not
        return len(set(iterator)) <= 1

    # if no EELS scans are provided, get the information from dialog:
    if c_to_o_eels is None and o_to_c_eels is None:
        # get files from dialog if not supplied:
        (c_to_o_stem, c_to_o_eels,
         o_to_c_stem, o_to_c_eels) = get_scans_and_eels_fnames()

    # Save filenames in a list for reporting
    file_list = c_to_o_stem + c_to_o_eels + o_to_c_stem + o_to_c_eels

    # load in the files from the list of files:
    c_to_o_scans = [hs.load(x) for x in c_to_o_stem]
    c_to_o_lines = [hs.load(x) for x in c_to_o_eels]
    o_to_c_scans = [hs.load(x) for x in o_to_c_stem]
    o_to_c_lines = [hs.load(x) for x in o_to_c_eels]

    # flip the data in the OtoC scans and lines:
    for i in o_to_c_scans:
        i.data = i.data[::-1]
    for i in o_to_c_lines:
        i.data = i.data[::-1]

    # combine lists to make bigger lists:
    scans = c_to_o_scans + o_to_c_scans
    lines = c_to_o_lines + o_to_c_lines
    scan_sizes = [i.axes_manager.shape for i in scans]
    scan_scales = [i.axes_manager[0].scale for i in scans]
    line_sizes = [i.axes_manager.shape for i in lines]
    line_scales = [i.axes_manager[0].scale for i in lines]

    # Handle some errors related to scan sizes and magnifications
    if not _check_list_equal(scan_sizes):
        print("STEM scans were not all same size.")
        print("")
        print("SiC to SiO2 files were:")
        for i in c_to_o_stem:
            print(i)
        print("")
        print("SiO2 to SiC files were:")
        for i in o_to_c_stem:
            print(i)
        print("")
        print("Sizes were:")
        pprint(scan_sizes)
        raise ValueError("All line scans must be same size for stacking.")

    if not _check_list_equal(scan_scales):
        print("STEM scans were not all same scale (different mag?).")
        print("")
        print("SiC to SiO2 files were:")
        for i in c_to_o_stem:
            print(i)
        print("")
        print("SiO2 to SiC files were:")
        for i in o_to_c_stem:
            print(i)
        print("")
        print("Scales were:")
        pprint(scan_scales)
        raise ValueError("All line scans must be same scale for stacking.")

    if not _check_list_equal(line_sizes):
        print("EELS line scans were not all same size.")
        print("")
        print("SiC to SiO2 files were:")
        for i in c_to_o_eels:
            print(i)
        print("")
        print("SiO2 to SiC files were:")
        for i in o_to_c_eels:
            print(i)
        print("")
        print("Sizes were:")
        pprint(line_sizes)
        raise ValueError("All line scans must be same size for stacking.")

    if not _check_list_equal(line_scales):
        print("EELS line scans were not all same scale (different mag?).")
        print("")
        print("SiC to SiO2 files were:")
        for i in c_to_o_eels:
            print(i)
        print("")
        print("SiO2 to SiC files were:")
        for i in o_to_c_eels:
            print(i)
        print("")
        print("Scales were:")
        pprint(line_scales)
        raise ValueError("All line scans must be same scale for stacking.")

    # smooth scans:
    if shifts is None:
        smoothed_scans = smooth_scans(
            scans,
            progress_label="Smoothing STEM signals:",
            smoothing_parm=smoothing_parm)

    # do actual shifting and cropping:
    if shifts is None:
        shifts = determine_shifts(smoothed_scans,
                                  do_smoothing=False,
                                  debug=debug)
    if debug:
        print("Shifts are:")
        pprint(list(shifts))

    # normalize the intensity of the line scans:
    normalize_lines(lines, progress_label='Normalizing EELS line scans:')

    # normalize the intensity of the STEM profiles:
    normalize_lines(scans, progress_label='Normalizing STEM signals:')

    # shift EELS line scans
    shifted_lines = shift_lines(lines, shifts,
                                progress_label='Shifting EELS line scans:')

    # shift HAADF STEM signals
    shifted_scans = shift_lines(scans, shifts,
                                progress_label='Shifting STEM signals:')

    # create area spectrum images from the lines
    area_eels_nocrop = hs.stack(shifted_lines)
    area_eels_nocrop.axes_manager[1].name = 'line scan'
    area_eels_nocrop.axes_manager[1].units = '#'
    area_stem_nocrop = hs.stack(shifted_scans)
    area_stem_nocrop.axes_manager[0].name = 'STEM profile'
    area_stem_nocrop.axes_manager[0].units = '#'

    # Set appropriate titles for the signals
    area_eels_nocrop.metadata.General.title = ('Stacked EELS line scans - '
                                               'shifted')
    area_stem_nocrop.metadata.General.title = 'Stacked STEM signals - shifted'

    # crop the area spectrum images so there is no blank data
    area_eels = crop_area_scan(area_eels_nocrop, shifts)
    area_eels.axes_manager[1].name = 'line scan'
    area_eels.axes_manager[1].units = '#'
    area_stem = crop_area_scan(area_stem_nocrop, shifts)
    area_stem.axes_manager[0].name = 'STEM profile'
    area_stem.axes_manager[0].units = '#'

    # Set appropriate titles for the signals
    area_eels.metadata.General.title = ('Stacked EELS line scans - shifted '
                                        'and cropped')
    area_stem.metadata.General.title = ('Stacked STEM signals - shifted and '
                                        'cropped')

    # initialize the results list with the cropped and shifted data and the
    # list of file names that were analyzed
    res = [area_stem, area_eels, file_list]

    # if we want to return the uncropped data, add it to the list
    if return_uncropped:
        res.append(area_stem_nocrop)
        res.append(area_eels_nocrop)

    # if we want to return the unshifted data, add it to the list
    if return_unshifted:
        area_stem_unshifted = hs.stack(scans)
        area_eels_unshifted = hs.stack(lines)

        # Set appropriate titles for the signals
        area_eels_unshifted.metadata.General.title = 'Stacked EELS line scans'
        area_eels_unshifted.axes_manager[1].name = 'line scan'
        area_eels_unshifted.axes_manager[1].units = '#'
        area_stem_unshifted.metadata.General.title = 'Stacked STEM signals'
        area_stem_unshifted.axes_manager[0].name = 'STEM profile'
        area_stem_unshifted.axes_manager[0].units = '#'
        res.append(area_stem_unshifted)
        res.append(area_eels_unshifted)

    return res
def test_statistics(parallel, lazy, single_values, fringe_contrast_algorithm):
    # Parameters measured using Gatan HoloWorks:
    REF_FRINGE_SPACING = 3.48604
    REF_FRINGE_SAMPLING = 3.7902

    # Measured using the definition of fringe contrast from the centre of image
    REF_FRINGE_CONTRAST = 0.0736

    RTOL = 1e-5

    # 0. Prepare test data and derived statistical parameters
    ref_carrier_freq = 1. / REF_FRINGE_SAMPLING
    ref_carrier_freq_nm = 1. / REF_FRINGE_SPACING

    ref_holo = hs.stack([reference_hologram(), reference_hologram()])
    ref_holo = hs.stack([ref_holo, ref_holo, ref_holo])

    ht = ref_holo.metadata.Acquisition_instrument.TEM.beam_energy
    momentum = 2 * constants.m_e * constants.elementary_charge * ht * \
        1000 * (1 + constants.elementary_charge * ht * 1000 /
                (2 * constants.m_e * constants.c ** 2))
    wavelength = constants.h / np.sqrt(momentum) * 1e9  # in nm
    ref_carrier_freq_mrad = ref_carrier_freq_nm * 1000 * wavelength

    if lazy:
        ref_holo.as_lazy()

    # 1. Test core functionality
    stats = ref_holo.statistics(
        high_cf=True,
        parallel=parallel,
        single_values=single_values,
        fringe_contrast_algorithm=fringe_contrast_algorithm)
    if single_values:
        # Fringe contrast in experimental conditions can be only an estimate,
        # therefore tolerance is 10%:
        assert_allclose(
            stats['Fringe contrast'], REF_FRINGE_CONTRAST, rtol=0.1)

        assert_allclose(
            stats['Fringe sampling (px)'], REF_FRINGE_SAMPLING, rtol=RTOL)
        assert_allclose(
            stats['Fringe spacing (nm)'], REF_FRINGE_SPACING, rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (1 / nm)'], ref_carrier_freq_nm, rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (1/px)'], ref_carrier_freq, rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (mrad)'], ref_carrier_freq_mrad, rtol=RTOL)
    else:
        ref_fringe_contrast_stack = np.repeat(
            REF_FRINGE_CONTRAST, 6).reshape((3, 2))
        ref_fringe_sampling_stack = np.repeat(
            REF_FRINGE_SAMPLING, 6).reshape((3, 2))
        ref_fringe_spacing_stack = np.repeat(
            REF_FRINGE_SPACING, 6).reshape((3, 2))
        ref_carrier_freq_nm_stack = np.repeat(
            ref_carrier_freq_nm, 6).reshape((3, 2))
        ref_carrier_freq_stack = np.repeat(ref_carrier_freq, 6).reshape((3, 2))
        ref_carrier_freq_mrad_stack = np.repeat(
            ref_carrier_freq_mrad, 6).reshape((3, 2))

        # Fringe contrast in experimental conditions can be only an estimate,
        # therefore tolerance is 10%:
        assert_allclose(
            stats['Fringe contrast'].data, ref_fringe_contrast_stack, rtol=0.1)

        assert_allclose(
            stats['Fringe sampling (px)'].data, ref_fringe_sampling_stack, rtol=RTOL)
        assert_allclose(
            stats['Fringe spacing (nm)'].data, ref_fringe_spacing_stack, rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (1 / nm)'].data, ref_carrier_freq_nm_stack,
            rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (1/px)'].data, ref_carrier_freq_stack, rtol=RTOL)
        assert_allclose(
            stats['Carrier frequency (mrad)'].data, ref_carrier_freq_mrad_stack,
            rtol=RTOL)

    # 2. Test raises:
    holo_raise = hs.signals.HologramImage(np.random.random(20).reshape((5, 4)))

    # 2a. Test raise for absent units:
    with pytest.raises(ValueError):
        holo_raise.statistics(sb_position=(1, 1))
    holo_raise.axes_manager.signal_axes[0].units = 'nm'
    holo_raise.axes_manager.signal_axes[1].units = 'nm'

    # 2b. Test raise for absent beam_energy:
    with pytest.raises(AttributeError):
        holo_raise.statistics(sb_position=(1, 1))
    holo_raise.set_microscope_parameters(beam_energy=300.)

    # 2c. Test raise for wrong value of `fringe_contrast_algorithm`
    with pytest.raises(ValueError):
        holo_raise.statistics(sb_position=(1, 1),
                              fringe_contrast_algorithm='pure_guess')