Example #1
0
def test_function():
    """Evaluate a Lorentzian at its centre and two gamma away from it."""
    component = Lorentzian()
    component.A.value = np.pi * 1.5
    component.gamma.value = 1
    component.centre.value = 2
    # At x == centre the peak height is A / (pi * gamma) == 1.5.
    np.testing.assert_allclose(component.function(2), 1.5)
    # Two gamma from the centre the value falls to height / 5.
    np.testing.assert_allclose(component.function(4), 0.3)
Example #2
0
    def setup_method(self, method):
        """To test the kramers_kronig_analysis we will generate 3
        EELSSpectrum instances. First a model energy loss function(ELF),
        in our case following the Drude bulk plasmon peak. Second, we
        simulate the inelastic scattering to generate a model scattering
        distribution (SPC). Finally, we use a lorentzian peak with
        integral equal to 1 to simulate a ZLP.

        """

        # Parameters
        i0 = 1.  # integral of the zero-loss peak
        # 2x3 thickness map (10..60) as a navigation-space signal.
        t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3)))
        t = t.transpose(signal_axes=0)
        scale = 0.02  # energy-axis scale of the simulated spectra

        # Create an 3x2x2048 spectrum with Drude plasmon
        s = hs.signals.EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
        s.set_microscope_parameters(
            beam_energy=300.0,
            convergence_angle=5,
            collection_angle=10.0)
        s.axes_manager.signal_axes[0].scale = scale
        k = eels_constant(s, i0, t)

        # Drude plasmon model with per-pixel parameter maps; is_set must be
        # flagged so as_signal() uses the map values at every position.
        vpm = VolumePlasmonDrude()
        m = s.create_model(auto_background=False)
        m.append(vpm)
        vpm.intensity.map['values'][:] = 1
        vpm.plasmon_energy.map['values'] = np.array([[8., 18.4, 15.8],
                                                     [16.6, 4.3, 3.7]])
        vpm.fwhm.map['values'] = np.array([[2.3, 4.8, 0.53],
                                           [3.7, 0.3, 0.3]])
        vpm.intensity.map['is_set'][:] = True
        vpm.plasmon_energy.map['is_set'][:] = True
        vpm.fwhm.map['is_set'][:] = True
        s.data = (m.as_signal() * k).data

        # Create ZLP
        z = s.deepcopy()
        z.axes_manager.signal_axes[0].scale = scale
        z.axes_manager.signal_axes[0].offset = -10
        zlp = Lorentzian()
        zlp.A.value = i0  # unit-integral Lorentzian models the ZLP
        zlp.gamma.value = 0.2
        zlp.centre.value = 0.0
        z.data[:] = zlp.function(z.axes_manager[-1].axis).reshape((1, 1, -1))
        z.data *= scale  # weight by the channel width (axis scale)
        self.s = s
        self.thickness = t
        self.k = k
        self.zlp = z
    def setUp(self):
        """To test the kramers_kronig_analysis we will generate 3
        EELSSpectrum instances. First a model energy loss function(ELF),
        in our case following the Drude bulk plasmon peak. Second, we
        simulate the inelastic scattering to generate a model scattering
        distribution (SPC). Finally, we use a lorentzian peak with
        integral equal to 1 to simulate a ZLP.

        """
        # NOTE(review): unittest-style duplicate of the pytest
        # setup_method fixture, differing only in
        # as_signal(show_progressbar=None); consider keeping one.

        # Parameters
        i0 = 1.  # integral of the zero-loss peak
        # 2x3 thickness map (10..60) as a navigation-space signal.
        t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3)))
        t = t.transpose(signal_axes=0)
        scale = 0.02  # energy-axis scale of the simulated spectra

        # Create an 3x2x2048 spectrum with Drude plasmon
        s = hs.signals.EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
        s.set_microscope_parameters(
            beam_energy=300.0,
            convergence_angle=5,
            collection_angle=10.0)
        s.axes_manager.signal_axes[0].scale = scale
        k = eels_constant(s, i0, t)

        # Drude plasmon model with per-pixel parameter maps; is_set must be
        # flagged so as_signal() uses the map values at every position.
        vpm = VolumePlasmonDrude()
        m = s.create_model(auto_background=False)
        m.append(vpm)
        vpm.intensity.map['values'][:] = 1
        vpm.plasmon_energy.map['values'] = np.array([[8., 18.4, 15.8],
                                                     [16.6, 4.3, 3.7]])
        vpm.fwhm.map['values'] = np.array([[2.3, 4.8, 0.53],
                                           [3.7, 0.3, 0.3]])
        vpm.intensity.map['is_set'][:] = True
        vpm.plasmon_energy.map['is_set'][:] = True
        vpm.fwhm.map['is_set'][:] = True
        s.data = (m.as_signal(show_progressbar=None) * k).data

        # Create ZLP
        z = s.deepcopy()
        z.axes_manager.signal_axes[0].scale = scale
        z.axes_manager.signal_axes[0].offset = -10
        zlp = Lorentzian()
        zlp.A.value = i0  # unit-integral Lorentzian models the ZLP
        zlp.gamma.value = 0.2
        zlp.centre.value = 0.0
        z.data[:] = zlp.function(z.axes_manager[-1].axis).reshape((1, 1, -1))
        z.data *= scale  # weight by the channel width (axis scale)
        self.s = s
        self.thickness = t
        self.k = k
        self.zlp = z
 def setup_method(self, method):
     """Build two fitted single-component models on the same signal."""
     signal = Signal1D(range(10))
     gaussian_model = signal.create_model()
     lorentzian_model = signal.create_model()
     gaussian_model.append(Gaussian())
     lorentzian_model.append(Lorentzian())
     gaussian_model.fit()
     lorentzian_model.fit()
     self.m1 = gaussian_model
     self.m2 = lorentzian_model
 def setup_method(self, method):
     """Multifit a Lorentzian model and build the AIC/AICc/BIC tests."""
     model = Signal1D(np.arange(30).reshape((3, 10))).create_model()
     model.append(Lorentzian())
     model.multifit(show_progressbar=False)
     self.m = model
     # Imported here rather than at module level, as importing these at
     # collection time crashes nosetools.
     from hyperspy.samfire_utils.goodness_of_fit_tests.information_theory import (
         AIC_test,
         AICc_test,
         BIC_test,
     )
     self.aic = AIC_test(0.)
     self.aicc = AICc_test(0.)
     self.bic = BIC_test(0.)
 def setup_method(self, method):
     """Multifit with a serpentine iterpath and build the IC tests."""
     model = Signal1D(np.arange(30).reshape((3, 10))).create_model()
     model.append(Lorentzian())
     # HyperSpy 2.0: remove setting iterpath='serpentine'
     model.multifit(iterpath='serpentine')
     self.m = model
     # Imported here rather than at module level, as importing these at
     # collection time crashes nosetools.
     from hyperspy.samfire_utils.goodness_of_fit_tests.information_theory import (
         AIC_test,
         AICc_test,
         BIC_test,
     )
     self.aic = AIC_test(0.)
     self.aicc = AICc_test(0.)
     self.bic = BIC_test(0.)
Example #7
0
def test_estimate_parameters_binned(only_current, binned, lazy, uniform):
    """estimate_parameters recovers A, centre and gamma from a clean peak,
    for all combinations of binned / lazy / non-uniform axes."""
    signal = Signal1D(np.empty((250,)))
    signal.axes_manager.signal_axes[0].is_binned = binned
    energy_axis = signal.axes_manager.signal_axes[0]
    energy_axis.scale = 0.2
    energy_axis.offset = -15
    reference = Lorentzian(52342, 2, 10)
    signal.data = reference.function(energy_axis.axis)
    if not uniform:
        energy_axis.convert_to_non_uniform_axis()
    if lazy:
        signal = signal.as_lazy()
    estimated = Lorentzian()
    # Binned data scales A by the channel width: a scalar for a uniform
    # axis, per-channel widths otherwise.
    if not binned:
        factor = 1
    elif uniform:
        factor = energy_axis.scale
    else:
        factor = np.gradient(energy_axis.axis)
    assert estimated.estimate_parameters(signal,
                                         energy_axis.low_value,
                                         energy_axis.high_value,
                                         only_current=only_current)
    assert estimated._axes_manager[-1].is_binned == binned
    np.testing.assert_allclose(reference.A.value,
                               estimated.A.value * factor, 0.1)
    assert abs(estimated.centre.value - reference.centre.value) <= 0.2
    assert abs(estimated.gamma.value - reference.gamma.value) <= 0.1
Example #8
0
def test_function_nd(binned, lazy):
    """function_nd reproduces stacked signal data after estimation."""
    signal = Signal1D(np.empty((250,)))
    energy_axis = signal.axes_manager.signal_axes[0]
    energy_axis.scale = 0.2
    energy_axis.offset = -15
    reference = Lorentzian(52342, 2, 10)
    signal.data = reference.function(energy_axis.axis)
    signal.metadata.Signal.binned = binned
    stacked = stack([signal] * 2)
    if lazy:
        stacked = stacked.as_lazy()
    estimated = Lorentzian()
    factor = energy_axis.scale if binned else 1
    estimated.estimate_parameters(
        stacked, energy_axis.low_value, energy_axis.high_value, False)
    assert estimated.binned == binned
    np.testing.assert_allclose(
        estimated.function_nd(energy_axis.axis) * factor, stacked.data, 0.16)
    def test_load_dictionary(self):
        """Round-trip the model through as_dictionary/_load_dictionary."""
        serialized = self.model.as_dictionary()
        restored = self.s.create_model()
        restored.append(Lorentzian())
        restored._load_dictionary(serialized)
        original = self.model

        np.testing.assert_allclose(original.chisq.data, restored.chisq.data)
        np.testing.assert_allclose(original.dof.data, restored.dof.data)

        np.testing.assert_allclose(restored.low_loss.data,
                                   original.low_loss.data)

        np.testing.assert_equal(restored.free_parameters_boundaries,
                                original.free_parameters_boundaries)
        assert restored.convolved is original.convolved
        # Every component and its parameter maps must survive the trip.
        for index in range(len(restored)):
            assert restored[index]._id_name == original[index]._id_name
            for p_orig, p_new in zip(original[index].parameters,
                                     restored[index].parameters):
                np.testing.assert_allclose(p_orig.map['values'],
                                           p_new.map['values'])
                np.testing.assert_allclose(p_orig.map['is_set'],
                                           p_new.map['is_set'])

        # Twin relations are restored as well.
        assert restored[0].A.twin is restored[1].A
Example #10
0
def test_estimate_parameters_binned(only_current, binned, lazy):
    """estimate_parameters recovers the generating Lorentzian values."""
    signal = Signal1D(np.empty((250,)))
    signal.metadata.Signal.binned = binned
    energy_axis = signal.axes_manager.signal_axes[0]
    energy_axis.scale = 0.2
    energy_axis.offset = -15
    reference = Lorentzian(52342, 2, 10)
    signal.data = reference.function(energy_axis.axis)
    if lazy:
        signal = signal.as_lazy()
    estimated = Lorentzian()
    # Binned data scales A by the (uniform) channel width.
    factor = energy_axis.scale if binned else 1
    assert estimated.estimate_parameters(signal,
                                         energy_axis.low_value,
                                         energy_axis.high_value,
                                         only_current=only_current)
    assert estimated.binned == binned
    np.testing.assert_allclose(reference.A.value,
                               estimated.A.value * factor, 0.1)
    assert abs(estimated.centre.value - reference.centre.value) <= 0.2
    assert abs(estimated.gamma.value - reference.gamma.value) <= 0.1
Example #11
0
def test_util_fwhm_set():
    """Setting fwhm updates gamma to half the FWHM."""
    component = Lorentzian()
    component.fwhm = 1.0
    np.testing.assert_allclose(component.gamma.value, 0.5)
Example #12
0
def test_util_gamma_getset():
    """gamma round-trips through its value attribute."""
    component = Lorentzian()
    component.gamma.value = 3.0
    np.testing.assert_allclose(component.gamma.value, 3.0)
Example #13
0
def test_util_height_getset():
    """height round-trips through the convenience property."""
    component = Lorentzian()
    component.height = 4.0
    np.testing.assert_allclose(component.height, 4.0)
Example #14
0
def test_util_height_get():
    """height is A / (pi * gamma)."""
    component = Lorentzian()
    component.gamma.value = 3.0
    component.A.value = 1.5 * np.pi
    np.testing.assert_allclose(component.height, 0.5)
Example #15
0
def test_util_fwhm_get():
    """fwhm is twice gamma."""
    component = Lorentzian()
    component.gamma.value = 2.0
    np.testing.assert_allclose(component.fwhm, 4.0)
Example #16
0
def generate_test_model():
    """Build a noisy three-component test signal (two Lorentzians plus a
    Gaussian) on a 15x15 navigation grid, where a circular region carries
    different component parameters than its surroundings.

    Returns
    -------
    tuple
        ``(m, gs01, gs02, gs03)``: a model created over the first seven
        navigation rows of the noisy signal, plus the three ground-truth
        components from the (single) blur iteration.
    """
    # import hyperspy.api as hs
    from hyperspy.signals import Signal1D
    from hyperspy.components1d import (Gaussian, Lorentzian)
    import numpy as np
    from scipy.ndimage import gaussian_filter
    total = None
    # blurs = [0., 0.5, 1., 2., 5.]
    blurs = [1.5]
    radius = 5
    domain = 15
    # Circular mask selecting the region with distinct parameters.
    cent = (domain // 2, domain // 2)
    y, x = np.ogrid[-cent[0]:domain - cent[0], -cent[1]:domain - cent[1]]
    mask = x * x + y * y <= radius * radius
    lor_map = None
    for blur in blurs:

        s = Signal1D(np.ones((domain, domain, 1024)))
        cent = tuple([int(0.5 * i) for i in s.data.shape[:-1]])
        m0 = s.create_model()

        # First Lorentzian: centre 300 outside / 400 inside the mask;
        # centre and amplitude maps are then smoothed by `blur`.
        gs01 = Lorentzian()
        m0.append(gs01)
        gs01.gamma.map['values'][:] = 50
        gs01.gamma.map['is_set'][:] = True
        gs01.centre.map['values'][:] = 300
        gs01.centre.map['values'][mask] = 400
        gs01.centre.map['values'] = gaussian_filter(
            gs01.centre.map['values'],
            blur)
        gs01.centre.map['is_set'][:] = True
        gs01.A.map['values'][:] = 100 * \
            np.random.random((domain, domain)) + 300000
        gs01.A.map['values'][mask] *= 0.75
        gs01.A.map['values'] = gaussian_filter(gs01.A.map['values'], blur)
        gs01.A.map['is_set'][:] = True

        # Gaussian: centre 400 outside / 300 inside the mask.
        gs02 = Gaussian()
        m0.append(gs02)
        gs02.sigma.map['values'][:] = 15
        gs02.sigma.map['is_set'][:] = True
        gs02.centre.map['values'][:] = 400
        gs02.centre.map['values'][mask] = 300
        gs02.centre.map['values'] = gaussian_filter(
            gs02.centre.map['values'],
            blur)
        gs02.centre.map['is_set'][:] = True
        gs02.A.map['values'][:] = 50000
        gs02.A.map['is_set'][:] = True

        # Second Lorentzian: amplitude zeroed inside the mask.
        gs03 = Lorentzian()
        m0.append(gs03)
        gs03.gamma.map['values'][:] = 20
        gs03.gamma.map['is_set'][:] = True
        gs03.centre.map['values'][:] = 100
        gs03.centre.map['values'][mask] = 900
        gs03.centre.map['is_set'][:] = True
        gs03.A.map['values'][:] = 100 * \
            np.random.random((domain, domain)) + 50000
        gs03.A.map['values'][mask] *= 0.
        gs03.A.map['is_set'][:] = True

        s11 = m0.as_signal(show_progressbar=False)
        # Concatenate successive blur levels along axis 1, keeping the
        # ground-truth centre map alongside.
        if total is None:
            total = s11.data.copy()
            lor_map = gs01.centre.map['values'].copy()
        else:
            total = np.concatenate((total, s11.data), axis=1)
            lor_map = np.concatenate(
                (lor_map, gs01.centre.map['values'].copy()), axis=1)

    s = Signal1D(total)
    s.add_poissonian_noise()
    s.data += 0.1  # presumably to avoid zero counts — TODO confirm
    s.estimate_poissonian_noise_variance()

    # Fresh components with starting values (and bounds on l2) for
    # fitting a seven-row crop of the noisy signal.
    m = s.inav[:, :7].create_model()
    g = Gaussian()
    l1 = Lorentzian()
    l2 = Lorentzian()
    g.sigma.value = 50
    g.centre.value = 400
    g.A.value = 50000
    l1.gamma.value = 40
    l1.centre.value = 300
    l1.A.value = 300000
    l2.gamma.value = 15
    l2.centre.value = 100
    l2.A.value = 50000
    l2.centre.bmin = 0
    l2.centre.bmax = 200
    l2.A.bmin = 30000
    l2.A.bmax = 100000
    l2.gamma.bmin = 0
    l2.gamma.bmax = 60
    m.extend([g, l1, l2])
    m.assign_current_values_to_all()
    l2.active_is_multidimensional = True
    return m, gs01, gs02, gs03
Example #17
0
def generate_test_model():
    """Build a noisy three-component test signal (two Lorentzians plus a
    Gaussian) on a 15x15 navigation grid, where a circular region carries
    different component parameters than its surroundings.

    Returns
    -------
    tuple
        ``(m, gs01, gs02, gs03)``: a model created over the first seven
        navigation rows of the noisy signal, plus the three ground-truth
        components from the (single) blur iteration.
    """
    # import hyperspy.api as hs
    from hyperspy.signals import Signal1D
    from hyperspy.components1d import (Gaussian, Lorentzian)
    import numpy as np
    from scipy.ndimage import gaussian_filter
    total = None
    # blurs = [0., 0.5, 1., 2., 5.]
    blurs = [1.5]
    radius = 5
    domain = 15
    # do circle/domain: circular mask with distinct parameters inside.
    cent = (domain // 2, domain // 2)
    y, x = np.ogrid[-cent[0]:domain - cent[0], -cent[1]:domain - cent[1]]
    mask = x * x + y * y <= radius * radius
    lor_map = None
    for blur in blurs:

        s = Signal1D(np.ones((domain, domain, 1024)))
        cent = tuple([int(0.5 * i) for i in s.data.shape[:-1]])
        m0 = s.create_model()

        # First Lorentzian: centre 300 outside / 400 inside the mask;
        # centre and amplitude maps are then smoothed by `blur`.
        gs01 = Lorentzian()
        m0.append(gs01)
        gs01.gamma.map['values'][:] = 50
        gs01.gamma.map['is_set'][:] = True
        gs01.centre.map['values'][:] = 300
        gs01.centre.map['values'][mask] = 400
        gs01.centre.map['values'] = gaussian_filter(gs01.centre.map['values'],
                                                    blur)
        gs01.centre.map['is_set'][:] = True
        gs01.A.map['values'][:] = 100 * \
            np.random.random((domain, domain)) + 300000
        gs01.A.map['values'][mask] *= 0.75
        gs01.A.map['values'] = gaussian_filter(gs01.A.map['values'], blur)
        gs01.A.map['is_set'][:] = True

        # Gaussian: centre 400 outside / 300 inside the mask.
        gs02 = Gaussian()
        m0.append(gs02)
        gs02.sigma.map['values'][:] = 15
        gs02.sigma.map['is_set'][:] = True
        gs02.centre.map['values'][:] = 400
        gs02.centre.map['values'][mask] = 300
        gs02.centre.map['values'] = gaussian_filter(gs02.centre.map['values'],
                                                    blur)
        gs02.centre.map['is_set'][:] = True
        gs02.A.map['values'][:] = 50000
        gs02.A.map['is_set'][:] = True

        # Second Lorentzian: amplitude zeroed inside the mask.
        gs03 = Lorentzian()
        m0.append(gs03)
        gs03.gamma.map['values'][:] = 20
        gs03.gamma.map['is_set'][:] = True
        gs03.centre.map['values'][:] = 100
        gs03.centre.map['values'][mask] = 900
        gs03.centre.map['is_set'][:] = True
        gs03.A.map['values'][:] = 100 * \
            np.random.random((domain, domain)) + 50000
        gs03.A.map['values'][mask] *= 0.
        gs03.A.map['is_set'][:] = True

        s11 = m0.as_signal(show_progressbar=False)
        # Concatenate successive blur levels along axis 1, keeping the
        # ground-truth centre map alongside.
        if total is None:
            total = s11.data.copy()
            lor_map = gs01.centre.map['values'].copy()
        else:
            total = np.concatenate((total, s11.data), axis=1)
            lor_map = np.concatenate(
                (lor_map, gs01.centre.map['values'].copy()), axis=1)

    s = Signal1D(total)
    s.add_poissonian_noise()
    s.data += 0.1  # presumably to avoid zero counts — TODO confirm
    s.estimate_poissonian_noise_variance()

    # Fresh components with starting values (and bounds on l2) for
    # fitting a seven-row crop of the noisy signal.
    m = s.inav[:, :7].create_model()
    g = Gaussian()
    l1 = Lorentzian()
    l2 = Lorentzian()
    g.sigma.value = 50
    g.centre.value = 400
    g.A.value = 50000
    l1.gamma.value = 40
    l1.centre.value = 300
    l1.A.value = 300000
    l2.gamma.value = 15
    l2.centre.value = 100
    l2.A.value = 50000
    l2.centre.bmin = 0
    l2.centre.bmax = 200
    l2.A.bmin = 30000
    l2.A.bmax = 100000
    l2.gamma.bmin = 0
    l2.gamma.bmax = 60
    m.extend([g, l1, l2])
    m.assign_current_values_to_all()
    l2.active_is_multidimensional = True
    return m, gs01, gs02, gs03
Example #18
0
def test_util_fwhm_getset():
    """fwhm round-trips through the convenience property."""
    component = Lorentzian()
    component.fwhm = 4.0
    np.testing.assert_allclose(component.fwhm, 4.0)
Example #19
0
def test_util_height_set():
    """Setting height rescales A to height * pi * gamma."""
    component = Lorentzian()
    component.gamma.value = 4.0
    component.height = 2.0 / np.pi
    np.testing.assert_allclose(component.A.value, 8)