Example #1
def test_numdifftools_calc_covar_false():
    pytest.importorskip("numdifftools")
    # load data to be fitted
    data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
                                   'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['sigma'].set(min=-np.inf)

    # do fit, with leastsq and nelder
    result = mod.fit(y, params, x=x, method='leastsq')
    result_ndt = mod.fit(y, params, x=x, method='nelder', calc_covar=False)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
    assert_allclose(vals_ndt, vals, rtol=5e-3)
    assert_allclose(result_ndt.chisqr, result.chisqr)

    assert result_ndt.covar is None
    assert result_ndt.errorbars is False
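The behaviour this test checks can be reproduced without the example data file; a minimal sketch, assuming only numpy and lmfit (the synthetic peak below stands in for test_peak.dat):

# with calc_covar=False, lmfit skips the numdifftools-based covariance
# estimate for solvers such as 'nelder' that provide none natively
import numpy as np
from lmfit.models import VoigtModel

x = np.linspace(0, 20, 401)
y = 10 * np.exp(-0.5 * ((x - 9.5) / 1.2)**2)  # synthetic peak

mod = VoigtModel()
params = mod.guess(y, x=x)
result = mod.fit(y, params, x=x, method='nelder', calc_covar=False)
assert result.covar is None
assert result.errorbars is False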
Example #2
def test_numdifftools_calc_covar_false():
    pytest.importorskip("numdifftools")
    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['sigma'].set(min=-np.inf)

    # do fit, with leastsq and nelder
    result = mod.fit(y, params, x=x, method='leastsq')
    result_ndt = mod.fit(y, params, x=x, method='nelder', calc_covar=False)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [
        result_ndt.params[p].value for p in result_ndt.params.valuesdict()
    ]
    assert_allclose(vals_ndt, vals, rtol=5e-3)
    assert_allclose(result_ndt.chisqr, result.chisqr)

    assert result_ndt.covar is None
    assert result_ndt.errorbars is False
Example #3
def test_numdifftools_with_bounds(fit_method):
    pytest.importorskip("numdifftools")
    if fit_method in ['shgo', 'dual_annealing']:
        pytest.importorskip("scipy", minversion="1.2")

    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=25, max=70)
    params['sigma'].set(max=1)
    params['center'].set(min=5, max=15)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='leastsq')

    result_ndt = mod.fit(y, params, x=x, method=fit_method)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [
        result_ndt.params[p].value for p in result_ndt.params.valuesdict()
    ]
    assert_allclose(vals_ndt, vals, rtol=0.1)
    assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)

    # assert that parameter uncertainties from leastsq and those calculated
    # from the covariance matrix using numdifftools are very similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_ndt = [
        result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()
    ]

    perr = np.array(stderr) / np.array(vals)
    perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
    assert_almost_equal(perr_ndt, perr, decimal=3)

    # assert that parameter correlations from leastsq and those calculated
    # from the covariance matrix using numdifftools are very similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_ndt = [
            result_ndt.params[par1].correl[par2]
            for par2 in result_ndt.params[par1].correl.keys()
        ]
        assert_almost_equal(cor_ndt, cor, decimal=2)
Example #4
def test_least_squares_solver_options(peakdata, capsys):
    """Test least_squares algorithm, pass options to solver."""
    x = peakdata[0]
    y = peakdata[1]
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    solver_kws = {'verbose': 2}
    mod.fit(y, params, x=x, method='least_squares', fit_kws=solver_kws)
    captured = capsys.readouterr()

    assert 'Iteration' in captured.out
    assert 'final cost' in captured.out
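The fit_kws dictionary is passed through to the underlying solver, here scipy.optimize.least_squares, so any keyword that solver accepts can be forwarded. A hedged sketch with synthetic data (option values are illustrative):

import numpy as np
from lmfit.models import VoigtModel

x = np.linspace(0, 20, 401)
y = 5 * np.exp(-0.5 * ((x - 10.0) / 1.5)**2)  # synthetic peak

mod = VoigtModel()
params = mod.guess(y, x=x)
# 'verbose' and 'max_nfev' are scipy.optimize.least_squares options
result = mod.fit(y, params, x=x, method='least_squares',
                 fit_kws={'verbose': 2, 'max_nfev': 10000})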
Example #6
def test_numdifftools_no_bounds():
    numdifftools = pytest.importorskip("numdifftools")
    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['sigma'].set(min=-np.inf)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='leastsq')

    for fit_method in ['nelder', 'basinhopping', 'ampgo']:
        result_ndt = mod.fit(y, params, x=x, method=fit_method)

        # assert that fit converged to the same result
        vals = [result.params[p].value for p in result.params.valuesdict()]
        vals_ndt = [
            result_ndt.params[p].value for p in result_ndt.params.valuesdict()
        ]
        assert_allclose(vals_ndt, vals, rtol=5e-3)
        assert_allclose(result_ndt.chisqr, result.chisqr)

        # assert that parameter uncertainties from leastsq and those calculated
        # from the covariance matrix using numdifftools are very similar
        stderr = [result.params[p].stderr for p in result.params.valuesdict()]
        stderr_ndt = [
            result_ndt.params[p].stderr
            for p in result_ndt.params.valuesdict()
        ]

        perr = np.array(stderr) / np.array(vals)
        perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
        assert_almost_equal(perr_ndt, perr, decimal=4)

        # assert that parameter correlations from leastsq and those calculated
        # from the covariance matrix using numdifftools are very similar
        for par1 in result.var_names:
            cor = [
                result.params[par1].correl[par2]
                for par2 in result.params[par1].correl.keys()
            ]
            cor_ndt = [
                result_ndt.params[par1].correl[par2]
                for par2 in result_ndt.params[par1].correl.keys()
            ]
            assert_almost_equal(cor_ndt, cor, decimal=2)
Example #7
def test_least_squares_cov_x(peakdata, bounds):
    """Test calculation of cov. matrix from Jacobian, with/without bounds."""
    x = peakdata[0]
    y = peakdata[1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)

    if bounds:
        params['amplitude'].set(min=25, max=70)
        params['sigma'].set(min=0, max=1)
        params['center'].set(min=5, max=15)
    else:
        params['sigma'].set(min=-np.inf)

    # do fit with least_squares and leastsq algorithm
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [
        result_lsq.params[p].value for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(vals, vals_lsq, rtol=1e-5)
    assert_allclose(result.chisqr, result_lsq.chisqr)

    # assert that parameter uncertainties obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [
        result_lsq.params[p].stderr for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(stderr, stderr_lsq, rtol=1e-4)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_lsq = [
            result_lsq.params[par1].correl[par2]
            for par2 in result_lsq.params[par1].correl.keys()
        ]
        assert_allclose(cor, cor_lsq, rtol=1e-2)
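For method='least_squares', lmfit estimates result.covar from the Jacobian returned by the solver, essentially a Gauss-Newton estimate inv(J.T @ J) scaled by the residual variance; that estimate is what these comparisons against leastsq check. A hedged sketch of the linear-algebra step (illustrative, not lmfit's exact code path):

import numpy as np

def covar_from_jacobian(jac, resid, nvarys):
    # residual variance: chi-square over degrees of freedom
    s2 = (resid**2).sum() / (resid.size - nvarys)
    # Gauss-Newton covariance estimate at the solution
    return np.linalg.inv(jac.T @ jac) * s2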
Example #8
def test_cov_x_with_bounds():
    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=25, max=70)
    params['sigma'].set(min=0, max=1)
    params['center'].set(min=5, max=15)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [
        result_lsq.params[p].value for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(vals_lsq, vals, rtol=1e-5)
    assert_allclose(result_lsq.chisqr, result.chisqr)

    # assert that parameter uncertainties obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [
        result_lsq.params[p].stderr for p in result_lsq.params.valuesdict()
    ]
    assert_almost_equal(stderr_lsq, stderr, decimal=6)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_lsq = [
            result_lsq.params[par1].correl[par2]
            for par2 in result_lsq.params[par1].correl.keys()
        ]
        assert_almost_equal(cor_lsq, cor, decimal=6)
Example #9
def test_saveload_usersyms():
    """Test save/load of modelresult with non-trivial user symbols,
    this example uses a VoigtModel, wheree `wofz()` is used in a
    constraint expression"""
    x = np.linspace(0, 20, 501)
    y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5)
    np.random.seed(20)
    y = y + np.random.normal(size=len(x), scale=0.025)

    model = VoigtModel()
    pars = model.guess(y, x=x)
    result = model.fit(y, pars, x=x)

    savefile = 'tmpvoigt_modelresult.sav'
    save_modelresult(result, savefile)

    assert_param_between(result.params['sigma'], 0.7, 2.1)
    assert_param_between(result.params['center'], 8.4, 8.6)
    assert_param_between(result.params['height'], 0.2, 1.0)

    time.sleep(0.25)
    result2 = load_modelresult(savefile)

    assert_param_between(result2.params['sigma'], 0.7, 2.1)
    assert_param_between(result2.params['center'], 8.4, 8.6)
    assert_param_between(result2.params['height'], 0.2, 1.0)
Example #10
def test_bounds_expression():
    # load data to be fitted
    data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
                                   'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=0, max=100)
    params['center'].set(min=5, max=10)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x)

    # assert that stderr and correlations are correct [cf. lmfit v0.9.10]
    assert_almost_equal(result.params['sigma'].stderr, 0.00368468, decimal=6)
    assert_almost_equal(result.params['center'].stderr, 0.00505496, decimal=6)
    assert_almost_equal(result.params['amplitude'].stderr, 0.13861506,
                        decimal=6)
    assert_almost_equal(result.params['gamma'].stderr, 0.00368468, decimal=6)
    assert_almost_equal(result.params['fwhm'].stderr, 0.00806917, decimal=6)
    assert_almost_equal(result.params['height'].stderr, 0.03009459, decimal=6)

    assert_almost_equal(result.params['sigma'].correl['center'],
                        -4.6623973788006615e-05, decimal=6)
    assert_almost_equal(result.params['sigma'].correl['amplitude'],
                        0.651304091954038, decimal=6)
    assert_almost_equal(result.params['center'].correl['amplitude'],
                        -4.390334984618851e-05, decimal=6)
Example #11
    def voigt_response(self, sigma=None, gamma=None, weights=True):
        '''
        Fit the background with a Voigt profile to determine the response
        of the spectrometer

        If you have a good, clear signal, set sigma and gamma to None (done by default)

        If your signal is poor, set sigma and gamma using a fit to a good signal, and then
        only the position of the central wavelength will be altered.
        '''
        vm = VoigtModel()
        par_v = vm.guess(self.bkgd, x=self.lamb)
        par_v['center'].set(value=532e-9, vary=True)
        if sigma is not None:  # if a width is provided, fix it
            par_v['sigma'].set(value=sigma, vary=False)
        if gamma is not None:  # if a width is provided, fix it
            par_v['gamma'].set(value=gamma, vary=False, expr='')
        else:  # vary gamma for a better fit; this is not done by default
            par_v['gamma'].set(value=par_v['sigma'].value, vary=True, expr='')

        # Fit the Voigt model to the data
        if weights is True:
            weights = self.bkgd / self.bkgd_err
        elif weights is False:
            weights = np.ones_like(self.bkgd)
        self.vm_fit = vm.fit(self.bkgd, par_v, x=self.lamb, weights=weights)
        self.l0 = self.vm_fit.best_values['center']
        self.sigma = self.vm_fit.best_values['sigma']
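One detail voigt_response relies on: in lmfit's VoigtModel, gamma is tied to sigma through a constraint expression by default, so passing expr='' is what turns gamma into an independently varying parameter. A minimal sketch:

from lmfit.models import VoigtModel

mod = VoigtModel()
pars = mod.make_params(amplitude=1.0, center=0.0, sigma=0.5)
print(pars['gamma'].expr)  # 'sigma': gamma follows sigma by default
pars['gamma'].set(value=0.5, vary=True, expr='')  # now independent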
Example #13
def test_numdifftools_with_bounds(fit_method):
    pytest.importorskip("numdifftools")
    if fit_method == 'shgo':
        pytest.importorskip("scipy", minversion="1.2")

    # load data to be fitted
    data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
                                   'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=25, max=70)
    params['sigma'].set(max=1)
    params['center'].set(min=5, max=15)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='leastsq')

    result_ndt = mod.fit(y, params, x=x, method=fit_method)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
    assert_allclose(vals_ndt, vals, rtol=0.1)
    assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)

    # assert that parameter uncertainties from leastsq and those calculated
    # from the covariance matrix using numdifftools are very similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]

    perr = np.array(stderr) / np.array(vals)
    perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
    assert_almost_equal(perr_ndt, perr, decimal=3)

    # assert that parameter correlations from leastsq and those calculated
    # from the covariance matrix using numdifftools are very similar
    for par1 in result.var_names:
        cor = [result.params[par1].correl[par2] for par2 in
               result.params[par1].correl.keys()]
        cor_ndt = [result_ndt.params[par1].correl[par2] for par2 in
                   result_ndt.params[par1].correl.keys()]
        assert_almost_equal(cor_ndt, cor, decimal=2)
Example #14
def test_least_squares_cov_x(peakdata, bounds):
    """Test calculation of cov. matrix from Jacobian, with/without bounds."""
    x = peakdata[0]
    y = peakdata[1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)

    if bounds:
        params['amplitude'].set(min=25, max=70)
        params['sigma'].set(min=0, max=1)
        params['center'].set(min=5, max=15)
    else:
        params['sigma'].set(min=-np.inf)

    # do fit with least_squares and leastsq algorithm
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [result_lsq.params[p].value for p in
                result_lsq.params.valuesdict()]
    assert_allclose(vals, vals_lsq, rtol=1e-5)
    assert_allclose(result.chisqr, result_lsq.chisqr)

    # assert that parameter uncertainties obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [result_lsq.params[p].stderr for p in
                  result_lsq.params.valuesdict()]
    assert_allclose(stderr, stderr_lsq, rtol=1e-4)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacobian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [result.params[par1].correl[par2] for par2 in
               result.params[par1].correl.keys()]
        cor_lsq = [result_lsq.params[par1].correl[par2] for par2 in
                   result_lsq.params[par1].correl.keys()]
        assert_allclose(cor, cor_lsq, rtol=1e-2)
Example #15
def curve_fitting_voigt(dref, pars=None):
    xdata = dref.index.to_numpy()
    ydata = dref.to_numpy()

    mod = VoigtModel()
    if pars is None:
        pars = mod.guess(ydata, x=xdata)

    out = mod.fit(ydata, pars, x=xdata)

    return out
Example #16
def VoigtCalc(x, y, x1, y1):
    y = removerBackground(y)
    y1 = removerBackground(y1)

    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    out = mod.fit(y, pars, x=x)

    mod = VoigtModel()
    pars1 = mod.guess(y1, x=x1)
    pars1['gamma'].set(value=0.7, vary=True, expr='')
    out1 = mod.fit(y1, pars1, x=x1)

    center = out.best_values['center']

    sigma = Decon_Gau(out.best_values['sigma'], out1.best_values['sigma'])

    gamma = Decon_Lor(out.best_values['gamma'], out1.best_values['gamma'])

    return SingleLineEquation(sigma, gamma, center)
Example #17
def singleline(x, y, tipo, arquivo):
    ##    pdb.set_trace()
    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    ##    pars['sigma'].set(value=0.7, vary=True, expr='')
    out = mod.fit(y, pars, x=x)

    gamma = out.best_values['gamma']
    sigma = out.best_values['sigma']
    center = out.best_values['center']
    calcsingleline(gamma, sigma, center, tipo, arquivo)
Example #18
def fit_peak_1d(
    xdata: np.ndarray,
    ydata: np.ndarray,
    engine: str = 'lmfit',
) -> dict:
    """
    Description
    -----------
    Perform 1D peak fitting using Voigt function

    Parameters
    ----------
    xdata: np.ndarray
        independent var array
    ydata: np.ndarray
        dependent var array
    engine: str
        engine name, [lmfit, tomoproc]
    
    Returns
    -------
    dict
        dictionary of peak parameters

    NOTE
    ----
    The returned dictionary has different entries depending on the engine.
    """
    if engine.lower() in ['lmfit', 'external']:
        mod = VoigtModel()
        pars = mod.guess(ydata, x=xdata)
        out = mod.fit(ydata, pars, x=xdata)
        return out.best_values
    else:
        popt, pcov = curve_fit(
            voigt1d,
            xdata,
            ydata,
            maxfev=int(1e6),
            p0=[ydata.max(), xdata.mean(), 1, 1],
            bounds=([0, xdata.min(), 0, 0], [
                ydata.max() * 10,
                xdata.max(),
                xdata.max() - xdata.min(), np.inf
            ]),
        )
        return {
            'amplitude': popt[0],
            'center': popt[1],
            'fwhm': popt[2],
            'shape': popt[3],
        }
Example #19
def correlate_spectra(obs_flx, obs_wvl, ref_flx, ref_wvl):

    # convert spectra sampling to logspace
    obs_flux_res_log, _ = spectra_logspace(obs_flx, obs_wvl)
    ref_flux_sub_log, wvl_log = spectra_logspace(ref_flx, ref_wvl)
    wvl_step = ref_wvl[1] - ref_wvl[0]

    # correlate the two spectra
    min_flux = 0.95
    ref_flux_sub_log[ref_flux_sub_log > min_flux] = 0.
    obs_flux_res_log[obs_flux_res_log > min_flux] = 0.
    corr_res = correlate(ref_flux_sub_log, obs_flux_res_log, mode='same', method='fft')

    # plt.plot(corr_res)
    # plt.show()
    # plt.close()

    # create a correlation subset that will actually be analysed
    corr_w_size = 100
    corr_c_off = np.int64(len(corr_res) / 2.)
    corr_pos_min = corr_c_off - corr_w_size
    corr_pos_max = corr_c_off + corr_w_size
    # print corr_pos_min, corr_pos_max
    corr_res_sub = corr_res[corr_pos_min:corr_pos_max]
    corr_res_sub -= np.median(corr_res_sub)
    corr_res_sub_x = np.arange(len(corr_res_sub))

    # analyze correlation function by fitting gaussian/voigt/lorentzian distribution to it
    fit_model = VoigtModel()
    parameters = fit_model.guess(corr_res_sub, x=corr_res_sub_x)
    corr_fit_res = fit_model.fit(corr_res_sub, parameters, x=corr_res_sub_x)
    corr_center = corr_fit_res.params['center'].value

    # plt.plot(corr_res_sub)
    # plt.axvline(corr_center)
    # plt.show()
    # plt.close()

    # determine the actual shift
    idx_no_shift = np.int32(len(corr_res) / 2.)
    idx_center = corr_c_off - corr_w_size + corr_center
    log_shift_px = idx_no_shift - idx_center
    log_shift_wvl = log_shift_px * wvl_step

    wvl_log_new = wvl_log - log_shift_wvl
    rv_shifts = (wvl_log_new[1:] - wvl_log_new[:-1]) / wvl_log_new[:-1] * 299792.458 * log_shift_px

    if log_shift_wvl < 2:
        return np.nanmedian(rv_shifts)
    else:
        # something went wrong
        return np.nan
Example #20
def xrdCalculationProcessing(spectrumData, centerXValsList, heightList, axs, setupOptions):
    proposedUserSubstrateTwoTheta = centerXValsList[heightList.index(max(heightList))]
    substrateModel = VoigtModel()
    params = substrateModel.guess(spectrumData.bgSubIntensity, x=spectrumData.xVals, negative=False)
    out = substrateModel.fit(spectrumData.bgSubIntensity, params, x=spectrumData.xVals)
    fullModelSubstrateTwoTheta = out.best_values['center']
    if abs(fullModelSubstrateTwoTheta - proposedUserSubstrateTwoTheta) <= 0.1:
        # looks like the user selected the substrate as a peak, use their value
        substrateTwoTheta = proposedUserSubstrateTwoTheta
    else:
        # Looks like the user did not select the substrate as a peak, use a global value from fitting all data
        substrateTwoTheta = fullModelSubstrateTwoTheta

    literatureSubstrateTwoTheta = calculateTwoTheta(snContentPercent=0)  # Reusing Sn content to 2theta equation
    twoThetaOffset = substrateTwoTheta - literatureSubstrateTwoTheta
    offsetCorrectedCenterTwoThetaList = np.asarray(centerXValsList) - twoThetaOffset
    for centerTwoTheta in offsetCorrectedCenterTwoThetaList:
        michaelSnContent = round(calculateXRDSnContent(centerTwoTheta), 1)
        print("Michael Comp:", michaelSnContent)
        print("Zach Comp:", round(calculateXRDSnContent_Zach(centerTwoTheta), 1))
        if abs(centerTwoTheta - literatureSubstrateTwoTheta) > 0.05:  # Don't draw one for the substrate
            _, centerIndex = closestNumAndIndex(spectrumData.xVals, centerTwoTheta + twoThetaOffset)
            if setupOptions.isLogPlot:
                basePlot = spectrumData.lnIntensity
                subtractedPlot = spectrumData.lnBgSubIntensity
            else:
                basePlot = spectrumData.intensity
                subtractedPlot = spectrumData.bgSubIntensity
            if setupOptions.doBackgroundSubtraction:

                an0 = axs[0].annotate(str(abs(michaelSnContent)),
                                      xy=(centerTwoTheta + twoThetaOffset, basePlot[centerIndex]),
                                      xycoords='data', xytext=(0, 72), textcoords='offset points',
                                      arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5, patchA=None,
                                                      patchB=None))
                an0.draggable()

                an1 = axs[1].annotate(str(abs(michaelSnContent)), xy=(
                    centerTwoTheta + twoThetaOffset, subtractedPlot[centerIndex]), xycoords='data',
                                      xytext=(0, 72), textcoords='offset points',
                                      arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5, patchA=None,
                                                      patchB=None))
                an1.draggable()
            else:
                an0 = axs.annotate(str(abs(michaelSnContent)),
                                   xy=(centerTwoTheta + twoThetaOffset, subtractedPlot[centerIndex]),
                                   xycoords='data', xytext=(0, 72), textcoords='offset points',
                                   arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5, patchA=None,
                                                   patchB=None))
                an0.draggable()
Example #21
def voigtFit(filename, xloc=0, yloc=1, stats=False, plot=False):
    # Read Data
    df = pd.read_csv(filename, header=None)
    # Remove bad pixel
    df.drop(df.index[446], inplace=True)
    df.fillna(method='bfill', inplace=True)
    # Narrow region for later delays
    if 'd5' in filename:
        df = df[(df.iloc[:, xloc] > 287.75) & (df.iloc[:, xloc] < 288.6)]

    if 'd4' in filename and ('m1r' in filename or 'm2' in filename):
        df = df[(df.iloc[:, xloc] > 287.75) & (df.iloc[:, xloc] < 288.6)]

    x = np.array(df.iloc[:, xloc])
    y = np.array(df.iloc[:, yloc])

    # Set Voigt fit parameters
    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    # Perform Voigt fit
    out = mod.fit(y, pars, x=x)

    # Print fit statistics
    if stats:
        print(out.fit_report(min_correl=0.25, show_correl=False))

    # Plot Voigt fit
    if plot:
        plt.plot(x, y, 'o', markersize=2.0, c='blue')
        plt.plot(x, out.best_fit, 'r-')
        dely = out.eval_uncertainty(sigma=5)
        plt.fill_between(x,
                         out.best_fit - dely,
                         out.best_fit + dely,
                         color="#bc8f8f")
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Intensity (a.u.)')
        plt.xlim((287, 289.5))
        plt.show()

    # Save fit statistics
    for par_name, param in out.params.items():
        if par_name == 'gamma':
            return pd.DataFrame({
                'fid': [filename],
                'fwhm_L': [2 * param.value],
                'error': [2 * param.stderr],
                'R^2': [out.redchi]
            })
Example #22
    def onePeakVoigtFit(self):
        try:
            nRow, nCol = self.dockedOpt.fileInfo()

            self.gausFit.binFitData = plab.zeros((nRow, 0))
            self.gausFit.OnePkFitData = plab.zeros(
                (nCol, 6))  # Creates the empty 2D List
            for j in range(nCol):
                yy = self.dockedOpt.TT[:, j]
                xx = plab.arange(0, len(yy))
                x1 = xx[0]
                x2 = xx[-1]
                y1 = yy[0]
                y2 = yy[-1]
                m = (y2 - y1) / (x2 - x1)
                b = y2 - m * x2

                mod = VoigtModel()
                pars = mod.guess(yy, x=xx)

                mod = mod + LinearModel()
                pars.add('intercept', value=b, vary=True)
                pars.add('slope', value=m, vary=True)
                out = mod.fit(yy, pars, x=xx)
                amplitude = out.best_values['amplitude']

                fitError = self.getFitError(out.fit_report(sort_pars=True),
                                            amplitude)

                self.gausFit.OnePkFitData[j, :] = (amplitude, 0,
                                                   out.best_values['center'],
                                                   0, out.best_values['sigma'],
                                                   0)

                # Saves fitted data of each fit
                fitData = out.best_fit
                binFit = np.reshape(fitData, (len(fitData), 1))
                self.gausFit.binFitData = np.concatenate(
                    (self.gausFit.binFitData, binFit), axis=1)

                if self.gausFit.continueGraphingEachFit:
                    self.gausFit.graphEachFitRawData(xx, yy, out.best_fit, 'V')

            return False
        except Exception as e:
            qtWidgets.QMessageBox.warning(
                self.myMainWindow, "Error",
                "There was an error \n\n Exception: " + str(e))
            return True
Example #23
    def fit(self):
        x = self.energies_eV
        y = self.intensities

        model = VoigtModel()

        init_parameters = model.guess(y, x=x)
        self.fit_results = model.fit(y, init_parameters, x=x)

        values = self.fit_results.params.valuesdict()

        self.fit_results_position_eV = values['center']
        self.fit_results_fwhm_eV = values['fwhm']
        self.fit_results_sigma_eV = values['sigma']
        self.fit_results_gamma_eV = values['gamma']
        self.fit_results_area = values['amplitude']
        self.fit_results_height = values['height']
Example #24
    def voigt_response(self, sigma=None, gamma=None):
        '''
        Fit the background with a Voigt profile to determine the response
        of the spectrometer

        If you have a good, clear signal, set sigma and gamma to None (done by default)

        If your signal is poor, set sigma and gamma using a fit to a good signal, and then
        only the position of the central wavelength will be altered.
        '''
        vm = VoigtModel()
        par_v = vm.guess(self.bkgd, x=self.lamb)
        par_v['center'].set(value=532e-9, vary=True)
        err = (self.bkgd_ferr * self.shot)
        if sigma is not None:  # if a width is provided, fix it
            par_v['sigma'].set(value=sigma, vary=False)
        if gamma is not None:  # if a width is provided, fix it
            par_v['gamma'].set(value=gamma, vary=False, expr='')
        else:  # vary gamma for a better fit; this is not done by default
            par_v['gamma'].set(value=par_v['sigma'].value, vary=True, expr='')

        # Fit the Voigt model to the data
        vm_fit = vm.fit(self.bkgd, par_v, x=self.lamb)
        self.vm_fit = vm_fit
        #now crop the data so that the response is symmetric for the convolution to work
        l0 = vm_fit.best_values['center']
        self.sigma = vm_fit.best_values['sigma']
        self.l0 = l0
        l0_i = find_nearest(self.lamb, l0)
        l_size = self.lamb.size
        # trim the shortest distance from the central wavelength
        take_l = min(l0_i, l_size - l0_i)
        low_i = l0_i - take_l
        high_i = l0_i + take_l
        self.lamb = self.lamb[low_i:high_i]
        self.bkgd = self.bkgd[low_i:high_i]
        self.shot = self.shot[low_i:high_i]
        self.shot_ferr = self.shot_ferr[low_i:high_i]
        self.bkgd_ferr = self.bkgd_ferr[low_i:high_i]
        #the response is taken from the model so it is nice and smooth
        self.response = vm_fit.best_fit[low_i:high_i]
        self.shift = self.lamb - l0  #this is useful for plotting data
Example #25
def fit_one_Voigt(x_lst,y_lst, pre):
    '''
    Fits one Voigt profile and returns the
    lmfit result object
    '''
    x_lst = np.asarray(x_lst)
    y_lst = np.asarray(y_lst)

    mod = VoigtModel(prefix=pre, independent_vars=['x'], nan_policy='raise')

    # set up the peak-fitting guesses; make_params() turns these hints into a
    # Parameters object
    mod.set_param_hint(pre + 'amplitude', value=4 * np.max(y_lst),
                       min=3 * np.max(y_lst), max=7 * np.max(y_lst), vary=True)
    # mod.set_param_hint(prefp+'center', value = x_max, min = x_max*(1-wiggle_room), max = x_max*(1+wiggle_room),vary=True)
    mod.set_param_hint(pre + 'center', value=x_lst[np.argmax(y_lst)], vary=True)
    # basically FWHM/3.6
    w_guess = 2
    mod.set_param_hint(pre + 'sigma', value=w_guess, min=0, max=5 * w_guess,
                       vary=True)

    result = mod.fit(y_lst, x=x_lst, params=mod.make_params())

    return result
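Worth noting about the pattern above: set_param_hint stores guesses and bounds on the model object itself, and make_params() later turns them into a Parameters object, prefix included. A small sketch of that mechanism:

from lmfit.models import VoigtModel

mod = VoigtModel(prefix='p1_')
# hints may be given with or without the prefix
mod.set_param_hint('sigma', value=2.0, min=0.0, max=10.0)
pars = mod.make_params()
print(pars['p1_sigma'].value, pars['p1_sigma'].min, pars['p1_sigma'].max)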
Example #26
def fit_voigt(ax, spectra, args):
    fit_range = args.pl_range
    wl = spectra[:, 0]
    sp = spectra[:, 2]

    wl_fit = wl[(wl > fit_range[0]) & (wl < fit_range[1])]
    sp_fit = sp[(wl > fit_range[0]) & (wl < fit_range[1])]

    mod = VoigtModel() + ConstantModel()
    pars = mod.make_params(amplitude=np.max(sp_fit),
                           center=wl_fit[np.argmax(sp_fit)],
                           sigma=10,
                           gamma=10,
                           c=0)

    out = mod.fit(sp_fit, pars, x=wl_fit)

    ax.plot(wl_fit, out.best_fit, 'k--', alpha=0.8)
    print(f'Peak center = {out.params["center"].value:.2f} nm \n'
          f'FWHM = {out.params["fwhm"].value:.2f} nm')

    return out.params['center'].value, out.params['fwhm'].value
Example #27
def Plotar():

    global x, y, K, E, v

    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    out = mod.fit(y, pars, x=x)

    K = E / (1 + v)
    K = K / 2

    mult = np.tan(np.radians(out.best_values['center'] / 2))

    mult = 1 / mult

    print('multi: ', mult)

    K = K * mult * (-1)

    print(K)

    print(out.best_values)

    try:
        plt.title('Sample')
        plt.xlabel('2Theta')
        plt.ylabel("Intensity")
        plt.plot(x, out.best_fit, 'r-', label='bestfit')
        plt.plot(x, y, linestyle='-', marker='o', label='material')
        plt.grid()
        plt.legend()
        plt.show()

    except Exception:
        print('empty')
Example #28
def NewSingleLineDouble():
    plt.close()
    print "Single Line"
    global x, y, xs, ys

    copyx = copy.copy(x)
    copyy = copy.copy(y)
    copyxs = copy.copy(xs)
    copyys = copy.copy(ys)

    mini, maxi = getminmax()
    minis, maxis = stgetminmax()
    x = x[mini:maxi]
    y = y[mini:maxi]
    xs = xs[minis:maxis]
    ys = ys[minis:maxis]
    #pdb.set_trace()
    #mod = methodfunciont((comboBox1.get()))
    #print mod
    mod = VoigtModel()

    pars = mod.guess(y, x=x)
    ##    pars['gamma'].set(value=0.5, vary=True, expr='')
    ##    pars['sigma'].set(value=0.5, vary=True, expr='')
    out = mod.fit(y, pars, x=x)

    pars1 = mod.guess(ys, x=xs)
    ##    pars1['gamma'].set(value=0.5, vary=True, expr='')
    ##    pars1['sigma'].set(value=0.5, vary=True, expr='')
    out1 = mod.fit(ys, pars1, x=xs)

    # refit both if the sample's gamma came out smaller than the standard's
    if out.best_values['gamma'] < out1.best_values['gamma']:
        pars = mod.guess(y, x=x)
        out = mod.fit(y, pars, x=x)

        pars1 = mod.guess(ys, x=xs)
        out1 = mod.fit(ys, pars1, x=xs)

    print(out.values)
    print(out1.values)

    try:
        G = np.sqrt(
            ((2.3548200 * 1.06446701943 * np.radians(out.best_values['sigma']))
             **2 - (2.3548200 * 1.06446701943 *
                    np.radians(out1.best_values['sigma']))**2))
        padrao = (2.3548200 * 1.06446701943 *
                  np.radians(out.best_values['sigma']))**2
        amostra = (2.3548200 * 1.06446701943 *
                   np.radians(out1.best_values['sigma']))**2
    except Exception:
        G = 0

    L = np.radians(2 * out.best_values['gamma']) * 1.57079632679 - np.radians(
        2 * out1.best_values['gamma']) * 1.57079632679

    lambida = radiation(comboBoxrad.get())  #nm

    costheta = np.cos(np.radians(out.best_values['center'] / 2))
    tantheta = np.tan(np.radians(out.best_values['center'] / 2))

    RMSS = G / (4 * tantheta)
    RMSS = RMSS * 0.7978845608

    D = (lambida) / (L * costheta)
    D = int(D)

    print('D', D, 'RMSS', RMSS)

    plt.figure(1)
    plt.subplot(121)
    plt.grid()
    plt.xlabel(r'$2\Theta$', size=15)
    plt.ylabel("Normalized(u.a)", size=15)

    plt.plot(x, y, 'k-+', label='Sample')
    plt.plot(x, out.best_fit, 'k-', label='Best Fit')

    plt.legend()

    plt.subplot(122)
    plt.grid()
    plt.xlabel(r'$2\Theta$', size=15)
    plt.ylabel("Normalized(u.a)", size=15)

    plt.plot(xs, ys, 'k-+', label='Standard')
    plt.plot(xs, out1.best_fit, 'k--', label='Best Fit')
    plt.legend()

    x = copyx
    xs = copyxs
    y = copyy
    ys = copyys

    plt.show()
Example #29
def SingleLine():
    plt.close()
    global x, y, Lv
    mini, maxi = getminmax()

    x = x[mini:maxi]
    y = y[mini:maxi]

    if str(comboBox.get()) == 'VoigtModel':
        mod = VoigtModel()
        pars = mod.guess(y, x=x)
        pars['gamma'].set(value=0.7, vary=True, expr='')
        out = mod.fit(y, pars, x=x)

    elif str(comboBox.get()) == 'PseudoVoigtModel':
        mod = PseudoVoigtModel()
        pars = mod.guess(y, x=x)
        out = mod.fit(y, pars, x=x)

    print "Saida de dados"
    print(out.fit_report())

    print "Melhores dados"
    print out.best_values

    plt.figure(1)

    plt.subplot(221)
    plt.plot(x, y, label='original data', linestyle='-', marker='o')
    plt.title('Sample')
    plt.xlabel(r'$2\Theta$')
    plt.ylabel("Intensity")
    plt.grid()
    plt.legend()

    plt.subplot(222)
    plt.plot(x,
             out.best_fit,
             'r-',
             label='best fit',
             linestyle='-',
             marker='o')
    plt.title('Sample')
    plt.xlabel(r'$2\Theta$')
    plt.ylabel("Intensity")
    plt.grid()
    plt.legend()

    plt.subplot(212)

    plt.plot(x, y, linestyle='-', marker='o')
    plt.title(str(comboBox.get()))

    lambida = radiation(comboBoxrad.get())

    #D=(lambida)/(  radians( out.best_values['sigma']*0.5*sqrt(pi/log1p(2))) *2*cos( radians( out.best_values['center']/2)))

    center = out.best_values['center'] / 2
    center = radians(center)
    center = cos(center)
    tancenter = tan(center)

    sigmaL = out.best_values['gamma']  #*3.6013100
    sigmaL = radians(sigmaL) * 0.5 * sqrt(pi / log1p(2))

    D = lambida / (sigmaL * center)
    Lv = D

    E = (pi / sqrt(4 * log1p(2))) * (
        (radians(out.best_values['sigma'] * pi / 2))) / (4 * tancenter)

    if E < 0:
        E *= -1
    if D < 0:
        D *= -1

    #t = plt.text(0.5, 0.5, '$L_V(nm)$: '+ str(D) + '\n$<e>$: '+ str(E), transform=plt.subplot(212).transAxes, fontsize=10)
    #t.set_bbox(dict(color='red', alpha=0.5, edgecolor='red'))

    plt.xlabel(r'$2\Theta$')
    plt.ylabel("Intensity")
    print(D)
    print(E)
    plt.plot(x,
             out.best_fit,
             'r-',
             label='$L_V(nm)$: ' + str(int(D)) + '\n$ <e> $: ' + str(E),
             linestyle='-',
             marker='o')
    plt.plot(x, y - out.best_fit, label="residual")
    plt.legend()
    plt.grid()
    plt.show()
Example #30
def pre_edge_baseline(energy, norm=None, group=None, form='lorentzian',
                      emin=None, emax=None, elo=None, ehi=None,
                      with_line=True, _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline ['True']


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        norm          spectrum over pre-edge peak energies
        peaks         baseline-subtracted spectrum over pre-edge peak energies
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy, norm=norm, group=group, emin=emin, emax=emax,
                   elo=elo, ehi=ehi, _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy))/5.0

    imin = index_of(energy, emin+dele)
    ilo  = index_of(energy, elo+dele)
    ihi  = index_of(energy, ehi+dele)
    imax = index_of(energy, emax+dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo+1], energy[ihi:imax+1]))
    ydat = np.concatenate((norm[imin:ilo+1], norm[ihi:imax+1]))


    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0, sigma=2.0,
                               center=emax,
                               intercept=0, slope=0)
    params['amplitude'].min =  0.0
    params['sigma'].min     =  0.25
    params['sigma'].max     = 50.0
    params['center'].max    = emax + 25.0
    params['center'].min    = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]
    bline = peaks = dpeaks = norm*0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm-bline

        # estimate centroid
        cen = (edat*peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except Exception:
            dpeaks = 0.0

        cen_plus = (edat*(peaks+dpeaks)).sum()/ (peaks+dpeaks).sum()
        cen_minus = (edat*(peaks-dpeaks)).sum()/ (peaks-dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat, norm=norm, baseline=bline,
                           peaks=peaks, delta_peaks=dpeaks,
                           centroid=cen, delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin, emax=emax, elo=elo, ehi=ehi,
                           form=form, with_line=with_line)
    return
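Note 3's centroid is a plain intensity-weighted mean of the baseline-subtracted peaks; as a standalone sketch with illustrative numbers:

import numpy as np

energy = np.array([7110.0, 7111.0, 7112.0, 7113.0])  # eV, illustrative
peaks = np.array([0.05, 0.40, 0.35, 0.10])           # baseline-subtracted
centroid = (energy * peaks).sum() / peaks.sum()      # weighted mean energy
print(centroid)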
Example #31
def getcenter(x, y):
    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)
    return out.best_values['center']
Example #32
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group (see note 1)
       norm (ndarray or group):   array of normalized mu(E)
       group (group or None):     output group
       elo (float or None):       low energy of pre-edge peak region to not fit baseline [e0-20]
       ehi (float or None):       high energy of pre-edge peak region to not fit baseline [e0-10]
       emax (float or None):      max energy (eV) to use for baseline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baseline fit [e0-40]
       form (string):             form used for baseline (see note 2)  ['lorentzian']
       with_line (bool):          whether to include linear component in baseline ['True']
       _larch (larch instance or None):  current larch session.


    A function will be fit to the input mu(E) data over the range between
    [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the region
    [elo:ehi].  The baseline function is specified with the `form` keyword
    argument, which can be one of 'lorentzian', 'gaussian', or 'voigt',
    with 'lorentzian' the default.  In addition, the `with_line` keyword
    argument can be used to add a line to this baseline function.

    A group named 'prepeaks' will be used or created in the output group, containing

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         baseline         fitted baseline array over pre-edge peak energies
         norm             spectrum over pre-edge peak energies
         peaks            baseline-subtracted spectrum over pre-edge peak energies
         centroid         estimated centroid of pre-edge peaks (see note 3)
         peak_energies    list of predicted peak energies (see note 4)
         fit_details      details of fit to extract pre-edge peaks.
        ==============   ===========================================================

    Notes:
       1. Supports :ref:`First Argument Group` convention, requiring group members `energy` and `norm`
       2. Supports :ref:`Set XAFS Group` convention within Larch or if `_larch` is set.
       3. The value calculated for `prepeaks.centroid`  will be found as
          (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
       4. The values in the `peak_energies` list will be predicted energies
          of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy,
                   norm=norm,
                   group=group,
                   emin=emin,
                   emax=emax,
                   elo=elo,
                   ehi=ehi,
                   _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy)) / 5.0

    imin = index_of(energy, emin + dele)
    ilo = index_of(energy, elo + dele)
    ihi = index_of(energy, ehi + dele)
    imax = index_of(energy, emax + dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]
    bline = peaks = dpeaks = norm * 0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm - bline

        # estimate centroid
        cen = (edat * peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except Exception:
            dpeaks = 0.0

        cen_plus = (edat * (peaks + dpeaks)).sum() / (peaks + dpeaks).sum()
        cen_minus = (edat * (peaks - dpeaks)).sum() / (peaks - dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           norm=norm,
                           baseline=bline,
                           peaks=peaks,
                           delta_peaks=dpeaks,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
Example #33
def SingleLine():
    plt.close()
    global x, y
    mini, maxi = getminmax()

    x = x[mini:maxi]
    y = y[mini:maxi]

    if str(comboBox.get()) == 'VoigtModel':
        mod = VoigtModel()
    elif str(comboBox.get()) == 'PseudoVoigtModel':
        mod = PseudoVoigtModel()

    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)

    print(out.fit_report())

    plt.figure(1)

    plt.subplot(221)
    plt.plot(x, y, label='original data', linestyle='-', marker='o')
    plt.title('Amostra')
    plt.xlabel('2Theta')
    plt.ylabel("Intensity")
    plt.grid()
    plt.legend()

    ##plt.plot(x, out.init_fit, 'k--',label='initial ')

    plt.subplot(222)
    plt.plot(x,
             out.best_fit,
             'r-',
             label='best fit',
             linestyle='-',
             marker='o')
    plt.title('Amostra')
    plt.xlabel('2Theta')
    plt.ylabel("Intensity")
    plt.grid()
    plt.legend()

    plt.subplot(212)

    plt.plot(x, y, linestyle='-', marker='o')
    plt.title(str(comboBox.get()))

    lambida = radiation(comboBoxrad.get())

    #D=(lambida)/(  radians( out.best_values['sigma']*0.5*sqrt(pi/log1p(2))) *2*cos( radians( out.best_values['center']/2)))

    center = out.best_values['center'] / 2
    center = radians(center)
    center = cos(center)

    sigmaL = out.best_values['sigma'] * 3.6013100
    sigmaL = radians(sigmaL)

    D = lambida / (sigmaL * center)

    E = 2.35482 * (radians(out.best_values['sigma'] * pi / 2)) / (
        4 * tan(radians(out.best_values['center'] / 2)))

    if E < 0:
        E *= -1
    if D < 0:
        D *= -1

    t = plt.text(0.5,
                 0.5,
                 '$L_V(nm)$: ' + str(D) + '\n$<e>$: ' + str(E),
                 transform=plt.subplot(212).transAxes,
                 fontsize=10)
    t.set_bbox(dict(color='red', alpha=0.5, edgecolor='red'))

    plt.xlabel('2Theta')
    plt.ylabel("Intensity")
    plt.plot(x,
             out.best_fit,
             'r-',
             label='center: ' + str(out.best_values['center']) + '\nSigma: ' +
             str(out.best_values['sigma']),
             linestyle='-',
             marker='o')
    plt.legend()
    plt.grid()
    plt.show()
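Note: the size estimate above is the Scherrer equation D = K*lambda/(beta*cos(theta)) with K = 1, where beta is the peak FWHM in radians and theta the Bragg angle. A standalone sketch (function and argument names are illustrative):

from math import cos, radians

def scherrer_size(wavelength_nm, fwhm_deg, two_theta_deg, K=1.0):
    """Crystallite size in nm from D = K * lambda / (beta * cos(theta))."""
    beta = radians(fwhm_deg)             # peak FWHM converted to radians
    theta = radians(two_theta_deg / 2)   # Bragg angle from the 2theta position
    return K * wavelength_nm / (beta * cos(theta))

# e.g. Cu K-alpha (0.15406 nm), 0.2 deg FWHM at 2theta = 40 deg -> ~47 nm
print(scherrer_size(0.15406, 0.2, 40))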
Exemplo n.º 34
def voigtFit(df, delay, span, stats=False, plot=False):
    # Remove bad pixel
    df.drop(df.index[446], inplace=True)
    df.fillna(method='bfill', inplace=True)

    # Limit fitting region to selected span
    if (delay == 15) and (span == (545.4, 545.8)):
        span = (545.55, 545.8)
    elif (delay == 30) and (span == (545.4, 545.8)):
        span = (545.45, 545.85)
    elif (delay == 500) and (span == (545.4, 545.8)):
        span = (545.5, 545.75)

    if span == span3:
        if delay == 15:
            span = (536.9, 537.5)
        elif delay == 30:
            span = (537.05, 537.45)
        elif delay == 500:
            span = (537.1, 537.4)

    df = df[(df.index >= span[0]) & (df.index <= span[1])]
    x = df.index.values
    y = df[str(delay)].values

    # Set Voigt fit parameters
    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    # Perform Voigt fit
    out = mod.fit(y, pars, x=x)

    # Print fit statistics
    if stats:
        print(out.fit_report(min_correl=0.25, show_correl=False))

    # Plot Voigt fit
    if plot:
        if span == span1:
            plt.subplot(1, 2, 1)
            plt.title('Fe I 544.69, Delay: {} ns'.format(delay))
        elif span == span2:
            plt.subplot(1, 2, 2)
            plt.title('Fe I 545.55, Delay: {} ns'.format(delay))
        else:
            plt.title('Fe I 537.15, Delay: {} ns'.format(delay))
        plt.plot(x, y, 'o', markersize=2.0, c='blue')
        plt.plot(x, out.best_fit, 'r-')
        try:
            # 5-sigma uncertainty band of the best-fit model
            dely = out.eval_uncertainty(sigma=5)
        except Exception:
            dely = 0
        plt.fill_between(x,
                         out.best_fit - dely,
                         out.best_fit + dely,
                         color="#bc8f8f")

        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Intensity (a.u.)')
        plt.xlim(span)

    # Save fit statistics: for a Voigt profile the Lorentzian FWHM is 2*gamma
    gamma = out.params['gamma']
    return pd.DataFrame({
        'delay': [delay],
        'fwhm_L': [2 * gamma.value],
        'error': [2 * gamma.stderr],
        'R^2': [out.redchi]  # note: reduced chi-square, despite the label
    })
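Note: by default lmfit's VoigtModel constrains gamma to equal sigma through a parameter expression, so the `pars['gamma'].set(value=0.7, vary=True, expr='')` line above is what lets the Lorentzian width vary independently. A minimal sketch of the effect on synthetic data (names below are illustrative):

import numpy as np
from lmfit.models import VoigtModel

xs = np.linspace(-6, 6, 301)
vmod = VoigtModel()
true_pars = vmod.make_params(amplitude=8, center=0, sigma=0.4)
true_pars['gamma'].set(value=1.1, vary=True, expr='')  # a true gamma != sigma
truth = vmod.eval(true_pars, x=xs)

pars = vmod.guess(truth, x=xs)
pars['gamma'].set(value=0.7, vary=True, expr='')  # drop the gamma = sigma tie
fit = vmod.fit(truth, pars, x=xs)
print(fit.params['gamma'].value)  # should recover ~1.1 once gamma is free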
Exemplo n.º 35
def plot_data(data):
    signal_format = 'hist'  # 'line' or 'hist' or None
    Total_SM_label = False  # for Total SM black line in plot and legend
    plot_label = r'$Z \rightarrow ll$'
    signal_label = plot_label

    signal = None
    for s in ZBosonSamples.samples.keys():
        if s not in stack_order and s != 'data': signal = s

    for x_variable, hist in ZBosonHistograms.hist_dict.items():

        h_bin_width = hist['bin_width']
        h_num_bins = hist['num_bins']
        h_xrange_min = hist['xrange_min']
        h_xlabel = hist['xlabel']
        h_log_y = hist['log_y']
        h_y_label_x_position = hist['y_label_x_position']
        h_legend_loc = hist['legend_loc']
        h_log_top_margin = hist[
            'log_top_margin']  # to decrease the separation between data and the top of the figure, remove a 0
        h_linear_top_margin = hist[
            'linear_top_margin']  # to decrease the separation between data and the top of the figure, pick a number closer to 1

        bins = [h_xrange_min + x * h_bin_width for x in range(h_num_bins + 1)]
        bin_centres = [
            h_xrange_min + h_bin_width / 2 + x * h_bin_width
            for x in range(h_num_bins)
        ]

        if store_histograms:
            stored_histos = {}

        if load_histograms:  # not doing line for now
            npzfile = np.load(f'histograms/{x_variable}_hist_{fraction}.npz')
            # load bins
            loaded_bins = npzfile['bins']
            if not np.array_equal(bins, loaded_bins):
                raise ValueError('loaded bins do not match the requested binning')

            # load data
            data_x = npzfile['data']
            data_x_errors = np.sqrt(data_x)
            # load weighted signal
            signal_x_reshaped = npzfile[signal]
            signal_color = ZBosonSamples.samples[signal]['color']
            # load backgrounds
            mc_x_heights_list = []
            # mc_weights = []
            mc_colors = []
            mc_labels = []
            mc_x_tot = np.zeros(len(bin_centres))
            for s in stack_order:
                if s not in npzfile: continue
                mc_labels.append(s)
                # mc_x.append(data[s][x_variable].values)
                mc_colors.append(ZBosonSamples.samples[s]['color'])
                # mc_weights.append(data[s].totalWeight.values)
                mc_x_heights = npzfile[s]
                mc_x_heights_list.append(mc_x_heights)
                mc_x_tot = np.add(mc_x_tot, mc_x_heights)
            mc_x_err = np.sqrt(mc_x_tot)

        else:
            # ======== This creates histograms for the raw data events ======== #
            # no weights necessary (it's data)
            data_x, _ = np.histogram(data['data'][x_variable].values,
                                     bins=bins)
            data_x_errors = np.sqrt(data_x)
            if store_histograms:
                stored_histos[
                    'data'] = data_x  # saving histograms for later loading

            # ======== This creates histograms for signal simulation (Z->ll) ======== #
            # need to consider the event weights here
            signal_x = None
            if signal_format == 'line':
                signal_x, _ = np.histogram(
                    data[signal][x_variable].values,
                    bins=bins,
                    weights=data[signal].totalWeight.values)
            elif signal_format == 'hist':
                signal_x = data[signal][x_variable].values
                signal_weights = data[signal].totalWeight.values
                signal_color = ZBosonSamples.samples[signal]['color']
                signal_x_reshaped, _ = np.histogram(
                    data[signal][x_variable].values,
                    bins=bins,
                    weights=data[signal].totalWeight.values)
                if store_histograms:
                    stored_histos[
                        signal] = signal_x_reshaped  # saving histograms for later loading

            # ======== This creates histograms for all of the background simulation ======== #
            # weights are also necessary here, since we produce an arbitrary number of MC events
            mc_x_heights_list = []
            mc_weights = []
            mc_colors = []
            mc_labels = []
            mc_x_tot = np.zeros(len(bin_centres))

            for s in stack_order:
                if s not in data: continue
                if data[s].empty: continue
                mc_labels.append(s)
                # mc_x.append(data[s][x_variable].values)
                mc_colors.append(ZBosonSamples.samples[s]['color'])
                mc_weights.append(data[s].totalWeight.values)
                mc_x_heights, _ = np.histogram(
                    data[s][x_variable].values,
                    bins=bins,
                    weights=data[s].totalWeight.values)  #mc_heights?
                mc_x_heights_list.append(mc_x_heights)
                mc_x_tot = np.add(mc_x_tot, mc_x_heights)
                if store_histograms:
                    stored_histos[
                        s] = mc_x_heights  #saving histograms for later loading

            mc_x_err = np.sqrt(mc_x_tot)

        data_x_without_bkg = data_x - mc_x_tot

        # data fit

        # give empty bins infinite uncertainty so they get zero weight in the
        # fits below (and cannot cause division by zero)
        data_x_errors[data_x_errors == 0] = np.inf

        bin_centres_array = np.asarray(bin_centres)

        # *************
        # Models
        # *************
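        # Each fit below passes weights=1/data_x_errors, so lmfit minimizes
        # sum(((data - model) * weights)**2): a chi-square with Poisson
        # (sqrt(N)) errors; bins given infinite error get zero weight.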

        doniach_mod = DoniachModel()
        pars_doniach = doniach_mod.guess(data_x_without_bkg,
                                         x=bin_centres_array,
                                         amplitude=2100000 * fraction,
                                         center=90.5,
                                         sigma=2.3,
                                         height=10000 * fraction / 0.01,
                                         gamma=0)
        doniach = doniach_mod.fit(data_x_without_bkg,
                                  pars_doniach,
                                  x=bin_centres_array,
                                  weights=1 / data_x_errors)
        params_dict_doniach = doniach.params.valuesdict()

        gaussian_mod = GaussianModel()
        pars_gaussian = gaussian_mod.guess(data_x_without_bkg,
                                           x=bin_centres_array,
                                           amplitude=6000000 * fraction,
                                           center=90.5,
                                           sigma=3)
        gaussian = gaussian_mod.fit(data_x_without_bkg,
                                    pars_gaussian,
                                    x=bin_centres_array,
                                    weights=1 / data_x_errors)
        params_dict_gaussian = gaussian.params.valuesdict()

        lorentzian_mod = LorentzianModel()
        pars = lorentzian_mod.guess(data_x_without_bkg,
                                    x=bin_centres_array,
                                    amplitude=6000000 * fraction,
                                    center=90.5,
                                    sigma=2.9,
                                    gamma=1)
        lorentzian = lorentzian_mod.fit(data_x_without_bkg,
                                        pars,
                                        x=bin_centres_array,
                                        weights=1 / data_x_errors)
        params_dict_lorentzian = lorentzian.params.valuesdict()

        voigt_mod = VoigtModel()
        pars = voigt_mod.guess(data_x_without_bkg,
                               x=bin_centres_array,
                               amplitude=6800000 * fraction,
                               center=90.5,
                               sigma=1.7)
        voigt = voigt_mod.fit(data_x_without_bkg,
                              pars,
                              x=bin_centres_array,
                              weights=1 / data_x_errors)
        params_dict_voigt = voigt.params.valuesdict()

        voigt_mod_2 = VoigtModel()
        polynomial = PolynomialModel(2)
        pars = voigt_mod_2.guess(data_x_without_bkg,
                                 x=bin_centres_array,
                                 amplitude=6800000 * fraction,
                                 center=90.5,
                                 sigma=1.7)
        pars += polynomial.guess(data_x_without_bkg,
                                 x=bin_centres_array,
                                 c0=data_x_without_bkg.max(),
                                 c1=0,
                                 c2=0)
        voigt_poly_mod = voigt_mod_2 + polynomial
        voigt_poly = voigt_poly_mod.fit(data_x_without_bkg,
                                        pars,
                                        x=bin_centres_array,
                                        weights=1 / data_x_errors)
        params_dict_voigt_poly = voigt_poly.params.valuesdict()
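        # Note: adding two lmfit models (voigt_mod_2 + polynomial) builds a
        # CompositeModel whose parameters are the union of both components,
        # which is why the polynomial's c0..c2 guesses are merged into `pars`
        # before the combined fit.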

        if store_histograms:
            # save all histograms in npz format. different file for each variable. bins are common
            os.makedirs('histograms', exist_ok=True)
            np.savez(f'histograms/{x_variable}_hist.npz',
                     bins=bins,
                     **stored_histos)
            # ======== Now we start doing the fit ======== #

        # *************
        # Main plot
        # *************
        plt.clf()
        plt.axes([0.1, 0.3, 0.85, 0.65])  # (left, bottom, width, height)
        main_axes = plt.gca()
        main_axes.errorbar(x=bin_centres,
                           y=data_x,
                           yerr=data_x_errors,
                           fmt='ko',
                           label='Data')
        # this effectively makes a stacked histogram
        bottoms = np.zeros_like(bin_centres)
        for mc_x_height, mc_color, mc_label in zip(mc_x_heights_list,
                                                   mc_colors, mc_labels):
            main_axes.bar(bin_centres,
                          mc_x_height,
                          bottom=bottoms,
                          color=mc_color,
                          label=mc_label,
                          width=h_bin_width * 1.01)
            bottoms = np.add(bottoms, mc_x_height)

        main_axes.plot(bin_centres, doniach.best_fit, '-r', label='Doniach')
        main_axes.plot(bin_centres, gaussian.best_fit, '-g', label='Gaussian')
        main_axes.plot(bin_centres,
                       lorentzian.best_fit,
                       '-y',
                       label='Lorentzian')
        main_axes.plot(bin_centres, voigt.best_fit, '--', label='Voigt')
        main_axes.plot(bin_centres,
                       voigt_poly.best_fit,
                       '-v',
                       label='Voigt and Polynomial')

        if Total_SM_label:
            totalSM_handle, = main_axes.step(bins,
                                             np.insert(mc_x_tot, 0,
                                                       mc_x_tot[0]),
                                             color='black')
        if signal_format == 'line':
            main_axes.step(bins,
                           np.insert(signal_x, 0, signal_x[0]),
                           color=ZBosonSamples.samples[signal]['color'],
                           linestyle='--',
                           label=signal)
        elif signal_format == 'hist':
            main_axes.bar(bin_centres,
                          signal_x_reshaped,
                          bottom=bottoms,
                          color=signal_color,
                          label=signal,
                          width=h_bin_width * 1.01)
            bottoms = np.add(bottoms, signal_x_reshaped)
        main_axes.bar(bin_centres,
                      2 * mc_x_err,
                      bottom=bottoms - mc_x_err,
                      alpha=0.5,
                      color='none',
                      hatch="////",
                      width=h_bin_width * 1.01,
                      label='Stat. Unc.')

        mc_x_tot = bottoms

        main_axes.set_xlim(left=h_xrange_min, right=bins[-1])
        main_axes.xaxis.set_minor_locator(
            AutoMinorLocator())  # separation of x axis minor ticks
        main_axes.tick_params(which='both',
                              direction='in',
                              top=True,
                              labeltop=False,
                              labelbottom=False,
                              right=True,
                              labelright=False)

        if h_log_y:
            main_axes.set_yscale('log')
            # use the second-largest bin of the first background component as
            # the lower y-limit; sort a copy so the histogram isn't reordered
            smallest_contribution = np.sort(mc_x_heights_list[0])
            bottom = smallest_contribution[-2]
            if bottom == 0: bottom = 0.001  # log doesn't like zero
            top = np.amax(data_x) * h_log_top_margin
            main_axes.set_ylim(bottom=bottom, top=top)
            main_axes.yaxis.set_major_formatter(CustomTicker())
            locmin = LogLocator(base=10.0,
                                subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
                                      0.9),
                                numticks=12)
            main_axes.yaxis.set_minor_locator(locmin)
        else:
            main_axes.set_ylim(
                bottom=0,
                top=(np.amax(data_x) + math.sqrt(np.amax(data_x))) *
                h_linear_top_margin)
            main_axes.yaxis.set_minor_locator(AutoMinorLocator())
            main_axes.yaxis.get_major_ticks()[0].set_visible(False)

        plt.text(0.015,
                 0.97,
                 'ATLAS Open Data',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 fontsize=13)
        plt.text(0.015,
                 0.9,
                 'for education',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 style='italic',
                 fontsize=8)
        plt.text(0.015,
                 0.86,
                 r'$\sqrt{s}=13\,\mathrm{TeV},\;\int L\,dt=$' +
                 str(lumi_used) + r'$\,\mathrm{fb}^{-1}$',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes)
        plt.text(0.015,
                 0.78,
                 plot_label,
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes)
        plt.text(0.015,
                 0.72,
                 r'$m_Z = $' + str(round(params_dict_doniach['center'], 4)) +
                 ' GeV',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 fontsize=10)

        # Create new legend handles but use the colors from the existing ones
        handles, labels = main_axes.get_legend_handles_labels()
        if signal_format == 'line':
            handles[labels.index(signal)] = Line2D(
                [], [],
                c=ZBosonSamples.samples[signal]['color'],
                linestyle='dashed')
        uncertainty_handle = mpatches.Patch(facecolor='none', hatch='////')
        if Total_SM_label:
            handles.append((totalSM_handle, uncertainty_handle))
            labels.append('Total SM')
        else:
            handles.append(uncertainty_handle)
            labels.append('Stat. Unc.')

        # specify order within legend
        new_handles = [
            handles[labels.index('Data')], handles[labels.index('Doniach')],
            handles[labels.index('Gaussian')],
            handles[labels.index('Lorentzian')],
            handles[labels.index('Voigt')],
            handles[labels.index('Voigt and Polynomial')]
        ]
        new_labels = [
            'Data', 'Doniach', 'Gaussian', 'Lorentzian', 'Voigt',
            'Voigt and Polynomial'
        ]
        for s in reversed(stack_order):
            if s not in labels:
                continue
            new_handles.append(handles[labels.index(s)])
            new_labels.append(s)
        if signal is not None:
            new_handles.append(handles[labels.index(signal)])
            new_labels.append(signal_label)
        if Total_SM_label:
            new_handles.append(handles[labels.index('Total SM')])
            new_labels.append('Total SM')
        else:
            new_handles.append(handles[labels.index('Stat. Unc.')])
            new_labels.append('Stat. Unc.')
        main_axes.legend(handles=new_handles,
                         labels=new_labels,
                         frameon=False,
                         loc=h_legend_loc,
                         fontsize='x-small')

        # *************
        # Data / MC plot
        # *************

        plt.axes([0.1, 0.1, 0.85, 0.2])  # (left, bottom, width, height)
        ratio_axes = plt.gca()
        ratio_axes.yaxis.set_major_locator(
            MaxNLocator(nbins='auto', symmetric=True))
        ratio_axes.errorbar(
            x=bin_centres, y=data_x / signal_x_reshaped, fmt='ko'
        )  # TODO: yerr=data_x_errors produce error bars that are too big
        ratio_axes.set_xlim(left=h_xrange_min, right=bins[-1])
        ratio_axes.plot(bins, np.ones(len(bins)), color='k')
        ratio_axes.xaxis.set_minor_locator(
            AutoMinorLocator())  # separation of x axis minor ticks
        ratio_axes.xaxis.set_label_coords(
            0.9, -0.2)  # (x,y) of x axis label # 0.2 down from x axis
        ratio_axes.set_xlabel(h_xlabel, fontname='sans-serif', fontsize=11)
        ratio_axes.set_ylim(bottom=0, top=2)
        ratio_axes.set_yticks([0, 1])
        ratio_axes.tick_params(which='both',
                               direction='in',
                               top=True,
                               labeltop=False,
                               right=True,
                               labelright=False)
        ratio_axes.yaxis.set_minor_locator(AutoMinorLocator())
        ratio_axes.set_ylabel(r'Data / Pred',
                              fontname='sans-serif',
                              x=1,
                              fontsize=11)

        # Generic features for both plots
        main_axes.yaxis.set_label_coords(h_y_label_x_position, 1)
        ratio_axes.yaxis.set_label_coords(h_y_label_x_position, 0.5)

        plt.savefig("ZBoson_" + x_variable + ".pdf", bbox_inches='tight')

        # ========== Statistics ==========

        # ========== Doniach ==========
        chisqr_doniach = mychisqr(doniach.residual, doniach.best_fit)
        redchisqr_doniach = chisqr_doniach / doniach.nfree
        center_doniach = params_dict_doniach['center']
        sigma_doniach = params_dict_doniach['sigma']

        rel_unc_center_doniach = doniach.params[
            'center'].stderr / doniach.params['center'].value
        rel_unc_sigma_doniach = doniach.params[
            'sigma'].stderr / doniach.params['sigma'].value

        # ========== Gaussian ==========
        chisqr_gaussian = mychisqr(gaussian.residual, gaussian.best_fit)
        redchisqr_gaussian = chisqr_gaussian / gaussian.nfree
        center_gaussian = params_dict_gaussian['center']
        sigma_gaussian = params_dict_gaussian['sigma']

        rel_unc_center_gaussian = gaussian.params[
            'center'].stderr / gaussian.params['center'].value
        rel_unc_sigma_gaussian = gaussian.params[
            'sigma'].stderr / gaussian.params['sigma'].value

        # ========== Lorentzian ==========
        chisqr_lorentzian = mychisqr(lorentzian.residual, lorentzian.best_fit)
        redchisqr_lorentzian = chisqr_lorentzian / lorentzian.nfree
        center_lorentzian = params_dict_lorentzian['center']
        sigma_lorentzian = params_dict_lorentzian['sigma']

        rel_unc_center_lorentzian = lorentzian.params[
            'center'].stderr / lorentzian.params['center'].value
        rel_unc_sigma_lorentzian = lorentzian.params[
            'sigma'].stderr / lorentzian.params['sigma'].value

        # ========== Voigt ==========
        chisqr_voigt = mychisqr(voigt.residual, voigt.best_fit)
        redchisqr_voigt = chisqr_voigt / voigt.nfree
        center_voigt = params_dict_voigt['center']
        sigma_voigt = params_dict_voigt['sigma']

        rel_unc_center_voigt = voigt.params['center'].stderr / voigt.params[
            'center'].value
        rel_unc_sigma_voigt = voigt.params['sigma'].stderr / voigt.params[
            'sigma'].value

        # ========== Voigt and Polynomial ==========
        chisqr_voigt_poly = mychisqr(voigt_poly.residual, voigt_poly.best_fit)
        redchisqr_voigt_poly = chisqr_voigt_poly / voigt_poly.nfree
        center_voigt_poly = params_dict_voigt_poly['center']
        sigma_voigt_poly = params_dict_voigt_poly['sigma']

        rel_unc_center_voigt_poly = voigt_poly.params[
            'center'].stderr / voigt_poly.params['center'].value
        rel_unc_sigma_voigt_poly = voigt_poly.params[
            'sigma'].stderr / voigt_poly.params['sigma'].value

        # note: the per-model 'center'/'sigma' columns hold relative uncertainties
        df_dict = {
            'fraction': [fraction],
            'luminosity': [lumi_used],
            'doniach chisqr': [chisqr_doniach],
            'doniach redchisqr': [redchisqr_doniach],
            'doniach center': [rel_unc_center_doniach],
            'doniach sigma': [rel_unc_sigma_doniach],
            'gaussian chisqr': [chisqr_gaussian],
            'gaussian redchisqr': [redchisqr_gaussian],
            'gaussian center': [rel_unc_center_gaussian],
            'gaussian sigma': [rel_unc_sigma_gaussian],
            'lorentzian chisqr': [chisqr_lorentzian],
            'lorentzian redchisqr': [redchisqr_lorentzian],
            'lorentzian center': [rel_unc_center_lorentzian],
            'lorentzian sigma': [rel_unc_sigma_lorentzian],
            'voigt chisqr': [chisqr_voigt],
            'voigt redchisqr': [redchisqr_voigt],
            'voigt center': [rel_unc_center_voigt],
            'voigt sigma': [rel_unc_sigma_voigt],
            'voigt poly chisqr': [chisqr_voigt_poly],
            'voigt poly redchisqr': [redchisqr_voigt_poly],
            'voigt poly center': [rel_unc_center_voigt_poly],
            'voigt poly sigma': [rel_unc_sigma_voigt_poly]
        }

        temp = pd.DataFrame(df_dict)

        fit_results = pd.read_csv('fit_results.csv')

        fit_results_concat = pd.concat([fit_results, temp])

        fit_results_concat.to_csv('fit_results.csv', index=False)

        print("=====================================================")
        print("Statistics for the Doniach Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_doniach))
        print("chi^2/dof = " + str(redchisqr_doniach))
        print("center = " + str(center_doniach))
        print("sigma = " + str(sigma_doniach))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_doniach))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_doniach))

        print("\n")
        print("=====================================================")
        print("Statistics for the Gaussian Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_gaussian))
        print("chi^2/dof = " + str(redchisqr_gaussian))
        print("center = " + str(center_gaussian))
        print("sigma = " + str(sigma_gaussian))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_gaussian))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_gaussian))

        print("\n")
        print("=====================================================")
        print("Statistics for the Lorentzian Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_lorentzian))
        print("chi^2/dof = " + str(redchisqr_lorentzian))
        print("center = " + str(center_lorentzian))
        print("sigma = " + str(sigma_lorentzian))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_lorentzian))
        print("Relative Uncertainty of Sigma = " +
              str(rel_unc_sigma_lorentzian))

        print("\n")
        print("=====================================================")
        print("Statistics for the Voigt Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_voigt))
        print("chi^2/dof = " + str(redchisqr_voigt))
        print("center = " + str(center_voigt))
        print("sigma = " + str(sigma_voigt))
        print("Relative Uncertainty of Center = " + str(rel_unc_center_voigt))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_voigt))

        print("\n")
        print("=====================================================")
        print("Statistics for the Voigt and Polynomial Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_voigt_poly))
        print("chi^2/dof = " + str(redchisqr_voigt_poly))
        print("center = " + str(center_voigt_poly))
        print("sigma = " + str(sigma_voigt_poly))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_voigt_poly))
        print("Relative Uncertainty of Sigma = " +
              str(rel_unc_sigma_voigt_poly))

        # ========= Plotting Residuals =========

        # the five residual plots share one layout, so draw them in a loop
        for fname, title, fit_result in [
                ('doniach', 'Doniach', doniach),
                ('gaussian', 'Gaussian', gaussian),
                ('lorentzian', 'Lorentzian', lorentzian),
                ('voigt', 'Voigt', voigt),
                ('voigt_poly', 'Voigt and Polynomial', voigt_poly)]:

            plt.clf()
            plt.axes([0.1, 0.3, 0.85, 0.65])  # (left, bottom, width, height)
            main_axes = plt.gca()

            main_axes.set_title(title + " Model Residuals")

            main_axes.errorbar(x=bin_centres, y=fit_result.residual, fmt='ko')

            main_axes.set_xlim(left=h_xrange_min, right=bins[-1])
            main_axes.xaxis.set_minor_locator(
                AutoMinorLocator())  # separation of x axis minor ticks
            main_axes.tick_params(which='both',
                                  direction='in',
                                  top=True,
                                  labeltop=False,
                                  right=True,
                                  labelright=False)

            main_axes.set_xlabel(r'$M_Z$ GeV')
            main_axes.xaxis.get_major_ticks()[0].set_visible(False)

            main_axes.set_ylim(bottom=1.05 * fit_result.residual.min(),
                               top=1.05 * fit_result.residual.max())
            main_axes.yaxis.set_minor_locator(AutoMinorLocator())
            main_axes.yaxis.get_major_ticks()[0].set_visible(False)
            main_axes.set_ylabel("Residual")

            plt.savefig("plots/" + fname + "_residuals.pdf",
                        bbox_inches='tight')

    if load_histograms: return None, None
    return signal_x, mc_x_tot
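Note: the store/load branches above rely on np.savez writing each histogram under its sample name and np.load returning a dict-like NpzFile. A minimal round-trip sketch (file name and keys below are illustrative):

import numpy as np

bins = np.linspace(60, 120, 61)
np.savez('example_hist.npz', bins=bins, data=np.zeros(60))

npz = np.load('example_hist.npz')
assert np.array_equal(npz['bins'], bins)  # same consistency check as above
print('data' in npz)                      # NpzFile supports membership tests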
Exemplo n.º 36
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-15]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-5]
    emax:      max energy (eV) to use for baseline fit [e0-1]
    emin:      min energy (eV) to use for baseline fit [e0-30]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline [True]


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array over the pre-edge peak region [emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        mu            baseline-subtracted spectrum over pre-edge peak energies
        dmu           estimated uncertainty in mu from fit
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.mu).sum() / prepeaks.mu.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.mu` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)

    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to pre_edge_baseline")

    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    imin = index_of(energy, emin)
    ilo = index_of(energy, elo)
    ihi = index_of(energy, ehi)
    imax = index_of(energy, emax)

    # build xdat, ydat: data to fit (skipping the pre-edge peak region)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    # run fit
    result = model.fit(ydat, params, x=xdat)

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]

    # get baseline and resulting mu over edat range
    bline = result.eval(result.params, x=edat)
    mu = norm[imin:imax + 1] - bline

    # uncertainty in mu includes only uncertainties in baseline fit
    dmu = result.eval_uncertainty(result.params, x=edat)

    # estimate centroid and its uncertainty
    cen = (edat * mu).sum() / mu.sum()
    cen_plus = (edat * (mu + dmu)).sum() / (mu + dmu).sum()
    cen_minus = (edat * (mu - dmu)).sum() / (mu - dmu).sum()
    dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    peak_energies = []
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(mu, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           mu=mu,
                           delta_mu=dmu,
                           baseline=bline,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
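Note: a minimal usage sketch of the function above, assuming `dat` is a Larch group already processed by pre_edge() so that it carries .energy, .norm and .e0 (the group name is illustrative):

pre_edge_baseline(dat.energy, norm=dat.norm, group=dat, form='voigt')
print(dat.prepeaks.centroid, '+/-', dat.prepeaks.delta_centroid)
print(dat.prepeaks.peak_energies)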
Exemplo n.º 37
x = data[:, 0]
y = data[:, 1]

gmodel = GaussianModel()
gparams = gmodel.guess(y, x=x)
gresult = gmodel.fit(y, gparams, x=x)

print('With Gaussian: ')
print(fit_report(gresult.params, min_correl=0.25))
print('Chi-square = %.3f, Reduced Chi-square = %.3f' % (gresult.chisqr, gresult.redchi))
plt.plot(x, y,         'k')
plt.plot(x, 10*(y - gresult.best_fit), 'r-')


vmodel = VoigtModel()
vparams = vmodel.guess(y, x=x)
vresult = vmodel.fit(y, vparams, x=x)

print('With Voigt: ')
print(fit_report(vresult.params, min_correl=0.25))
print('Chi-square = %.3f, Reduced Chi-square = %.3f' % (vresult.chisqr, vresult.redchi))

plt.plot(x, 10*(y - vresult.best_fit), 'b-')


# free gamma from its default tie to sigma and refit
vparams['gamma'].set(vary=True, expr='')

vresult2 = vmodel.fit(y, vparams, x=x)

print('With Voigt, varying gamma: ')
print(fit_report(vresult2.params, min_correl=0.25))