import numpy as np
import matplotlib.pyplot as plt
from lmfit.models import LinearModel, VoigtModel


def fitting(filename):
    data = np.genfromtxt(filename)
    x = data[:, 0]
    y = data[:, 1]
    lin_shift = LinearModel(prefix='lin_')
    pars = lin_shift.guess(y, x=x)

    voight1 = VoigtModel(prefix='v1_')
    pars.update(voight1.make_params())

    pars['v1_center'].set(-0.65)
    pars['v1_sigma'].set(0.1)
    pars['v1_gamma'].set(0.1)
    pars['v1_amplitude'].set(-0.4)

    voight2 = VoigtModel(prefix='v2_')
    pars.update(voight2.make_params())

    pars['v2_center'].set(0)
    pars['v2_sigma'].set(0.1)
    pars['v2_gamma'].set(0.1)
    pars['v2_amplitude'].set(-1.0)

    voight3 = VoigtModel(prefix='v3_')
    pars.update(voight3.make_params())

    pars['v3_center'].set(0.75)
    pars['v3_sigma'].set(0.5)
    pars['v3_gamma'].set(0.5)
    pars['v3_amplitude'].set(-1.4)

    voight4 = VoigtModel(prefix='v4_')
    pars.update(voight4.make_params())

    pars['v4_center'].set(1.1)
    pars['v4_sigma'].set(0.15)
    pars['v4_gamma'].set(0.15)
    pars['v4_amplitude'].set(-0.6)

    mod = lin_shift + voight1 + voight2 + voight3 + voight4
    init = mod.eval(pars, x=x)
    out = mod.fit(y, pars, x=x)
    y_fit = out.best_fit

    # print(out.fit_report())
    out.plot(datafmt='g-', fitfmt='r--')
    plt.show()
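A minimal usage sketch (not part of the original), assuming 'spectrum.dat' is a hypothetical whitespace-delimited two-column file (x, y) that np.genfromtxt can read:

if __name__ == '__main__':
    fitting('spectrum.dat')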
Example #2
def call_voigt(x, y, cen, count, pars):
	label='v'+str(count)+'_'
	voigt = VoigtModel(prefix=label)
	pars.update(voigt.make_params())
	pars[label+'center'].set(cen, min=cen-0.01, max=cen+0.01)
	pars[label+'amplitude'].set(-0.5, min=-10., max=0.0001)
	pars[label+'sigma'].set(0.1, min=0.005, max=0.25)
	pars[label+'gamma'].set(value=0.7, vary=True, expr='')
	return voigt
Example #3
def call_voigt(x, y, cen, count, pars):
	label='v'+str(count)+'_'
	voigt = VoigtModel(prefix=label)
	pars.update(voigt.make_params())
	pars[label+'center'].set(cen, min=cen-0.01, max=cen+0.01)
	pars[label+'amplitude'].set(0, min=-(max(y)-min(y))*1.5, max=0.0001)
	pars[label+'sigma'].set(fw_set/4, min=0.005, max=fw_set/2.3548)
	pars[label+'gamma'].set(value=fw_set/4, vary=True, expr='')
	return voigt
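A hypothetical driver (a sketch, not from the original) showing how a call_voigt helper might be chained into a composite model; it assumes the first variant above (the second additionally needs fw_set defined) and the usual lmfit/numpy imports at module level:

from lmfit.models import LinearModel

def fit_multiplet(x, y, centers):
    # Linear background plus one Voigt dip per requested center.
    background = LinearModel(prefix='lin_')
    pars = background.guess(y, x=x)
    mod = background
    for count, cen in enumerate(centers, start=1):
        mod = mod + call_voigt(x, y, cen, count, pars)
    return mod.fit(y, pars, x=x)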
Example #4
def fitsample(data, theta_initial, theta_final):        
    
    
    x = data[:,0]
    y = data[:,1]
    m = (x > theta_initial) & (x < theta_final)
    x_fit = x[m]
    y_fit = y[m]

    

    pseudovoigt1 = VoigtModel(prefix = 'pv1_')    
    pars= pseudovoigt1.make_params()
    pars['pv1_center'].set(13.5, min = 13.4, max = 13.6)
    pars['pv1_sigma'].set(0.05, min= 0.01, max = 0.1)
    pars['pv1_amplitude'].set(70, min = 1, max = 100)
    #pars['pv1_fraction'].set(0.5)
    

    lorentz2 = LorentzianModel(prefix = 'lor2_')
    pars.update(lorentz2.make_params())
    pars['lor2_center'].set(13.60, min = 13.4, max = 13.9)
    pars['lor2_sigma'].set(0.1, min= 0.01)
    pars['lor2_amplitude'].set(10, min = 1, max = 50 )
    #pars['lor2_fraction'].set(0.5)
    
    line1 = LinearModel(prefix ='l1_')
    pars.update(line1.make_params())
    pars['l1_slope'].set(0)
    pars['l1_intercept'].set(240, min = 200, max = 280)

    
    
    mod = pseudovoigt1 + lorentz2 + line1
    v = pars.valuesdict()
     
    result = mod.fit(y_fit, pars, x=x_fit)    

    #print(result.fit_report())    
    pv1_pos = result.params['pv1_center'].value
    pv1_height = result.params['pv1_height'].value
    lor2_pos = result.params['lor2_center'].value
    lor2_height = result.params['lor2_height'].value
    #peak_area = pars['gau1_fwhm'].value*peak_amp
    #plt.xlim([theta_initial, theta_final])
    #plt.ylim([100, 500])
    #plt.semilogy(x_fit, y_fit, 'bo')
    
    #plt.semilogy (x_fit, result.init_fit, 'k--')    
    #plt.semilogy(x_fit, result.best_fit, 'r-')
    #plt.show()
    return pv1_pos, pv1_height, lor2_pos, lor2_height
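A minimal usage sketch (not part of the original), assuming the lmfit models used by fitsample are imported at module level; the scan file is hypothetical:

import numpy as np

# Hypothetical diffraction scan: column 0 = 2-theta, column 1 = counts.
data = np.loadtxt('xrd_scan.dat')
pv1_pos, pv1_height, lor2_pos, lor2_height = fitsample(data, 13.0, 14.0)
print(pv1_pos, pv1_height, lor2_pos, lor2_height)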
Example #5
def fit_one_Voigt(x_lst,y_lst, pre):
    '''
    Fits one Voigt profile and returns the
    lmfit result object
    '''
    x_lst = np.asarray(x_lst)
    y_lst = np.asarray(y_lst)
    
    mod = VoigtModel(prefix = pre, independent_vars=['x'],nan_policy='raise')
    
    # here we set up the peak fitting guess. Then the peak fitter will make a parameter object out of them
    mod.set_param_hint(pre+'amplitude', value = 4 * np.max(y_lst), min = 3*np.max(y_lst), max = 7*np.max(y_lst), vary=True)
    # mod.set_param_hint(prefp+'center', value = x_max, min = x_max*(1-wiggle_room), max = x_max*(1+wiggle_room),vary=True)
    mod.set_param_hint(pre+'center', value = x_lst[np.argmax(y_lst)], vary=True)
    # Basically FWHM/3.6
    w_guess = 2
    mod.set_param_hint(pre+'sigma', value = w_guess, min = 0, max = 5*w_guess,vary=True)
    
    result = mod.fit(y_lst, x = x_lst, params = mod.make_params())
    
    return result
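A minimal usage sketch on synthetic data (not part of the original), assuming numpy (as np) and VoigtModel are imported at module level as fit_one_Voigt requires:

import numpy as np
from lmfit.lineshapes import voigt

# Synthetic Voigt peak at x = 50 plus a little noise.
x = np.linspace(0, 100, 501)
y = voigt(x, amplitude=50, center=50, sigma=1, gamma=1) + 0.05 * np.random.rand(x.size)

result = fit_one_Voigt(x, y, 'p1_')
print(result.params['p1_center'].value, result.params['p1_fwhm'].value)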
Example #6
def fit_voigt(ax, spectra, args):
    fit_range = args.pl_range
    wl = spectra[:, 0]
    sp = spectra[:, 2]

    wl_fit = wl[(wl > fit_range[0]) & (wl < fit_range[1])]
    sp_fit = sp[(wl > fit_range[0]) & (wl < fit_range[1])]

    mod = VoigtModel() + ConstantModel()
    pars = mod.make_params(amplitude=np.max(sp_fit),
                           center=wl_fit[np.argmax(sp_fit)],
                           sigma=10,
                           gamma=10,
                           c=0)

    out = mod.fit(sp_fit, pars, x=wl_fit)

    ax.plot(wl_fit, out.best_fit, 'k--', alpha=0.8)
    print(f'Peak center = {out.params["center"].value:.2f} nm \n'
          f'FWHM = {out.params["fwhm"].value:.2f} nm')

    return out.params['center'].value, out.params['fwhm'].value,
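A minimal usage sketch (not part of the original), assuming the module-level imports fit_voigt relies on (numpy as np, VoigtModel, ConstantModel); the file name, column layout (wavelength in column 0, PL signal in column 2), and fit window are hypothetical:

import numpy as np
import matplotlib.pyplot as plt
from types import SimpleNamespace

spectra = np.loadtxt('pl_spectrum.dat')
args = SimpleNamespace(pl_range=(600, 700))   # stands in for parsed CLI arguments

fig, ax = plt.subplots()
ax.plot(spectra[:, 0], spectra[:, 2], 'b-')
center, fwhm = fit_voigt(ax, spectra, args)
plt.show()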
Example #7
#!/usr/bin/env python
from numpy import loadtxt
from lmfit import fit_report
from lmfit.models import GaussianModel, VoigtModel
import matplotlib.pyplot as plt

data = loadtxt('test_peak.dat')
x = data[:, 0]
y = data[:, 1]

mod = VoigtModel()
params = mod.make_params()

for par in params.values():
    print(par)

out1 = mod.fit(y, params, x=x)

print('With Voigt: ')
print(fit_report(out1.params, min_correl=0.25))
print('Chi-square = %.3f, Reduced Chi-square = %.3f' %
      (out1.chisqr, out1.redchi))

plt.plot(x, y, 'ko')
plt.plot(x, out1.best_fit, 'b-')

# make gamma variable
params['gamma'].value = 0.7111
params['gamma'].vary = True
params['gamma'].expr = None
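The snippet above stops right after freeing gamma; a hedged sketch of the natural follow-up (not part of the original) is to refit with the independent gamma and compare:

out2 = mod.fit(y, params, x=x)

print('With Voigt (independent gamma): ')
print(fit_report(out2.params, min_correl=0.25))
print('Chi-square = %.3f, Reduced Chi-square = %.3f' %
      (out2.chisqr, out2.redchi))

plt.plot(x, out2.best_fit, 'r--')
plt.show()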
Example #8
for filename in files:
    x, y = np.loadtxt("./19-04-26/" + filename, unpack=True)
    x = 1239.8 / x

    if counter == 25:
        filtro_total = filtro[1]
    elif counter == 29:
        filtro_total = filtro[2]
    y = (y - a) * filtro_total

    # Print the filename to keep track of progress in the terminal
    print(filename)

    # Initial guesses (seeds) supplied iteratively
    pars = L0_mod.make_params(center=center[0],
                              amplitude=amplitude[0],
                              sigma=sigma[0])
    pars += L1_mod.make_params(center=center[1],
                               amplitude=amplitude[1],
                               sigma=sigma[1])
    pars += L2_mod.make_params(center=center[2],
                               amplitude=amplitude[2],
                               sigma=sigma[2])
    #pars+=L3_mod.make_params(center=center[3], amplitude= amplitude[3], sigma=sigma[3])
    pars += c_mod.make_params(intercept=c, slope=slope)

    # Define the model function
    mod = L0_mod + c_mod + L1_mod + L2_mod  #+L3_mod
    # Fit
    out = mod.fit(y, pars, x=x)
Example #9
def pre_edge_baseline(energy, norm=None, group=None, form='lorentzian',
                      emin=None, emax=None, elo=None, ehi=None,
                      with_line=True, _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectrum
    and that the spectrum has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline ['True']


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        norm          spectrum over pre-edge peak energies
        peaks         baseline-subtracted spectrum over pre-edge peak energies
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy, norm=norm, group=group, emin=emin, emax=emax,
                   elo=elo, ehi=ehi, _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy))/5.0

    imin = index_of(energy, emin+dele)
    ilo  = index_of(energy, elo+dele)
    ihi  = index_of(energy, ehi+dele)
    imax = index_of(energy, emax+dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo+1], energy[ihi:imax+1]))
    ydat = np.concatenate((norm[imin:ilo+1], norm[ihi:imax+1]))


    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0, sigma=2.0,
                               center=emax,
                               intercept=0, slope=0)
    params['amplitude'].min =  0.0
    params['sigma'].min     =  0.25
    params['sigma'].max     = 50.0
    params['center'].max    = emax + 25.0
    params['center'].min    = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]
    bline = peaks = dpeaks = norm*0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm-bline

        # estimate centroid
        cen = (edat*peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except:
            dpeaks = 0.0

        cen_plus = (edat*(peaks+dpeaks)).sum()/ (peaks+dpeaks).sum()
        cen_minus = (edat*(peaks-dpeaks)).sum()/ (peaks-dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat, norm=norm, baseline=bline,
                           peaks=peaks, delta_peaks=dpeaks,
                           centroid=cen, delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin, emax=emax, elo=elo, ehi=ehi,
                           form=form, with_line=with_line)
    return
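A hedged usage sketch (not part of the original), assuming current xraylarch packaging (larch.io.read_ascii, larch.xafs.pre_edge and pre_edge_baseline) and a hypothetical data file; pre_edge() must run first so that e0 and norm exist, as the docstring notes:

from larch.io import read_ascii
from larch.xafs import pre_edge, pre_edge_baseline

dat = read_ascii('fe_k_edge.dat', labels='energy mu')
pre_edge(dat.energy, mu=dat.mu, group=dat)        # sets dat.e0 and dat.norm
pre_edge_baseline(dat.energy, norm=dat.norm, group=dat, form='voigt')
print(dat.prepeaks.centroid, dat.prepeaks.peak_energies)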
Example #10
def fitting_function(x, slope, intercept, ctr1, alpha1, gamma1, amp1, ctr2,
                     alpha2, gamma2, amp2, ctr3, alpha3, gamma3, amp3, ctr4,
                     alpha4, gamma4, amp4):
    val = slope*x+intercept + Voigt(x,ctr1,alpha1,gamma1,amp1) + Voigt(x,ctr2,alpha2,gamma2,amp2)\
    +Voigt(x,ctr3,alpha3,gamma3,amp3) + Voigt(x,ctr4,alpha4,gamma4,amp4)
    return val


spectra = fitting_function(time, 0.1, 2, 20, 3, 3, -70, 45, 2, 2, -90, 70, 1,
                           1, -150, 85, 2, 2, -60)
spectra = [val + 2 * np.random.rand() for val in spectra]

lin_shift = LinearModel(prefix='lin_')
pars = lin_shift.guess(spectra, x=time)

voight1 = VoigtModel(prefix='v1_')
pars.update(voight1.make_params())

pars['v1_center'].set(20)
pars['v1_sigma'].set(3)
pars['v1_gamma'].set(3)
pars['v1_amplitude'].set(-70)

voight2 = VoigtModel(prefix='v2_')
pars.update(voight2.make_params())

pars['v2_center'].set(45)
pars['v2_sigma'].set(2)
pars['v2_gamma'].set(2)
pars['v2_amplitude'].set(-90)

voight3 = VoigtModel(prefix='v3_')
Example #11
def fit_two_Voigt(x_lst,y_lst,x_min_flt,x_max_flt,print_all_fits_bool,place_to_save_str):
    '''
    x_lst = x axis
    y_lst = spectra to fit
    first = beginning of fitting regions
    last = end of fitting region
    print_all_fits = Bool, do you want to save all plots
    place_to_save = string that is the filename where we're saving the data
    
    This takes the spectrum and fits two Voigt curves to it.
    Returns dictionary of fit values 
    Parameters have prefixes "one" for first V, "two" for second V, "c" for constant
    '''
    
    # Follows
    # http://lmfit.github.io/lmfit-py/builtin_models.html#example-1-fit-peaked-data-to-gaussian-lorentzian-and-voigt-profiles
    # and 
    # http://cars9.uchicago.edu/software/python/lmfit_MinimizerResult/builtin_models.html
    # and 
    # the alpha version of this code
    # Figure out the composite model
    
    import numpy as np
    # for smoothing the curves
    import scipy.interpolate as interp #import splev 
    
    from lmfit.models import VoigtModel, ConstantModel
    
    # Restrict the fit
    x_fit = []
    y_fit = []
    
    for x,y in zip(x_lst, y_lst):
        if x_min_flt < x < x_max_flt:
            x_fit.append(float(x))
            y_fit.append(float(y))
    
    x_fit = np.asarray(x_fit)
    y_fit = np.asarray(y_fit)   
    
    # now we find the parameters using the - d^2/dx^2
    ysmooth = interp.interp1d(x_fit, y_fit, kind='cubic')
    # differentiate twice
    yp = np.gradient(ysmooth(x_fit))
    ypp = np.gradient(yp)
    # we want the peaks of -d2/dx2 
    ypp = np.asarray([-x for x in ypp])
    
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    # % of wavelength you want the peak centers to move 
    wiggle_room = .05
    w_guess = 1 # sigma
    pref1 = 'one'
    pref2 = 'two'
    prefo = 'off'
    
    # if the fancy shit doesn't work, this is how far in index
    # we shift the 2nd peak and max over
    doesnt_work_shift = 10
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    
    # this is the money
    # defines the model that'll be fit
    peak1 = VoigtModel(prefix = pref1, independent_vars=['x'],nan_policy='raise')
    peak2 = VoigtModel(prefix = pref2, independent_vars=['x'],nan_policy='raise')
    offset = ConstantModel(prefix=prefo, independent_vars=['x'],nan_policy='raise')
    
    mod = peak1 + peak2 + offset
    
    # guess parameters
    x_max = x_fit[np.argmax(ypp)]
    y_max = y_fit[np.argmax(ypp)]
    
    # peak #1 
    # here we set up the peak fitting guess. Then the peak fitter will make a parameter object out of them
    mod.set_param_hint(pref1+'amplitude', value = 4*y_max, min=y_max*.8,max = y_max*9,vary=True)
    
    mod.set_param_hint(pref1+'center', value = x_max, min = x_max*(1-wiggle_room), max = x_max*(1+wiggle_room),vary=True)
    
    mod.set_param_hint(pref1+'sigma', value = w_guess, min = 0, max = 5*w_guess,vary=True)
    
    # Maybe change gamma
    mod.set_param_hint(pref1+'gamma', value = 1, vary=True)
    
    # peak #2
    x_trunk = []
    y_trunk = []
    ypp_trunk = []
    try:
        for a,b,c in zip(x_fit.tolist(),y_fit.tolist(),ypp.tolist()):
            '''
            BAD CODE MAKE THIS BETTER
            '''
            if x_max + 8 < a < x_max + 12:
                x_trunk.append(a)
                y_trunk.append(b)
                ypp_trunk.append(c)
        x_trunk = np.asarray(x_trunk)
        y_trunk = np.asarray(y_trunk)
        ypp_trunk = np.asarray(ypp_trunk)
        
        x_max_2 = x_trunk[np.argmax(ypp_trunk)]
        y_max_2 = y_trunk[np.argmax(ypp_trunk)]
        
    except ValueError:
        # fall back to shifting from the first peak in the full arrays
        x_max_2 = x_fit[np.argmax(ypp) + doesnt_work_shift]
        y_max_2 = y_fit[np.argmax(ypp) + doesnt_work_shift]
    
    # add peak 2 parameters
    mod.set_param_hint(pref2+'amplitude', value = 4*y_max_2, min=y_max_2*.8,max = y_max_2*9,vary=True)
    # changed the bounds to be near other peak
    mod.set_param_hint(pref2+'center', value = x_max_2, min = x_max+8, max = x_max+14,vary=True)
    mod.set_param_hint(pref2+'sigma', value = w_guess, min = 0, max = 5*w_guess,vary=True)
    
    # Maybe change gamma
    mod.set_param_hint(pref2+'gamma', value = 1, vary=False)
    
    # constant offset
    mod.set_param_hint(prefo+'c', value = y_fit[-1], min = 0, max = 5*y_fit[-1],vary=False)
    
    # this does the fitting
    # the params = mod.ma... is what initializes the parameters
    result = mod.fit(y_fit, x=x_fit, params = mod.make_params())
    
    # If print all fits ... 
    if print_all_fits_bool:
        x_dense = np.arange(x_min_flt,x_max_flt,(x_max_flt-x_min_flt)/300.0).tolist()
        
        result.plot_fit(xlabel='Inv Cm', ylabel='counts',datafmt = 'xb', numpoints=len(x_fit)*10)
        
        '''
        Here we make parameters for peak 1 and 2
        '''
        for x in result.best_values:
            if pref1 in x:
                peak1.set_param_hint(x, value = result.best_values[str(x)])
            elif pref2 in x:
                peak2.set_param_hint(x, value = result.best_values[str(x)])
            else:
                peak1.set_param_hint(x, value = result.best_values[str(x)])
                peak2.set_param_hint(x, value = result.best_values[str(x)])
        
        comp = [peak1.eval(x=yy, params=peak1.make_params()) for yy in x_dense]
        plt.plot(x_dense,comp, 'green', label = None)
        
        comp = [peak2.eval(x=yy, params=peak2.make_params()) for yy in x_dense]
        plt.plot(x_dense, comp, 'green', label= None)
        plt.title("Fit vs Data")
        plt.ylim(0, 1.1*np.max(y_fit))
        plt.legend()
        plt.savefig(place_to_save_str)
        plt.clf()
        
    return result.best_values
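A minimal usage sketch (not part of the original), assuming matplotlib.pyplot is imported as plt at module level (the plotting branch needs it); the data file, fit window, and output path are hypothetical:

import numpy as np

wavenumber, counts = np.loadtxt('raman_scan.txt', unpack=True)
best = fit_two_Voigt(wavenumber, counts, 80.0, 140.0, True, 'two_voigt_fit.png')
print(best['onecenter'], best['twocenter'])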
Example #12
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectrum
    and that the spectrum has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group (see note 1)
       norm (ndarray or group):   array of normalized mu(E)
       group (group or None):     output group
       elo (float or None):       low energy of pre-edge peak region to not fit baseline [e0-20]
       ehi (float or None):       high energy of pre-edge peak region to not fit baseline [e0-10]
       emax (float or None):      max energy (eV) to use for baseline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baseline fit [e0-40]
       form (string):             form used for baseline (see note 2)  ['lorentzian']
       with_line (bool):          whether to include linear component in baseline ['True']
       _larch (larch instance or None):  current larch session.


    A function will be fit to the input mu(E) data over the range between
    [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the region
    [elo:ehi].  The baseline function is specified with the `form` keyword
    argument, which can be one of 'lorentzian', 'gaussian', or 'voigt',
    with 'lorentzian' the default.  In addition, the `with_line` keyword
    argument can be used to add a line to this baseline function.

    A group named 'prepeaks' will be used or created in the output group, containing

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         baseline         fitted baseline array over pre-edge peak energies
         norm             spectrum over pre-edge peak energies
         peaks            baseline-subtracted spectrum over pre-edge peak energies
         centroid         estimated centroid of pre-edge peaks (see note 3)
         peak_energies    list of predicted peak energies (see note 4)
         fit_details      details of fit to extract pre-edge peaks.
        ==============   ===========================================================

    Notes:
       1. Supports :ref:`First Argument Group` convention, requiring group members `energy` and `norm`
       2. Supports :ref:`Set XAFS Group` convention within Larch or if `_larch` is set.
       3. The value calculated for `prepeaks.centroid`  will be found as
          (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
       4. The values in the `peak_energies` list will be predicted energies
          of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy,
                   norm=norm,
                   group=group,
                   emin=emin,
                   emax=emax,
                   elo=elo,
                   ehi=ehi,
                   _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy)) / 5.0

    imin = index_of(energy, emin + dele)
    ilo = index_of(energy, elo + dele)
    ihi = index_of(energy, ehi + dele)
    imax = index_of(energy, emax + dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]
    bline = peaks = dpeaks = norm * 0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm - bline

        # estimate centroid
        cen = (edat * peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except:
            dpeaks = 0.0

        cen_plus = (edat * (peaks + dpeaks)).sum() / (peaks + dpeaks).sum()
        cen_minus = (edat * (peaks - dpeaks)).sum() / (peaks - dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           norm=norm,
                           baseline=bline,
                           peaks=peaks,
                           delta_peaks=dpeaks,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
Example #13
def fit_vogte(x, y, p1, n=1, equal_widths=False, make_fit=True):
    x = array(x)
    y = array(y)
    sub_models = []

    for i in range(n):
        prefix = 'v' + str(i + 1) + '_'
        vi = VoigtModel(prefix=prefix)
        if i == 0:
            parameters = vi.make_params()
        else:
            parameters.update(vi.make_params())

        center = p1[0][i]
        A = p1[1][i]
        gamma = p1[2][i]
        if equal_widths: gamma = p1[2][0]

        #We correct in order to use our own convention
        gammap = gamma / 2.0
        Ap = A * pi * gammap
        if len(p1) == 4:
            #We use the suggested sigma if it is provided.
            sigma = p1[3][i]
            if equal_widths: sigma = p1[3][0]
            Ap = A / (-1 / 2.0 * sqrt(2.0))
            Ap = Ap / ((erf(1 / 2.0 * sqrt(2.0) * gammap / sigma) - 1.0))
            Ap = Ap / (exp(1 / 2.0 * gammap**2 / sigma**2) /
                       (sqrt(pi) * sigma))
        else:
            sigma = 0.00001

        parameters[prefix + 'center'].set(center)
        parameters[prefix + 'amplitude'].set(Ap, min=0.0)
        if equal_widths and i != 0:
            parameters[prefix + 'sigma'].set(sigma, expr='v1_sigma')
            parameters[prefix + 'gamma'].set(value=gammap,
                                             vary=True,
                                             expr='v1_gamma')
        else:
            parameters[prefix + 'sigma'].set(sigma)
            parameters[prefix + 'gamma'].set(value=gammap, vary=True)

        sub_models += [vi]

    mod = sub_models[0]
    for i in range(1, n):
        mod += sub_models[i]

    #We decide whether to make the fit or return the initial guess.
    if not make_fit:
        init = mod.eval(parameters, x=x)
        return p1 + [[0 for i in range(n)]
                     for j in range(1)], [[0 for i in range(n)]
                                          for j in range(5)], init

    #We make the fit.
    def my_aborting_function(params, iteration, resid, *args, **kws):
        max_iter = 2000
        if iteration > max_iter:
            raise ValueError('The maximum number of iterations was reached: '
                             + str(max_iter))

    out = mod.fit(y, parameters, x=x, iter_cb=my_aborting_function)

    var_names = out.result.var_names
    error_bars0 = {}
    for i in var_names:
        ii = var_names.index(i)
        error_bar = sqrt(out.result.covar[ii][ii])
        error_bars0.update({i: error_bar})

    result = out.values

    #We gather the results to return them in the format
    #[[x1,x2,x3,...],[A1,A2,A3,...],[gamma1,gamma2,gamma3,...],
    # [sigma1,sigma2,sigma3,...],[fwhm1,fwhm2,fwhm3,...]]
    fit = [[0 for i in range(n)] for i in range(5)]
    error_bars = [[0 for i in range(n)] for i in range(5)]
    for i in range(n):

        center = result['v' + str(i + 1) + '_center']
        Ap = result['v' + str(i + 1) + '_amplitude']
        gammap = result['v' + str(i + 1) + '_gamma']
        sigma = result['v' + str(i + 1) + '_sigma']

        center_bar = error_bars0['v' + str(i + 1) + '_center']
        Ap_bar = error_bars0['v' + str(i + 1) + '_amplitude']
        if equal_widths:
            gammap_bar = error_bars0['v1_gamma']
            sigma_bar = error_bars0['v1_sigma']
        else:
            gammap_bar = error_bars0['v' + str(i + 1) + '_gamma']
            sigma_bar = error_bars0['v' + str(i + 1) + '_sigma']
        #We will calculate the error bar of the fwhm conservatively:
        fwhm_bar = 2 * gammap_bar + sigma_bar

        #We correct in order to use our own convention
        A = -1 / 2.0 * sqrt(2.0) * Ap * (
            erf(1 / 2.0 * sqrt(2.0) * gammap / sigma) - 1)
        A = A * exp(1 / 2.0 * gammap**2 / sigma**2) / (sqrt(pi) * sigma)
        gamma = 2 * gammap

        #We correct the convention for the error bars. The error bar
        #for the amplitude should be calculated with error-propagating formulae
        #but since it is not of great interest to us, this will suffice for now.
        A_bar = -1 / 2.0 * sqrt(2.0) * Ap_bar * (
            erf(1 / 2.0 * sqrt(2.0) * gammap / sigma) - 1)
        A_bar = A_bar * exp(
            1 / 2.0 * gammap**2 / sigma**2) / (sqrt(pi) * sigma)
        gamma_bar = 2 * gammap_bar

        #We calculate the FWHM according to [1]
        fg = 2.0 * sigma * sqrt(2.0 * log(2.0))
        fl = gamma
        fwhm = 0.5346 * abs(fl) + sqrt(0.2166 * fl**2 + fg**2)

        #We save the results.
        fit[0][i] = center
        fit[1][i] = A
        fit[2][i] = gamma
        fit[3][i] = sigma
        fit[4][i] = fwhm

        error_bars[0][i] = center_bar
        error_bars[1][i] = A_bar
        error_bars[2][i] = gamma_bar
        error_bars[3][i] = sigma_bar
        error_bars[4][i] = fwhm_bar

    fitted_curve = out.best_fit

    return fit, error_bars, fitted_curve
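A minimal usage sketch (not part of the original) showing the nested p1 guess format fit_vogte expects: [[centers], [amplitudes], [gammas]] plus an optional fourth list of sigmas, in the code's own convention. It assumes the star imports the function relies on (numpy names, scipy.special.erf, VoigtModel) and an lmfit version for which the out.values / out.result access above is valid:

from numpy import linspace
from lmfit.lineshapes import voigt

x = linspace(-5, 5, 400)
y = (voigt(x, amplitude=3, center=-1.0, sigma=0.2, gamma=0.3)
     + voigt(x, amplitude=2, center=1.2, sigma=0.2, gamma=0.3))

p1 = [[-1.0, 1.2],   # centers
      [4.0, 3.0],    # amplitude guesses (peak heights in this convention)
      [0.6, 0.6],    # Lorentzian widths (gamma = 2*gammap)
      [0.2, 0.2]]    # sigmas (optional fourth list)
fit, error_bars, fitted_curve = fit_vogte(x, y, p1, n=2)
print(fit[0], fit[4])   # fitted centers and FWHMs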
Example #14
def fit_Voigt_and_step(x_lst,y_lst,x_min_flt,x_max_flt,print_all_fits_bool,place_to_save_str):
    '''
    x_lst = x axis
    y_lst = spectra to fit
    first = beginning of fitting regions
    last = end of fitting region
    print_all_fits = Bool, do you want to save all plots
    place_to_save = string that is the filename where we're saving the data
    
    '''
    import numpy as np
    # for smoothing the curves
    import scipy.interpolate as interp #import splev 
    
    from lmfit.models import VoigtModel, StepModel, ConstantModel
    from lmfit import CompositeModel
    
    # Restrict the fit
    x_fit = []
    y_fit = []
    
    for x,y in zip(x_lst, y_lst):
        if x_min_flt < x < x_max_flt:
            x_fit.append(float(x))
            y_fit.append(float(y))
    
    x_fit = np.asarray(x_fit)
    y_fit = np.asarray(y_fit)   
    
    # now we find the parameters using the - d^2/dx^2
    ysmooth = interp.interp1d(x_fit, y_fit, kind='cubic')
    # differentiate twice
    yp = np.gradient(ysmooth(x_fit))
    ypp = np.gradient(yp)
    # we want the peaks of -d2/dx2 
    ypp = np.asarray([-x for x in ypp])
    
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    step_at = 100
    step_width = 3
    prefp = "one"
    prefs = "stp"
    prefc = 'c'    
    w_guess = 3 # sigma
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    
    # this is the money
    # defines the model that'll be fit
    peak = VoigtModel(prefix = prefp, independent_vars=['x'],nan_policy='raise')
    step = StepModel(prefix = prefs, independent_vars=['x'], nan_policy='raise', form='logistic')
    const = ConstantModel(prefix = prefc, independent_vars=['x'], nan_policy='raise')
    
    mod = peak + step + const
    
    # guess parameters
    x_max = x_fit[np.argmax(y_fit)]
    y_max = y_fit[np.argmax(y_fit)]
    
    # Peak
    # here we set up the peak fitting guess. Then the peak fitter will make a parameter object out of them
    mod.set_param_hint(prefp+'amplitude', value = 4*y_max, min = y_max,max = 30*y_max, vary=True)
    # mod.set_param_hint(prefp+'center', value = x_max, min = x_max*(1-wiggle_room), max = x_max*(1+wiggle_room),vary=True)
    mod.set_param_hint(prefp+'center', value = x_max, vary=True)
    # Basically FWHM/3.6
    mod.set_param_hint(prefp+'sigma', value = w_guess, min = 0, max = 5*w_guess,vary=True)
    
    # Step
    # Step height
    delta = abs(y_fit[-1]-y_fit[0])
    mod.set_param_hint(prefs+'amplitude', value = delta, min = delta*.9, max = delta*1.1, vary=True)
    # Characteristic width
    mod.set_param_hint(prefs+'sigma', value = 2,min = 1, max = 3, vary=True)
    # The half way point... 
    mod.set_param_hint(prefs+'center', value = step_at, min = step_at-step_width, max = step_at+step_width, vary = True)
    
    # Constant
    mod.set_param_hint(prefc+'c', value = y_fit[-1], min = 0, max = 2*y_fit[0],vary=True)    
    
    result = mod.fit(y_fit, x=x_fit, params = mod.make_params())
    
    # If print all fits ... 
    if print_all_fits_bool:
        x_dense = np.arange(x_min_flt,x_max_flt,(x_max_flt-x_min_flt)/300.0).tolist()
        
        result.plot_fit(xlabel='Inv Cm', ylabel='counts',datafmt = 'xb', numpoints=len(x_fit)*10)
        
        for x in result.best_values:
            if prefp in x:      # Get peak
                peak.set_param_hint(x, value = result.best_values[str(x)])
            elif prefs in x:    # Get step
                step.set_param_hint(x, value = result.best_values[str(x)])
        
        comp = [result.best_values['cc'] + peak.eval(x=yy, params=peak.make_params()) for yy in x_dense]
        plt.plot(x_dense,comp, 'green', label = None)
        
        comp = [result.best_values['stpamplitude'] + result.best_values['cc']]*len(x_dense)
        plt.plot(x_dense, comp, 'green', label= None)
        
        # comp = [result.best_values['cc'] + step.eval(x=yy, params=step.make_params()) for yy in x_dense]
        # plt.plot(x_dense, comp, 'green', label= None)
        
        plt.title("Fit vs Data")
        plt.legend()
        plt.savefig(place_to_save_str)
        plt.clf()    
    
    return result.best_values
Example #15
for filename in files:
    x, y = np.loadtxt(direc + filename, unpack=True)
    x = 1239.8 / x

    if counter == 7:
        filtro_total = filtro_3 * filtro_2  #*filtro_feno

    y = (y - a) * filtro_total

    # Print the filename to keep track of progress in the terminal
    print(filename)

    # Initial guesses (seeds) supplied iteratively
    pars = L0_mod.make_params(center=center[0],
                              amplitude=amplitude[0],
                              sigma=sigma[0])
    pars += L1_mod.make_params(center=center[1],
                               amplitude=amplitude[1],
                               sigma=sigma[1])
    pars += L2_mod.make_params(center=center[2],
                               amplitude=amplitude[2],
                               sigma=sigma[2])
    pars += L3_mod.make_params(center=center[3],
                               amplitude=amplitude[3],
                               sigma=sigma[3])
    pars += L4_mod.make_params(center=center[4],
                               amplitude=amplitude[4],
                               sigma=sigma[4])
    pars += L5_mod.make_params(center=center[5],
                               amplitude=amplitude[5],
Example #16
    x, y = np.loadtxt(direc + filename, unpack=True)
    x = 1239.8 / x

    if counter == 4:
        filtro_total = filtro_3 * filtro_2 * filtro_1
    elif counter == 8:
        filtro_total = filtro_3 * filtro_2 * filtro_1 * negro

    y = (y - a) * filtro_total

    # Print the filename to keep track of progress in the terminal
    print(filename)

    # Initial guesses (seeds) supplied iteratively
    pars = L0_mod.make_params(center=center[0],
                              amplitude=amplitude[0],
                              sigma=sigma[0])
    #pars+=L1_mod.make_params(center=center[1], amplitude= amplitude[1], sigma=sigma[1])

    pars += c_mod.make_params(intercept=c, slope=slope)
    #pars+=c_mod.make_params(c=c)

    # Define the model function
    mod = L0_mod + c_mod  #+ L1_mod
    # Fit
    out = mod.fit(y, pars, x=x)

    # Write a record to the fit log file
    fit_log.write("\n\n\n\n\n\n Fitted from: %s %s \n" %
                  ("./19-04-26/", filename))
    now = datetime.datetime.now()
Example #17
def fit_Voigt_and_step(x_lst,y_lst,x_min_flt,x_max_flt, pre, width_1, width_2, print_all_fits_bool,place_to_save_str):
    '''
    x_lst = x axis
    y_lst = spectra to fit
    first = beginning of fitting regions
    last = end of fitting region
    print_all_fits = Bool, do you want to save all plots
    place_to_save = string that is the filename where we're saving the data
    
    returns result object
    
    '''

    # Restrict the fit
    x_bkp = x_lst
    y_bkp = y_lst
    
    x_lst, y_lst, y_p, ypp = smooth_and_remove_step(x_lst, y_lst, x_min_flt, x_max_flt,True)
    
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    step_at = 95
    step_width = 10
    prefp = pre
    prefs = "stp"
    prefc = 'c'    
    w_guess = 3 # sigma
    '''
    *******************************************************
    Section of bad code that it'd take too long to do right
    *******************************************************
    '''
    
    # this is the money
    # defines the model that'll be fit
    peak = VoigtModel(prefix = prefp, independent_vars=['x'],nan_policy='raise')
    step = StepModel(prefix = prefs, independent_vars=['x'],form='logistic')
    const = ConstantModel(prefix = prefc, independent_vars=['x'], nan_policy='raise')
    
    mod = peak + step + const
    #mod = peak + const
    
    # guess parameters
    x_max = x_lst[np.argmax(y_lst)]
    y_max = y_lst[np.argmax(y_lst)]
    
    # Peak
    # here we set up the peak fitting guess. Then the peak fitter will make a parameter object out of them
    mod.set_param_hint(prefp+'amplitude', value = value_max*y_max, min = .6*value_max_min*y_max,max = 4*value_max_max*y_max, vary=True)
    # mod.set_param_hint(prefp+'center', value = x_max, min = x_max*(1-wiggle_room), max = x_max*(1+wiggle_room),vary=True)
    mod.set_param_hint(prefp+'center', value = x_max,min = x_max*.97, max = x_max*1.03, vary=True)
    
     # Basically FWHM/3.6
    if pre =='one':     # fitting with only one peak
        mod.set_param_hint(prefp+'sigma', value = width_1, min = .25*width_2, max = 2*width_1,vary=True)
    else:               # fitting with two peaks
        mod.set_param_hint(prefp+'sigma', value = width_2, min = 0, max = width_1,vary=True)
    
    # Constant
    top = []
    bottom = []
    for a,b in zip(x_lst,y_lst):
        if a > 135:
            top.append(b)
        elif a < 93:
            bottom.append(b)
    top = np.mean(np.asarray(top))
    bottom = np.mean(np.asarray(bottom))
            
    mod.set_param_hint(prefc+'c', value = bottom, min = -3*bottom, max = 3*bottom,vary=True)
    
    # restrict the fit again
    x_fit = []
    y_fit = []
    for a,b in zip(x_lst,y_lst):
        if 80 < a < 135:
            x_fit.append(a)
            y_fit.append(b)
    top = y_fit[0]
    bottom = y_fit[-1]
    
    # Step
    # Step height
    delta = 2*abs(top - bottom)
    if delta == 0:
        delta = 1
    mod.set_param_hint(prefs+'amplitude', value = delta, min = -3*delta, max = 3*delta, vary=True)
    # Characteristic width
    mod.set_param_hint(prefs+'sigma', value = 3,min = 1, max = 3, vary=False)
    # The half way point... 
    mod.set_param_hint(prefs+'center', value = step_at, min = step_at-step_width, max = step_at+step_width, vary = False)
    
    result = mod.fit(y_fit, x=x_fit, params = mod.make_params())
    
    # If print all fits ... 
    if print_all_fits_bool:
        x_dense = np.arange(x_min_flt,x_max_flt,(x_max_flt-x_min_flt)/300.0).tolist()
        
        # each component
        for x in result.best_values:
            if prefp in x:      # Get peak
                peak.set_param_hint(x, value = result.best_values[str(x)])
            elif prefs in x:    # Get step
                step.set_param_hint(x, value = result.best_values[str(x)])
        
        # Data - 'background' 
        y_m_background = []
        for a,b in zip(x_lst,y_lst):
            y_m_background.append(b - result.eval(x=a) + peak.eval(x=a,  params=peak.make_params()))
        
        peak_only = [peak.eval(x=yy, params=peak.make_params()) for yy in x_dense]
        #stp_only = [result.best_values['stpamplitude'] + result.best_values['cc']]*len(x_dense)
        # sum of them
        #y_fit = [a+b for a,b in zip(peak_only,stp_only)]
        y_fit = [a+b for a in peak_only]
        
        plt.plot(x_dense,peak_only, 'g', label = 'Peak Only')
        #plt.plot(x_dense,stp_only, 'g--', label = None)
        #plt.plot(x_dense, y_fit, 'g', label = "Fit Result")        
        
        plt.plot(x_lst,y_lst,'bx', label= "Data")
        plt.plot(x_lst,y_m_background,'ko', label= "Data-Background")
        
        plt.title("Fit vs Data")
        plt.xlabel("Inv Cm")
        plt.ylabel("counts")
        plt.legend()
        plt.savefig(place_to_save_str+"Voigt&Step")
        plt.clf()    
    
    return result
Example #18
mask_peak = ((WAVERANGE > line_boundaries[0]) &
             (WAVERANGE < line_boundaries[1]))
theta4_g = WAVERANGE[spectra.iloc[i] == np.min(spectra.iloc[i][mask_peak])][0]
# 5. Line width
theta5_g = line_boundaries[1] - line_boundaries[0]

guess = ([theta1_g, theta2_g, theta3_g, theta4_g, theta5_g])
line_w, line_f = get_filtered_line(WAVERANGE, spectra.iloc[i], 1)

#________________ Define the lmfit params _____________________________________#


# Build models as Voigt and constant
model = VoigtModel() + ConstantModel()
params = model.make_params(ref_wave=theta1_g,
                           cont_level=theta2_g,
                           flux_max=theta3_g,
                           peak_wave=theta4_g,
                           width_peak=theta5_g)

params['center'].min = 8490
params['center'].max = 8494
params['fwhm'].max = 0.5

result = model.fit(line_f, params, x=line_w)


fig, ax = plt.subplots(1, figsize=[15, 10])
ax.plot(WAVERANGE, spectra.iloc[i])
ax.plot(line_w, result.best_fit, c='k')
Example #19
def fit_peaks(x, y, df_peak_init, fix=[], method='leastsq'):
    
    df_new = df_peak_init.copy()
    out_dict = {}
    
    # Run peak fits for each set of peaks
    for i in df_new['set'].unique():
        
        df_peaks = df_new.loc[df_new.set==i]

        # Build bounded x and y vectors
        lb = df_peaks['fit_lb'].iloc[0]
        ub = df_peaks['fit_ub'].iloc[0]
        lb_ind = int(np.where(x>=lb)[0][0])
        ub_ind = int(np.where(x>=ub)[0][0])
        xb = x[lb_ind:ub_ind]
        yb = y[lb_ind:ub_ind]

        # Initialize Baseline model
        comp_mod = []
        
        if df_peaks['bg'].iloc[0]=='lin':
            bg_mod = LinearModel(prefix='bg_')
            pars = bg_mod.make_params(slope=0, intercept=yb.min())
            comp_mod.append(bg_mod)
        elif df_peaks['bg'].iloc[0]=='exp':
            bg_mod = ExponentialModel(prefix='bg_')
            pars = bg_mod.guess(y,x=x)
            comp_mod.append(bg_mod)
        else:
            bg_mod = ConstantModel(prefix='bg_')
            pars = bg_mod.make_params(c=yb.min())
            comp_mod.append(bg_mod)

        # Add peaks
        for index, peak in df_peaks.iterrows():
            prefix = peak['name']+'_'
            
            # Select peak model
            if peak['model']=='voigt':
                peak_temp  = VoigtModel(prefix=prefix)
            elif peak['model']=='gauss':
                peak_temp = GaussianModel(prefix=prefix)
            else:
                peak_temp = GaussianModel(prefix=prefix)

            # Set peak parameter guesses + vary or fix
            pars.update(peak_temp.make_params())
            param_guesses = [p.split('_')[0] for p in peak.index if 'guess' in p]
#             print(param_guesses)
            
            for p in param_guesses:
                pars[prefix+p].set(peak[p+'_guess'])
                
                if p+'_lb' in peak.index:
                    pars[prefix+p].set(min=peak[p+'_lb'])
                if p+'_ub' in peak.index:
                    pars[prefix+p].set(max=peak[p+'_ub'])
                
                if p in fix:
#                     print('fixing', p)
                    pars[prefix+p].set(vary=False)
            
            # No negative peaks
            pars[prefix+'amplitude'].set(min=0)
            
            # Add peak to the composite model
            comp_mod.append(peak_temp)

        # Build composite model
        comp_mod = np.sum(comp_mod)
        out = comp_mod.fit(yb, pars, x=xb, method=method)
        params_dict=out.params.valuesdict()

        # Store peak features in original dataframe
        for peak, prop in [s.split('_') for s in list(params_dict.keys())]:
            df_new.loc[df_new.name==peak,prop] = params_dict[peak+'_'+prop]
        out_dict[i] = out
        
    return df_new, out_dict
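A minimal sketch (not part of the original) of the peak-definition DataFrame fit_peaks expects, with column names inferred from the code above (set, name, model, bg, fit_lb/fit_ub, plus <param>_guess and optional <param>_lb/<param>_ub); it assumes numpy/pandas and the lmfit models are imported at module level, and the data and guesses are hypothetical:

import numpy as np
import pandas as pd
from lmfit.lineshapes import voigt, gaussian

x = np.linspace(10, 20, 500)
y = (voigt(x, 50, 14.0, 0.2, 0.2) + gaussian(x, 20, 16.0, 0.3)
     + 0.5 * x + np.random.normal(0, 0.2, x.size))

df_peak_init = pd.DataFrame([
    {'set': 0, 'name': 'p1', 'model': 'voigt', 'bg': 'lin',
     'fit_lb': 10.0, 'fit_ub': 20.0, 'center_guess': 14.1,
     'center_lb': 13.5, 'center_ub': 14.5,
     'sigma_guess': 0.2, 'amplitude_guess': 40.0},
    {'set': 0, 'name': 'p2', 'model': 'gauss', 'bg': 'lin',
     'fit_lb': 10.0, 'fit_ub': 20.0, 'center_guess': 16.1,
     'center_lb': 15.5, 'center_ub': 16.5,
     'sigma_guess': 0.3, 'amplitude_guess': 15.0},
])

df_fit, results = fit_peaks(x, y, df_peak_init)
print(df_fit[['name', 'center', 'fwhm']])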
Example #20
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectrum
    and that the spectrum has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline ['True']


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin-eneg:emax+epos]
        baseline      fitted baseline array over pre-edge peak energies
        mu            baseline-subtracted spectrum over pre-edge peak energies
        dmu           estimated uncertainty in mu from fit
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.mu).sum() / prepeaks.mu.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.mu` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)

    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to pre_edge_baseline")

    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    imin = index_of(energy, emin)
    ilo = index_of(energy, elo)
    ihi = index_of(energy, ehi)
    imax = index_of(energy, emax)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    # run fit
    result = model.fit(ydat, params, x=xdat)

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]

    # get baseline and resulting mu over edat range
    bline = result.eval(result.params, x=edat)
    mu = norm[imin:imax + 1] - bline

    # uncertainty in mu includes only uncertainties in baseline fit
    dmu = result.eval_uncertainty(result.params, x=edat)

    # estimate centroid and its uncertainty
    cen = (edat * mu).sum() / mu.sum()
    cen_plus = (edat * (mu + dmu)).sum() / (mu + dmu).sum()
    cen_minus = (edat * (mu - dmu)).sum() / (mu - dmu).sum()
    dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    peak_energies = []
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(mu, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           mu=mu,
                           delta_mu=dmu,
                           baseline=bline,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
def voigt_fit(df_cut=None, data_num=None, sigma=0.15):
    # Fit the spectrum with Voigt functions

    x = df_cut[data_num]['w']
    y = df_cut[data_num]['i']

    # Define a linear model
    # Create the parameter object pars
    lin = LinearModel(prefix='lin_')
    pars = lin.guess(y, x=x)

    # First peak
    voigt1 = VoigtModel(prefix='v1_')
    pars.update(voigt1.make_params())
    pars['v1_center'].set(473.3, min=470, max=475)
    pars['v1_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v1_amplitude'].set(10000, min=1)
    pars['v1_gamma'].set(1, min=0.1, max=2, vary=True)

    # Second peak
    voigt2 = VoigtModel(prefix='v2_')
    pars.update(voigt2.make_params())
    pars['v2_center'].set(476.5, min=475, max=478)
    pars['v2_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v2_amplitude'].set(10000, min=1)
    pars['v2_gamma'].set(1, min=0.1, max=2, vary=True)

    # Third peak
    voigt3 = VoigtModel(prefix='v3_')
    pars.update(voigt3.make_params())
    pars['v3_center'].set(480.6, min=476, max=483)
    pars['v3_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v3_amplitude'].set(10000, min=1)
    pars['v3_gamma'].set(1, min=0.1, max=2, vary=True)

    # Fourth peak
    voigt4 = VoigtModel(prefix='v4_')
    pars.update(voigt4.make_params())
    pars['v4_center'].set(484.7, min=483, max=487)
    pars['v4_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v4_amplitude'].set(10000, min=1)
    pars['v4_gamma'].set(1, min=0.1, max=2, vary=True)

    # Fifth peak
    voigt5 = VoigtModel(prefix='v5_')
    pars.update(voigt5.make_params())
    pars['v5_center'].set(487.8, min=485, max=490)
    pars['v5_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v5_amplitude'].set(10000, min=1)
    pars['v5_gamma'].set(1, min=0.1, max=2, vary=True)

    # Sixth peak
    voigt6 = VoigtModel(prefix='v6_')
    pars.update(voigt6.make_params())
    pars['v6_center'].set(493.3, min=490, max=494)
    pars['v6_sigma'].set(sigma, min=sigma, max=sigma + 0.0000000001)
    pars['v6_amplitude'].set(10000, min=1)
    pars['v6_gamma'].set(1, min=0.1, max=2, vary=True)

    mod = voigt1 + voigt2 + voigt3 + voigt4 + voigt5 + voigt6 + lin

    # Initial-guess curve
    init = mod.eval(pars, x=x)

    # Best-fit result
    out = mod.fit(y, pars, x=x)

    # Print the parameter information
    print(out.fit_report(min_correl=0.5))
    # Whether to plot each individual Voigt component
    plot_components = False

    # Plot the result
    plt.figure(figsize=(8, 6))
    plt.plot(x, y, 'b')
    plt.plot(x, init, 'k--')
    plt.plot(x, out.best_fit, 'r-')
    plt.xlabel('wavelength (nm)')
    plt.ylabel('intensity (a.u.)')

    if plot_components:
        comps = out.eval_components(x=x)
        plt.plot(x, comps['v1_'], 'b--')
        plt.plot(x, comps['v2_'], 'b--')
        plt.plot(x, comps['v3_'], 'b--')
        plt.plot(x, comps['v4_'], 'b--')
        plt.plot(x, comps['v5_'], 'b--')
        plt.plot(x, comps['v6_'], 'b--')
        plt.plot(x, comps['lin_'], 'k--')

        plt.show()

    plt.savefig('result{}.jpg'.format(data_num))

    return out.params['v3_gamma'].value
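A minimal usage sketch (not part of the original), assuming pandas, numpy, matplotlib.pyplot (as plt), and the lmfit models are imported at module level as voigt_fit requires; df_cut maps a measurement index to a DataFrame with wavelength column 'w' and intensity column 'i', built here from hypothetical synthetic data:

import numpy as np
import pandas as pd
from lmfit.lineshapes import voigt

w = np.linspace(465, 500, 2000)
centers = [473.3, 476.5, 480.6, 484.7, 487.8, 493.3]
i = 50.0 + sum(voigt(w, 8000, c, 0.15, 1.0) for c in centers)
df_cut = {0: pd.DataFrame({'w': w, 'i': i})}

v3_gamma = voigt_fit(df_cut=df_cut, data_num=0, sigma=0.15)
print('v3 gamma:', v3_gamma)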