def spectrum_calibration(channel_width, energy_list, data_2_calibrate):
    import numpy as np
    import matplotlib.pyplot as plt
    #from scipy.optimize import curve_fit
    #from modelling import gauss
    import statsmodels.api as sm
    from lmfit.models import GaussianModel
    '''
    The while loop identifies the largest peak in the spectrum and records
    its position. It then removes the peak by zeroing channel_width
    channels to the left and right of the maximum, and searches for the
    next largest peak.
    '''

    i = 0
    channel_max_list = []
    gauss_x = []
    gauss_y = []
    fit_channel = []
    while i < len(energy_list):
        channel_max = np.argmax(data_2_calibrate)
        data_left = channel_max - channel_width
        data_right = channel_max + channel_width
        channel_max_list.append(channel_max)
        iterator = data_left
        while iterator < data_right:
            gauss_x.append(iterator)
            gauss_y.append(data_2_calibrate[iterator])
            fit_channel.append(data_2_calibrate[iterator])
            data_2_calibrate[iterator] = 0
            iterator += 1
        x = np.asarray(gauss_x)
        y = np.asarray(gauss_y)
        i += 1
        mod = GaussianModel()
        pars = mod.guess(y, x=x)
        out = mod.fit(y, pars, x=x)
        plt.plot(x, fit_channel)
        plt.plot(x, out.best_fit, '--k')
        plt.show()
        gauss_x = []
        gauss_y = []
        fit_channel = []
        print(out.fit_report(min_correl=10))
    '''
    Sort the channel numbers so that each channel corresponds to the
    correct energy.
    '''
    channel_number = sorted(channel_max_list, key=int)
    energy = energy_list
    results = sm.OLS(energy, sm.add_constant(channel_number)).fit()

    slope, intercept = np.polyfit(channel_number, energy, 1)

    abline_values = [slope * i + intercept for i in channel_number]
    plt.plot(channel_number, energy, 'ro')
    plt.plot(channel_number, abline_values, 'b')
    plt.xlabel('Channel Number')
    plt.ylabel('Energy [keV]')
    plt.title('Best Fit Line')

    #    plt.savefig('../images/BestFitLine_calibrated.png')

    return slope, intercept
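
A minimal usage sketch (synthetic two-peak spectrum and hypothetical line energies, not from the source):

import numpy as np
channels = np.arange(1024)
spectrum = (1000.0 * np.exp(-(channels - 300.0) ** 2 / (2 * 5.0 ** 2)) +
            600.0 * np.exp(-(channels - 700.0) ** 2 / (2 * 6.0 ** 2)) + 5.0)
# known line energies in keV, listed for the lowest channel first
slope, intercept = spectrum_calibration(channel_width=10,
                                        energy_list=[661.7, 1460.8],
                                        data_2_calibrate=spectrum)
energy_axis = slope * channels + intercept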
Example #2
def echo_fits():
    """
    Fits a Gaussian with a linear background to each of the echo peaks, finds the centroid and top of
    the Gaussian, then fits the echo_as_T2 function to the points given by x=centroid, y=top.
    """
    xrs, yrs, xr, yr, filename, dat1 = range_to_list()
    cents: List[float] = []
    cents_uncert: List[float] = []
    heights: List[float] = []
    heights_uncert: List[float] = []
    for i in range(0, len(xrs)):
        mdl = GaussianModel(prefix='G_')
        lne = LinearModel(prefix='L_')
        params = mdl.guess(yrs[i], x=xrs[i])
        params += lne.guess(yrs[i], x=xrs[i])
        max_y = np.max(yrs[i])
        min_y = np.min(yrs[i])
        max_x = np.max(xrs[i])
        min_x = np.min(xrs[i])
        predicted_slope = (max_y - min_y) / (max_x - min_x)
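        # note: min = slope*1.1 < max = slope*0.9 only holds for a negative
        # slope, which the decaying echo tails are assumed to have here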
        params.add('L_slope',
                   value=predicted_slope,
                   min=predicted_slope * 1.1,
                   max=predicted_slope * 0.9)
        params.add('L_intercept',
                   value=min_y,
                   min=min_y * 0.9,
                   max=min_y * 1.1)
        params.add('G_height',
                   value=max_y - min_y,
                   min=(max_y - min_y) * 0.99,
                   max=(max_y - min_y) * 1.05)
        model = mdl + lne
        result = model.fit(yrs[i], params, x=xrs[i], method='leastsq')
        cent: float = result.params['G_center'].value
        amp: float = result.params['G_height'].value
        inter: float = result.params['L_intercept'].value
        grad: float = result.params['L_slope'].value
        height: float = amp + ((cent * grad) + inter)
        heights.append(height)
        cents.append(cent)
        cents_uncert.append(result.params['G_center'].stderr)
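        # error propagation for height = A + grad*cent + inter:
        # partial derivatives d/dA = 1, d/dgrad = cent, d/dinter = 1; the
        # x-term below treats the mean x-spacing as the positional uncertainty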
        partial_amp = 1
        partial_grad = cent
        partial_x = grad
        partial_inter = 1
        amp_term = partial_amp * result.params['G_height'].stderr
        grad_term = partial_grad * result.params['L_slope'].stderr
        x_term = partial_x * np.mean(np.diff(xrs[i]))
        inter_term = partial_inter * result.params['L_intercept'].stderr
        height_uncert = np.sqrt(amp_term**2 + grad_term**2 + x_term**2 +
                                inter_term**2)
        heights_uncert.append(height_uncert)
    heights = np.array(heights)
    cents = np.array(cents)
    maxy = np.max(heights)
    miny = np.min(heights)
    decay_pos = np.where(heights == find_nearest(heights, maxy / e))[0][0]
    decay_pos_time = cents[decay_pos]
    avg_y_sep = abs(np.mean(np.diff(heights)))
    efit = Model(echo_as_T2)
    param = efit.make_params()
    param.add('M0', value=maxy, min=maxy * 0.8, max=maxy + (avg_y_sep * 2))
    param.add('T2',
              value=decay_pos_time,
              min=decay_pos_time * 0.1,
              max=decay_pos_time * 1.5)
    param.add('c', value=miny * 0.3, min=miny * 0.1, max=miny * 1)
    param.add('ph', value=cents[0] * 0.1, min=0, max=cents[0] * 1)
    result_2 = efit.fit(
        heights,
        param,
        t=cents,
        method='leastsq',
        weights=np.sqrt(
            np.mean(np.diff(dat1['m']))**2 + np.array(heights_uncert)**2) /
        heights)
    print(result_2.fit_report())
    print('\n', result_2.params.pretty_print(fmt='e', precision=2))
    ax = plt.gca()
    ax.set_xlabel('Time (s)', fontsize=14)
    ax.set_ylabel('Magnetization (A/m)', fontsize=14)
    xes = np.linspace(np.min(cents), np.max(cents), 100)
    y = efit.eval(t=xes, params=result_2.params)
    plt.plot(xes, y, antialiased=True)
    plt.plot(cents, heights, 'x', ms=8, color='k')
    plt.plot(dat1['t'],
             dat1['m'],
             lw=2,
             antialiased=True,
             color='#4a4a4a',
             zorder=1)
    plt.title(filename)
    plt.xlim(left=0, right=np.max(cents) * 1.1)
    plt.ylim(bottom=0, top=result_2.params['M0'].value * 1.3)
    plt.axhline(result_2.params['M0'].value,
                color='k',
                ls='--',
                alpha=0.7,
                lw=1,
                zorder=2)
    plt.axhline(result_2.params['M0'].value / e,
                color='k',
                ls='--',
                alpha=0.7,
                lw=1,
                zorder=2)
    plt.text(0.9,
             0.9,
             "T_1: {:.4f} s".format(result_2.params['T2'].value),
             horizontalalignment='center',
             verticalalignment="center",
             transform=ax.transAxes,
             bbox={
                 'pad': 8,
                 'fc': 'w'
             },
             fontsize=14)
    plt.tight_layout()
    plt.tick_params(axis='both', which='major', labelsize=13)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
Example #3
def compare_labels(X_true,
                   X_pred,
                   xlabels=None,
                   ylabels=None,
                   reslabels=None,
                   xlims=None,
                   reslims=None,
                   histlim=None,
                   nxb=30,
                   cornerlabel="",
                   figsize=None):

    nlabel = X_true.shape[1]

    if xlabels is None:
        xlabels = ["$X_{{true}}:{}$".format(i) for i in range(nlabel)]
    if ylabels is None:
        ylabels = ["$X_{{pred}}:{}$".format(i) for i in range(nlabel)]
    if reslabels is None:
        reslabels = ["$X_{{res}}:{}$".format(i) for i in range(nlabel)]

    # default xlim
    if xlims is None:
        xlim1 = np.min(np.vstack(
            (np.percentile(X_true, 1, axis=0), np.percentile(X_pred, 1,
                                                             axis=0))),
                       axis=0)
        xlim2 = np.max(np.vstack(
            (np.percentile(X_true, 99, axis=0),
             np.percentile(X_pred, 99, axis=0))),
                       axis=0)
        xlims = (xlim2 - xlim1).reshape(-1, 1) * 0.4 * np.array(
            [-1, 1]) + np.vstack((xlim1, xlim2)).T
    if reslims is None:
        reslims = np.repeat(np.max(np.abs(
            np.percentile(X_pred - X_true, [1, 99], axis=0).T),
                                   axis=1).reshape(-1, 1),
                            2,
                            axis=1) * np.array([-1, 1])
        reslims = np.abs(np.diff(reslims, axis=1)) * np.array(
            [-1, 1]) * 0.2 + reslims

    # run MCMC
    X_bias, X_scatter, frs, histdata = label_diff_lmfit(X_true,
                                                        X_pred,
                                                        bins="auto",
                                                        plot=False,
                                                        emcee=True)
    print("bias", X_bias)
    print("scatter", X_scatter)
    if histlim is None:
        histlim = (0, np.max([np.max(histdata_[0]) for histdata_ in histdata]))
    histlim = np.array(histlim)

    if figsize is None:
        figsize = (3 * nlabel, 3 * nlabel)

    # draw figure
    fig, axs2 = plt.subplots(nlabel + 1, nlabel + 1, figsize=figsize)

    # 1. Gaussian
    gm = GaussianModel()
    for i in range(nlabel):
        plt.sca(axs2[i + 1, -1])
        fr = frs[i]
        hist_, bin_edge_, data_ = histdata[i]
        plt.hist(data_,
                 bins=bin_edge_,
                 histtype="step",
                 orientation="horizontal")
        axs2[i + 1, -1].plot(gm.eval(fr.mcmc.params, x=bin_edge_), bin_edge_)
        axs2[i + 1, -1].tick_params(direction='in', pad=5)
        axs2[i + 1, -1].set_xlim(histlim)
        axs2[i + 1, -1].set_ylim(reslims[i])
        axs2[i + 1, -1].yaxis.tick_right()
        axs2[i + 1, -1].hlines(X_bias[i], *histlim, linestyle='--', color="k")

        pos_text_x = np.dot(np.array([[0.9, 0.1]]), histlim.reshape(-1, 1))
        pos_text_y = np.dot(np.array([[0.15, 0.85]]),
                            reslims[i].reshape(-1, 1))
        axs2[i + 1, -1].text(pos_text_x, pos_text_y,
                             "$bias={:.4f}$".format(X_bias[i]))
        pos_text_x = np.dot(np.array([[0.9, 0.1]]), histlim.reshape(-1, 1))
        pos_text_y = np.dot(np.array([[0.30, 0.70]]),
                            reslims[i].reshape(-1, 1))
        axs2[i + 1, -1].text(pos_text_x, pos_text_y,
                             "$\\sigma={:.4f}$".format(X_scatter[i]))

        if i < nlabel - 1:
            axs2[i + 1, -1].set_xticklabels([])

    axs2[-1, -1].set_xlabel("Counts")

    # 2. diagonal
    for i in range(nlabel):
        image(axs2[0, i], X_true[:, i], X_pred[:, i],
              np.linspace(xlims[i][0], xlims[i][1], nxb),
              np.linspace(xlims[i][0], xlims[i][1], nxb))
        axs2[0, i].set_xlim(*xlims[i])
        axs2[0, i].set_ylim(*xlims[i])
        axs2[0, i].tick_params(direction='in', pad=5)
        axs2[0, i].set_xticklabels([])
        axs2[0, i].set_ylabel(ylabels[i])
        axs2[0, i].plot(xlims[i], xlims[i], 'k--')

    # 3. Xres vs X
    X_res = X_pred - X_true
    for i in range(nlabel):
        for j in range(nlabel):
            image(axs2[j + 1, i], X_true[:, i], X_res[:, j],
                  np.linspace(xlims[i][0], xlims[i][1], nxb),
                  np.linspace(reslims[j][0], reslims[j][1], nxb))
            axs2[j + 1, i].set_xlim(*xlims[i])
            axs2[j + 1, i].set_ylim(*reslims[j])
            axs2[j + 1, i].tick_params(direction='in', pad=5)

            if j != nlabel - 1:
                axs2[j + 1, i].set_xticklabels([])
            else:
                axs2[j + 1, i].set_xlabel(xlabels[i])

            if i != 0:
                axs2[j + 1, i].set_yticklabels([])
            else:
                axs2[j + 1, i].set_ylabel(reslabels[j])

    axs2[0, -1].set_axis_off()
    axs2[0, -1].text(np.mean(axs2[0, -1].get_xlim()),
                     np.mean(axs2[0, -1].get_ylim()),
                     cornerlabel,
                     horizontalalignment='center',
                     verticalalignment='center')

    fig.tight_layout()
    plt.subplots_adjust(wspace=0., hspace=0.)

    return fig, frs
Example #4
#!/usr/bin/env python

# <examples/doc_model_savemodelresult2.py>
import numpy as np

from lmfit.model import save_modelresult
from lmfit.models import ExponentialModel, GaussianModel

dat = np.loadtxt('NIST_Gauss2.dat')
x = dat[:, 1]
y = dat[:, 0]

exp_mod = ExponentialModel(prefix='exp_')
pars = exp_mod.guess(y, x=x)

gauss1 = GaussianModel(prefix='g1_')
pars.update(gauss1.make_params())
pars['g1_center'].set(value=105, min=75, max=125)
pars['g1_sigma'].set(value=15, min=3)
pars['g1_amplitude'].set(value=2000, min=10)

gauss2 = GaussianModel(prefix='g2_')
pars.update(gauss2.make_params())
pars['g2_center'].set(value=155, min=125, max=175)
pars['g2_sigma'].set(value=15, min=3)
pars['g2_amplitude'].set(value=2000, min=10)

mod = gauss1 + gauss2 + exp_mod

init = mod.eval(pars, x=x)
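
# the original lmfit example presumably continues by running the fit and
# saving the result (the .sav filename here is an assumption):
out = mod.fit(y, pars, x=x)
save_modelresult(out, 'nistgauss2_modelresult.sav')
print(out.fit_report())
# <end examples/doc_model_savemodelresult2.py>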
Example #5
    def multiplot(self):
        os.chdir('{}'.format(self.rangepath))
        self.range()
        for i in range(0, len(self.xrange)):

            def lingauss(xvar, co_a, co_b, co_c, co_d, co_e):
                return co_a * np.exp(-((xvar - co_b)**2) /
                                     (2 * (co_c**2))) + co_d * xvar + co_e

            try:
                initial = [
                    np.max(self.yranges[i]),
                    self.xranges[i][np.argmax(self.yranges[i])],
                    np.std(self.xranges[i]), -0.1, 100
                ]
                popt, pcov = curve_fit(lingauss,
                                       self.xranges[i],
                                       self.yranges[i],
                                       initial,
                                       sigma=np.sqrt(self.yranges[i]),
                                       absolute_sigma=True,
                                       maxfev=100000)
            except TypeError:
                continue
            fig = plt.figure()
            fig.subplots_adjust(hspace=0.3, wspace=0)
            ax1 = fig.add_subplot(2, 2, 1)
            ax1.plot(self.xranges[i],
                     lingauss(self.xranges[i], *popt),
                     antialiased=True)
            ax1.plot(self.xranges[i], self.yranges[i], '.', color='#1c1c1c')
            dely = np.sqrt(self.yranges[i])
            ax1.fill_between(self.xranges[i],
                             lingauss(self.xranges[i], *popt) - dely,
                             lingauss(self.xranges[i], *popt) + dely,
                             color="#ABABAB")
            ax1.grid(color='k', linestyle='--', alpha=0.2)
            plt.title('Peak with 1 sigma error bands')

            ax2 = fig.add_subplot(2, 2, 2)
            ax2.plot(self.xranges[i],
                     self.yranges[i] - lingauss(self.xranges[i], *popt),
                     '.',
                     antialiased=True)
            ax2.grid(color='k', linestyle='--', alpha=0.2)
            plt.title('Residuals')

            ax3 = fig.add_subplot(2, 2, 3)
            ax3.plot(
                self.xranges[i],
                ((self.yranges[i] - lingauss(self.xranges[i], *popt))**2) /
                (np.sqrt(self.yranges[i]))**2,
                '.',
                antialiased=True)
            ax3.grid(color='k', linestyle='--', alpha=0.2)
            plt.title('Normalised residuals')

            ax4 = fig.add_subplot(2, 2, 4)
            n, bins, patches = ax4.hist(self.yranges[i] -
                                        lingauss(self.xranges[i], *popt),
                                        bins=10)

            mdl = GaussianModel()
            bin_centre = []
            for t in range(0, len(bins) - 1):
                bin_centre.append((bins[t + 1] + bins[t]) / 2)
            bin_centre2 = np.asarray(bin_centre)
            pars = mdl.guess(n, x=bin_centre2)
            result2 = mdl.fit(n, pars, x=bin_centre2)
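            # coefficient of determination: R^2 = 1 - Var(residuals)/Var(data)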
            corr_coeff = 1 - result2.residual.var() / np.var(n)
            at = AnchoredText(
                "$R^2 = {:.3f}$".format(corr_coeff),
                prop=dict(size=10),
                frameon=True,
                loc=2,
            )
            ax4.add_artist(at)
            ax4.plot(bin_centre2, result2.best_fit, antialiased=True)
            ax4.grid(color='k', linestyle='--', alpha=0.2)
            plt.title('Residual histogram')

            fig.tight_layout()
            fig.set_size_inches(16.5, 10.5)

            plt.show()
Example #6
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.optimize import curve_fit
from lmfit.models import GaussianModel
from lmfit.lineshapes import gaussian

df = np.loadtxt('sarcomerelength.dat')
df1 = df.round(1) - 1.12

hist_1, bins_1 = np.histogram(df1, bins=18, range=(-0.9, 2.6), density=True)
bins = bins_1[:-1]

model = GaussianModel()
params = model.guess(hist_1, x=bins_1[1:])
result = model.fit(hist_1, params, x=bins_1[1:])

x = bins_1[1:]
#ax.bar(bins, hist_1, width=0.11, alpha=0.5, color='m', align='edge')
#ax.plot(bins_1, result[0], 'k--')
#result.plot_fit()
#ax.plot(bins_1[1:], result.init_fit, 'k--')
vd = result.params.valuesdict()

param_df = pd.DataFrame.from_dict(vd, orient="index", columns=["value"])
param_df.to_html("param_df.html")

x_new = np.linspace(x.min(), x.max(), 100)
smooth_gauss = gaussian(x_new,
                        amplitude=param_df.iloc[0, 0],
                        center=param_df.iloc[1, 0],
                        sigma=param_df.iloc[2, 0])
# (call completed here: the remaining arguments are inferred from
# GaussianModel's parameter order amplitude, center, sigma in param_df)
Example #7
    def run(self):

        files = self._get_input_fname(self._input_fname, self._energy)
        #        self.get_data(files, 'SlitG')
        file_no_slit = self._get_input_fname(self._input_no_slit, self._energy)

        dat_l = {}
        for key, value in files.items():
            dat_l[key] = {}
            for subkey, subvalue in value.items():
                if subkey == 'SlitG':
                    for files in subvalue:
                        if files[1] == "img":
                            img_light_l = np.mean(self.load_images(
                                files[0], files[1]),
                                                  axis=2)
                        else:
                            img_dark_l = np.mean(self.load_images(
                                files[0], files[1]),
                                                 axis=2)

                    img_l = img_light_l - img_dark_l
                    #                    img_l = self.crop_image(img_l, 80)
                    dat_l[key] = img_l

        dat_s = {}
        for key, value in file_no_slit.items():
            dat_s[key] = {}
            for subkey, subvalue in value.items():
                if subkey == 'SlitG':
                    for files in subvalue:
                        if files[1] == "img":
                            img_light = np.mean(self.load_images(
                                files[0], files[1]),
                                                axis=2)
                        else:
                            img_dark = np.mean(self.load_images(
                                files[0], files[1]),
                                               axis=2)

                    img = img_light
                    dat_s[key] = img

        fwhm = []
        fwhm_err = []
        energy = []
        for key_slit in (dat_l):
            img_weight = dat_l[key_slit] / dat_s[key_slit]
            img_w = self.crop_image(img_weight, 50)
            plt.imshow(img_w)
            plt.savefig("roi_{}_eV.png".format(key_slit))
            plt.xlabel("Horizontal position [pixel]")
            plt.ylabel("Vertical position [pixel]")
            plt.show()
            #            print(img_w.shape)

            lsf = np.diff(img_w[55])
            amplitude = np.max(lsf)
            peak = np.argmax(lsf) * 11  # scalar pixel index converted to µm (11 µm pixel pitch)
            x_um = [x * 11 for x in np.arange(len(lsf))]

            plt.plot(x_um, lsf)

            model = GaussianModel(prefix='peak_') + ConstantModel()
            params = model.make_params(c=1.0,
                                       peak_center=peak,
                                       peak_sigma=22,
                                       peak_amplitude=amplitude)
            result = model.fit(lsf, params, x=x_um)
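            # Gaussian FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma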
            fwhm.append(2 * np.sqrt(2 * np.log(2)) *
                        result.best_values['peak_sigma'])
            for key in result.params:
                if key == 'peak_fwhm':
                    fwhm_err.append(result.params[key].stderr)

            plt.plot(x_um, result.best_fit)
            #            plt.xlim(0, 70)
            plt.xlabel("Horizontal position [µm]")
            plt.ylabel("Counts")
            plt.savefig("fit_{}_ev.png".format(key_slit))
            plt.show()
            energy.append(key_slit)

        plt.errorbar(energy, fwhm, yerr=fwhm_err, fmt='.')
        print("{} +/- {}".format(fwhm, result.params[key].stderr))

        plt.ylim((0, 5))
        plt.xlabel("Energy [eV]")
        plt.ylabel("FWHM [pixel]")
        plt.savefig("fwhm_vs_energy.png")
        plt.show()
Example #8
def get_alignment_residual(x, engine, hb2b_setup, two_theta, roi_vec_set):
    """ Cost function for peaks alignment to determine wavelength
    :param x: list/array of detector shift/rotation and neutron wavelength values
    :x[0]: shift_x, x[1]: shift_y, x[2]: shift_z, x[3]: rot_x, x[4]: rot_y, x[5]: rot_z, x[6]: wavelength
    :param engine:
    :param hb2b_setup: HB2B class containing instrument definitions
    :param two_theta: list/array of detector positions
    :param roi_vec_set: list/array of ROI/mask vector
    :return:
    """

    GlobalParameter.global_curr_sequence += 1

    residual = np.array([])
    resNone = 0.

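    # Bragg's law: lambda = 2*d*sin(theta), so
    # 2*theta [deg] = (360/pi) * arcsin(lambda / (2*d))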
    TTH_Calib = np.arcsin(x[6] / 2. / dSpace) * 360. / np.pi
    TTH_Calib = TTH_Calib[~np.isnan(TTH_Calib)]

    background = LinearModel()
    for i_tth in range(len(two_theta)):
        #reduced_data_set[i_tth] = [None] * num_reduced_set
        # load instrument: as it changes
        pyrs_reducer = reduce_hb2b_pyrs.PyHB2BReduction(hb2b_setup, x[6])
        pyrs_reducer.build_instrument_prototype(two_theta[i_tth], x[0], x[1],
                                                x[2], x[3], x[4], x[5])

        DetectorAngle = np.abs(two_theta[i_tth])
        mintth = DetectorAngle - 8.0
        maxtth = DetectorAngle + 8.8

        Eta_val = pyrs_reducer.get_eta_value()
        maxEta = np.max(Eta_val) - 2
        minEta = np.min(Eta_val) + 2

        peak_centers = ''
        fit_windows = ''

        FitModel = lmfit.Model(BackGround)
        pars1 = FitModel.make_params(p0=100, p1=1, p2=0.01)

        Peaks = []
        CalibPeaks = TTH_Calib[np.where(
            (TTH_Calib > mintth) & (TTH_Calib < maxtth))[0]]
        for ipeak in range(len(CalibPeaks)):
            if (CalibPeaks[ipeak] > mintth) and (CalibPeaks[ipeak] < maxtth):
                peak_centers += '%.4f,' % CalibPeaks[ipeak]
                fit_windows += '%.4f,%.4f,' % (CalibPeaks[ipeak] - 1,
                                               CalibPeaks[ipeak] + 1)
                Peaks.append(ipeak)
                PeakModel = GaussianModel(prefix='g%d_' % ipeak)
                FitModel += PeakModel

                pars1.update(PeakModel.make_params())
                pars1['g%d_center' % ipeak].set(value=CalibPeaks[ipeak],
                                                min=CalibPeaks[ipeak] - 2,
                                                max=CalibPeaks[ipeak] + 2)
                pars1['g%d_sigma' % ipeak].set(value=0.5, min=1e-3, max=1.0)
                pars1['g%d_amplitude' % ipeak].set(value=50., min=0, max=1e6)

#        peak_centers = peak_centers[:-1]
#        fit_windows = fit_windows[:-1]

        if peak_centers == '':
            residual = np.concatenate([residual, np.array([20000])])

        else:
            # reduce data

            #            for i_roi in range( len( roi_vec_set ) ):
            print(minEta, maxEta)
            eta_roi_vec = np.arange(minEta, maxEta + 0.2, 2)

            num_rows = 1 + len(Peaks) // 2 + len(Peaks) % 2
            ax1 = plt.subplot(num_rows, 1, num_rows)
            ax1.margins(0.05)  # Default margin is 0.05, value 0 means fit

            for i_roi in range(len(roi_vec_set)):
                ws_name_i = 'reduced_data_{:02}'.format(i_roi)
                out_peak_pos_ws = 'peaks_positions_{:02}'.format(i_roi)
                fitted_ws = 'fitted_peaks_{:02}'.format(i_roi)

                # Define Mask
                Mask = np.zeros_like(Eta_val)
                index = np.where((Eta_val > (eta_roi_vec[i_roi] - 1)) &
                                 (Eta_val < (eta_roi_vec[i_roi] + 1)))[0]

                Mask[index] = 1.

                # reduce
                reduced_i = convert_to_2theta(engine,
                                              pyrs_reducer,
                                              Mask,
                                              ws_name_i,
                                              'SimulatedData_%d' %
                                              np.abs(DetectorAngle),
                                              min_2theta=mintth,
                                              max_2theta=maxtth,
                                              num_bins=400)
                Fitresult = FitModel.fit(reduced_i[1], pars1, x=reduced_i[0])

                #                print ('\n\n\n' )
                for p_index in Peaks:
                    residual_sq = (
                        100.0 *
                        (Fitresult.params['g%d_center' % p_index].value -
                         CalibPeaks[p_index]))**2
                    resNone += Fitresult.params[
                        'g%d_center' % p_index].value - CalibPeaks[p_index]
                    #                    print( Fitresult.params['g%d_center'%p_index].value, CalibPeaks[p_index], Fitresult.params['g%d_center'%p_index] - CalibPeaks[p_index] )
                    residual = np.concatenate(
                        [residual, np.array([residual_sq])])

#                print ('\n\n\n' )
# plot

                backgroundShift = np.average(
                    BackGround(reduced_i[0], Fitresult.params['p0'].value,
                               Fitresult.params['p1'].value,
                               Fitresult.params['p2'].value))
                ax1.plot(reduced_i[0], reduced_i[1], color=colors[i_roi % 5])

                for index_i in range(1, len(Peaks) + 1):
                    ax2 = plt.subplot(num_rows, 2, index_i)
                    ax2.plot(reduced_i[0], reduced_i[1], 'x', color='black')
                    ax2.plot(reduced_i[0], Fitresult.best_fit, color='red')
                    ax2.plot([CalibPeaks[p_index], CalibPeaks[p_index]], [
                        backgroundShift, backgroundShift +
                        Fitresult.params['g0_amplitude'].value
                    ],
                             'k',
                             linewidth=2)
                    ax2.set_xlim(
                        [CalibPeaks[p_index] - 1.5, CalibPeaks[p_index] + 1.5])

            plt.savefig('./FitFigures/Round{:010}_{:02}.png'.format(
                GlobalParameter.global_curr_sequence, i_tth))
            plt.clf()

            # fit peaks


#                FitPeaks(InputWorkspace=ws_name_i, OutputWorkspace=out_peak_pos_ws,
#                         StartWorkspaceIndex=0, StopWorkspaceIndex=0,
#                         PeakCenters=peak_centers,
#                         FitWindowBoundaryList=fit_windows,
#                         FittedPeaksWorkspace=fitted_ws,
#                         OutputPeakParametersWorkspace='hb2b_rotate_p30deg_reduced_FITS',  # FIXME - need to give a good name too
#                         OutputParameterFitErrorsWorkspace='hb2b_rotate_p30deg_reduced_Errors')

#            print ( "\n\n\n\n\n\n" )
#            print ( mtd[ out_peak_pos_ws ].readY(0) )
#            print ( CalibPeaks )
#            print ( CalibPeaks - mtd[ out_peak_pos_ws ].readY(0)  )
#            print ( "\n\n\n\n\n\n" )

# plot

#                ax1.plot(reduced_i[0], reduced_i[1], color=colors[i_roi % 5])
#                index_i = i_roi + 1
#                ax2 = plt.subplot(num_rows, 2, index_i)
#                ax2.plot(reduced_i[0], reduced_i[1], color='black')
#                ax2.plot(mtd[fitted_ws].readX(0), mtd[fitted_ws].readY(0), color='red')
#
#
#            plt.savefig('Round{:010}_{:02}.png'.format(GlobalParameter.global_curr_sequence, i_tth))
#            plt.clf()

    norm_cost = residual.sum() / (len(roi_vec_set) * len(two_theta))

    print("\n\n\n")
    print('Residual      = {}'.format(norm_cost))
    print('Residual      = {}'.format(resNone))
    print("\n\n\n")

    return (residual)
Example #9
def gauss_fit_poiss_ph(pnr,
                       min_peak_sep,
                       threshold=None,
                       weighted=False,
                       plot=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    'poiss_ph' naming convention because it only fits the amplitudes to poissonian stats for n>=1
    :param pnr: 2D histogram, output of np.histogram, assumes it's a rectangular function (first bin) + sum of gaussians, but discards the first bin.
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, associates a Poissonian error with the
        frequencies
    """

    # unpack the histogram into x and y values
    # first bin corresponds to n=0 traces with no photon detection events, thus having zero area.
    f0 = pnr[0][0]  #n=0 freq
    frequencies = pnr[0][1:]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1][1:]
    step = np.diff(x_val)[0]
    x0 = pnr[1][0] + step / 2  #n=0 bin
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks([frequencies, x_val], min_peak_sep,
                                   threshold)
    print(peaks_pos, peak_height)

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()
    p.add('n_bar', 0.2)

    p.add('A', np.max(peak_height) * min_peak_sep)
    p.add('Delta_E', peaks_pos[-1] - peaks_pos[-2])
    # p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)
    # n>=1 Centers
    p.add('g1_center', peaks_pos[0], min=0)
    p.add('g2_center', peaks_pos[1], min=0)
    [
        p.add('g{}_center'.format(k + 3),
              j,
              expr='g{}_center + Delta_E'.format(k + 1))
        for k, j in enumerate(peaks_pos[2:])
    ]

    # n>=1 amplitudes
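    # tie each amplitude to Poissonian photon statistics:
    # P(n) = exp(-n_bar) * n_bar**n / n!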
    [
        p.add('g{}_amplitude'.format(k + 1),
              j * min_peak_sep / np.sqrt(2),
              expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(
                  k + 1, k + 1),
              min=0) for k, j in enumerate(peak_height)
    ]

    # n>=1 fixed widths
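    # peak widths grow with photon number as sigma_n = sigma_p * sqrt(n)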
    [
        p.add('g{}_sigma'.format(k + 1),
              min_peak_sep / np.sqrt(2) / np.pi,
              min=0,
              expr='sigma_p * sqrt({})'.format(k + 1)
              # expr='sigma_p'
              ) for k, _ in enumerate(peak_height)
    ]

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(frequencies)
        err[frequencies == 0] = 1

        result = fit_model.fit(frequencies,
                               x=x_val,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(frequencies, x=x_val, params=p)
    return result
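
A hypothetical call, purely to show the calling convention (the histogram data are synthetic, and the peaks() helper is assumed to come from the same module):

import numpy as np
# four well-separated pulse-area peaks; the function discards the first histogram bin
areas = np.concatenate([np.random.normal(mu, 2.0, n)
                        for mu, n in [(10, 5000), (20, 2500), (30, 800), (40, 200)]])
pnr = np.histogram(areas, bins=200)
result = gauss_fit_poiss_ph(pnr, min_peak_sep=10.0, threshold=0.05, weighted=True)
print(result.params['n_bar'].value)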
Example #10

##### Here you can choose which fit is best between a Gaussian (defined earlier, so you can include more than one)
##### or a single Gaussian + a constant

gmodel = Model(gaussienne)
result = gmodel.fit(intensite,
                    x=frequence,
                    b=0,
                    a=np.max(intensite),
                    xo=esperance_signal,
                    sigma=variance_signal,
                    method='leastsq')
print(result.fit_report())

mod = GaussianModel() + ConstantModel()
out = mod.fit(intensite,
              x=frequence,
              amplitude=np.max(intensite),
              center=esperance_signal,
              sigma=variance_signal,
              c=0)
print(out.fit_report(min_correl=0.25))

##### Plot

starfreq = 4.2e9  # Hz
restfreq = 4.55e9  # Hz
freqwidth = 1.5625e6  # Hz
realfrequency = starfreq + frequence * freqwidth
Example #11
    def CurveFitting(self, frec, Pxx, iaf, ax):
        'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'

        # ----- adjusting a model to the obtained PSD -----
        # model 1: constante
        g1 = ConstantModel(prefix='g1_')
        pars = g1.guess(Pxx, x=frec)
        pars['g1_c'].set(0)
        # model 2: k2/f^-1
        g2 = PowerLawModel(prefix='g2_')
        pars += g2.guess(Pxx, x=frec)
        pars['g2_exponent'].set(-1)
        #model 3: probability density function
        g3 = GaussianModel(prefix='g3_')
        pars += g3.guess(Pxx, x=frec)
        pars['g3_center'].set(iaf, min=iaf - 2, max=iaf + 2)
        # model 4: probability density function
        g4 = GaussianModel(prefix='g4_')
        pars += g4.guess(Pxx, x=frec)
        pars['g4_center'].set(20, min=16, max=25)
        # final model
        gA = g1 + g2 + g3 + g4
        outA = gA.fit(Pxx, pars, x=frec)
        diffA = np.sum(Pxx - outA.best_fit)
        gB = g1 + g2 + g3
        outB = gB.fit(Pxx, pars, x=frec)
        diffB = np.sum(Pxx - outB.best_fit)
        gC = g1 + g2
        outC = gC.fit(Pxx, pars, x=frec)
        diffC = np.sum(Pxx - outC.best_fit)
        diffs = np.abs([diffA, diffB, diffC])
        idx = np.where(diffs == np.min(diffs))[0][0]
        out = [outA, outB, outC][idx]
        # ----- plotting the desire PSD -----
        # original and fitted curves
        ax.plot(frec, Pxx, 'k', linewidth=2, label='PSD')
        ax.plot(frec,
                out.best_fit,
                'b.-',
                linewidth=2,
                markersize=9,
                label='BestModel')
        ax.set_xlim(frec[0], 32)
        ax.set_ylim(ymin=0)
        ax.tick_params(axis='both', labelsize=16)
        ax.set_xlabel('Frequency [Hz]', fontsize='x-large')
        ax.grid()
        # components of the fitted curved
        comps = out.eval_components(x=frec)
        g12 = comps['g1_'] + comps['g2_']
        ax.plot(frec, g12, 'g--', linewidth=2, label='PowerLawModel')
        idx1, idx2 = np.where(frec >= 5)[0][0], np.where(frec <= 15)[0][-1]
        # final value on the subplot
        if out != outC:
            diffs = out.best_fit[idx1:idx2] - g12[idx1:idx2]
            peak1 = np.amax(diffs)
            idx = np.where(diffs == peak1)[0]
            idx += len(out.best_fit[:idx1])
            ax.plot((frec[idx], frec[idx]), (g12[idx], out.best_fit[idx]),
                    'r-o',
                    linewidth=3,
                    markersize=9)
            ax.text(frec[idx],
                    g12[idx],
                    str(np.around(peak1, decimals=2)),
                    horizontalalignment='right',
                    verticalalignment='top',
                    color='r',
                    fontsize='xx-large')
        else:
            peak1 = 0
        # optional valued on the subplot
        diffs = Pxx[idx1:idx2] - g12[idx1:idx2]
        peak2 = np.amax(diffs)
        idx = np.where(peak2 == diffs)[0]
        idx += len(Pxx[:idx1])
        ax.plot((frec[idx], frec[idx]), (g12[idx], Pxx[idx]),
                'r-*',
                linewidth=3,
                markersize=11)
        ax.text(frec[idx],
                Pxx[idx],
                str(np.around(peak2, decimals=2)),
                horizontalalignment='left',
                verticalalignment='top',
                color='r',
                fontsize='xx-large')
        ax.legend(loc='upper right', shadow=True)

        return peak1, peak2
Example #12
    def twoPeakGaussianFit(self):
        try:
            nRow, nCol = self.dockedOpt.fileInfo()

            self.binFitData = zeros((nRow, 0))
            self.TwoPkGausFitData = zeros(
                (nCol, 12))  # Creates the empty 2D List
            for j in range(nCol):
                yy1 = []
                yy2 = []
                yy = self.dockedOpt.TT[:, j]
                i = 0
                for y in yy:
                    if i < len(yy) / 2:
                        yy1.append(y)
                    else:
                        yy2.append(y)
                    i += 1

                xx = arange(0, len(yy))
                xx1 = arange(0, len(yy) // 2)
                xx2 = arange(len(yy) // 2, len(yy))
                x1 = xx[0]
                x2 = xx[-1]
                y1 = yy[0]
                y2 = yy[-1]
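                # estimate the linear background from the endpoints:
                # slope m = (y2 - y1)/(x2 - x1), intercept b = y2 - m*x2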
                m = (y2 - y1) / (x2 - x1)
                b = y2 - m * x2

                mod1 = GaussianModel(prefix='p1_')

                mod2 = GaussianModel(prefix='p2_')

                pars1 = mod1.guess(yy1, x=xx1)
                pars2 = mod2.guess(yy2, x=xx2)

                mod = mod1 + mod2 + LinearModel()
                pars = pars1 + pars2

                pars.add('intercept', value=b, vary=True)
                pars.add('slope', value=m, vary=True)
                out = mod.fit(yy, pars, x=xx, slope=m)
                QMessageBox.warning(self.myMainWindow, "Hi",
                                    "About to start fitting")

                self.TwoPkGausFitData[j, :] = (out.best_values['p1_amplitude'],
                                               0, out.best_values['p1_center'],
                                               0, out.best_values['p1_sigma'],
                                               0,
                                               out.best_values['p2_amplitude'],
                                               0, out.best_values['p2_center'],
                                               0, out.best_values['p2_sigma'],
                                               0)

                # Saves fitted data of each fit
                fitData = out.best_fit
                binFit = np.reshape(fitData, (len(fitData), 1))
                self.binFitData = np.concatenate((self.binFitData, binFit),
                                                 axis=1)

                if self.continueGraphingEachFit == True:
                    self.graphEachFitRawData(xx, yy, out.best_fit, 'G')

            return False
        except Exception as ex:
            QMessageBox.warning(
                self.myMainWindow, "Error",
                "Please make sure the guesses are realistic when fitting."
                "\n\nException: " + str(ex))
            return True
Example #13
def fit_1peak_1direction(I, pos, cut_length, cut_width, cut_thickness, \
        energy_window, ix_direction, model_name, verbose=False):
    """
    To fit the peaks along one direction.
    @paras:
        I: the dataset
        pos: [H0, K0, L0] the initial postion of the peak
        cut_length, cut_width, cut_thickness, energy_window, ix_direction:
            All these are cutting setting.
            For example, if given
            ----------------------------------
            pos = [1, 2, 3], cut_length=1, cut_width=0.05, cut_thickness=0.2,
            energy_window=4, ix_direction=1.
            ----------------------------------
            These will make a cut along the [0K0] direction, giving the slice as
            H: [0.9, 1.1]            #cut_thickness=1.1-0.9=0.2
            K: [1.5, 2.5, step=0.05] #cut_length=2.5-1.5=1, cut_width=step=0.05
            L: [2.9, 3.1]            #cut_thickness=3.1-2.9=0.2
            energy: [-2, 2]          #energy_window=2-(-2)=4meV
        model_name: the name of the model to fit the peak. Default: Gaussian.
            Options: Gaussian, Lorentzian, Voigt, SkewedVoigt. 
            Only the first letter in the name matters.
        verbose: print/save the fitting curves: Default: False.
    @returns:
        the peak position along that direction
    """
    """
    To gather the information to make cut of the data. Format:
    QE_info: list or numpy array
            [[H_start, H_end, H_step],
             [K_start, K_end, K_step],
             [L_start, L_end, L_step],
             [energy_start, energy_end, energy_step]]
            If no step value or the step value is 0, means only one bin 
            [start, end) for that component.
    """
    hist_QE_info = np.zeros((4, 3))
    hist_QE_info[:3, 0] = pos
    hist_QE_info[:3, 1] = pos
    tmp = 0.5 * np.array([
        cut_length if i == ix_direction else cut_thickness for i in range(3)
    ])
    hist_QE_info[:, 0] -= tmp
    hist_QE_info[:, 1] += tmp
    hist_QE_info[ix_direction, 2] = cut_width
    hist_QE_info[3, 0] = -0.5 * energy_window
    hist_QE_info[3, 1] = 0.5 * energy_window

    # cut
    data = cut(I, hist_QE_info)
    x, y = data[ix_direction], data[-1]

    # fit the peak
    flag = model_name[0].upper()
    if flag == 'G':
        mod = GaussianModel()
    elif flag == 'L':
        mod = LorentzianModel()
    elif flag == 'V':
        mod = VoigtModel()
    elif flag == 'S':
        mod = SkewedVoigtModel()
    else:
        print('cannot identify the model name, using the Gaussian model...')
        mod = GaussianModel()
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)

    if verbose:
        # TODO: save the plot
        pass

    return out.params.valuesdict()['center']  # return the fitted center of the peak
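
A hypothetical call (the dataset I and the cut() helper are assumed to come from the surrounding module):

# locate the K-coordinate of the peak near (1, 2, 3) along [0K0]
center_k = fit_1peak_1direction(I, pos=[1, 2, 3], cut_length=1, cut_width=0.05,
                                cut_thickness=0.2, energy_window=4,
                                ix_direction=1, model_name='Gaussian')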
Example #14
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import itertools
from lmfit.models import GaussianModel, LorentzianModel

# file input and assign
path = '/Users/dayeen/Downloads/'
filenames = [
    'DPPC_Chol_5%_chol_scan8485_qxy_gid', 'DPPC_chol9010_scan69_70_qxy_gid',
    'DPPC_chol8020_scan72_73_qxy_gid'
]

# legends for data
data_legends = ['5 Chol', '10 Chol', '20 Chol']

#fit declaration
gauss_mod = GaussianModel(prefix='gauss_')
lorentz1 = LorentzianModel(prefix='l1_')
lorentz2 = LorentzianModel(prefix='l2_')

#color
color_pallate = ['#FF1F5B', '#009ADE', '#AF58BA', '#FFC61E', '#F28522']


def index_of(array_value, peak_value):
    """return index of array *at or below* peak_value """
    if peak_value < min(array_value):
        return 0
    return max(np.where(array_value <= peak_value)[0])


def get_peaks(x, y):
Example #15
import sys
import numpy as np
from numpy import arange, loadtxt
from lmfit.models import GaussianModel, ConstantModel
fname = sys.argv[1]
data = loadtxt(fname)
res = []
n = round(len(data)**0.5)
m = n + 1
for j, d in enumerate(data):
    sigma = d[2] / 2
    if np.isnan(sigma):
        continue
    y = d[3:]
    x = arange(len(y))
    y0 = np.mean([y[:200], y[-200:]])
    x0 = ((y - y0) * x).sum() / (y - y0).sum()
    mod = GaussianModel()

    pars = mod.guess(y, x=x)
    pars['sigma'].set(sigma, min=5, max=4000)
    pars['center'].set(x0, min=200, max=6500)
    #pars['amplitude'].set(y.max()/sigma,min=1,max=60536)
    bg = ConstantModel()
    pars.update(bg.make_params())
    pars['c'].set(y.min())

    model = mod + bg
    try:
        out = model.fit(y, pars, x=x)
    except Exception:
        continue
    print(out.fit_report(min_correl=0.25))
Example #16
def gauss_fit_interp_disc(pnr, min_peak_sep, threshold=None, weighted=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    'disc' naming convention because it takes into account the n=0 rectangular function
    in the first bin, which results from calculating zero area in a trace where the discriminator does not trigger.
    :param pnr: 2D histogram, output of np.histogram(area under discriminator), assumes it's a rectangular function + sum of gaussians
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, associates a Poissonian error with the
        frequencies
    """

    # unpack the histogram into x and y values
    # first bin corresponds to n=0 traces with no photon detection events, thus having zero area.
    f0 = pnr[0][0]  #n=0 freq
    frequencies = pnr[0][1:]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1][1:]
    step = np.diff(x_val)[0]
    x0 = pnr[1][0] + step / 2  #n=0 bin
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks([frequencies, x_val], min_peak_sep,
                                   threshold)
    print(peaks_pos, peak_height)

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = RectangleModel(prefix='g0_', form='linear') + np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()
    p.add('n_bar', 0.2)
    # n=0 params
    p.add('g0_amplitude', f0, expr='{}*exp(-n_bar)'.format(np.sum(pnr[0])))
    p.add('g0_center1', x0 - step / 2, vary=0)
    p.add('g0_center2', x0 + step / 2, vary=0)
    p.add('g0_sigma1', 0)
    p.add('g0_sigma2', 0)

    p.add('A', np.max(peak_height) * min_peak_sep)
    # p.add('Delta_E', peaks_pos[-1] - peaks_pos[-2])
    # p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)
    # n>=1 Centers
    p.add('g1_center', peaks_pos[0], min=0)
    p.add('g2_center', peaks_pos[1], min=0)
    [
        p.add(
            'g{}_center'.format(k + 3),
            j,
            # expr='g{}_center + Delta_E'.format(k + 1)
        ) for k, j in enumerate(peaks_pos[2:])
    ]

    # n>=1 amplitudes
    [
        p.add('g{}_amplitude'.format(k + 1),
              j * min_peak_sep / np.sqrt(2),
              expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(
                  k + 1, k + 1),
              min=0) for k, j in enumerate(peak_height)
    ]

    # n>=1 fixed widths
    [
        p.add(
            'g{}_sigma'.format(k + 1),
            min_peak_sep / np.sqrt(2) / np.pi,
            min=0,
            # expr='sigma_p * sqrt({})'.format(k + 1)
            expr='sigma_p') for k, _ in enumerate(peak_height)
    ]

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(pnr[0])
        err[pnr[0] == 0] = 1

        result = fit_model.fit(pnr[0],
                               x=pnr[1][:-1] + step / 2,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(pnr[0], x=pnr[1][:-1] + step / 2, params=p)

    # amplitudes = np.array([result.best_values['g{}_amplitude'.format(k)]
    #                        for k, _
    #                        in enumerate(peaks_pos)])
    # centers = np.array([result.best_values['g{}_center'.format(k)]
    #                     for k, _
    #                     in enumerate(peaks_pos)])
    # sigmas = np.array([result.best_values['g{}_sigma'.format(k)]
    #                    for k, _
    #                    in enumerate(peaks_pos)])
    # s_vec = centers.argsort()
    return result
Example #17
 zero. The while loop iterates over the peak and sets it to zero.
 '''
 iterator = data_left
 while iterator < (data_right):
     gauss_x.append(iterator)
     gauss_y.append(list_data[iterator])
     x = np.asarray(gauss_x)
     y = np.asarray(gauss_y)
     fit_channel.append(list_data[iterator])
     list_data[iterator] = 0
     iterator += 1
 i += 1
 '''
 information for plotting the Gaussian function.
 '''
 mod = GaussianModel(prefix='g1_')
 line_mod = LinearModel(prefix='line')
 pars = mod.guess(y, x=x)
 pars.update(line_mod.make_params(intercept=y.min(), slope=0))
 pars['g1_center'].set(gauss_x[np.argmax(gauss_y)], min=gauss_x[np.argmax(gauss_y)]\
 - 3)
 pars['g1_sigma'].set(3, min=0.25)
 pars['g1_amplitude'].set(max(gauss_y), min=max(gauss_y) - 10)
 mod = mod + line_mod
 out = mod.fit(y, pars, x=x)
 plt.plot(x, fit_channel)
 plt.plot(x, out.best_fit, '--k')
 plt.show()
 gauss_x = []
 gauss_y = []
Example #18
def gauss_fit(pnr, min_peak_sep, threshold=None, weighted=False, plot=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    NOTE: unlike gauss_fit_interp, it does not assume a Poissonian distribution
    :param pnr: 2D histogram, output of np.histogram, assumes it's a sum of gaussians
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, associates a Poissonian error with the
        frequencies
    """

    # unpack the histogram into x and y values
    frequencies = pnr[0]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1]
    step = np.diff(x_val)[0]
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks(pnr, min_peak_sep, threshold)

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()

    p.add('A', np.max(peak_height) * min_peak_sep)
    p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)

    # Centers
    p.add('g1_center', peaks_pos[0], min=0)
    p.add('g2_center', peaks_pos[1], min=0)
    [
        p.add(
            'g{}_center'.format(k + 3),
            j,
            # expr='g{}_center + Delta_E'.format(k + 1)
        ) for k, j in enumerate(peaks_pos[2:])
    ]

    # amplitudes
    [
        p.add(
            'g{}_amplitude'.format(k + 1),
            j * min_peak_sep / np.sqrt(2),
            # expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(k, k),
            min=0) for k, j in enumerate(peak_height)
    ]

    # fixed width
    [
        p.add(
            'g{}_sigma'.format(k + 2),
            min_peak_sep / np.sqrt(2) / np.pi,
            min=0,
            # expr='sigma_p * sqrt({})'.format(k + 1)
            # expr='sigma_p'
        ) for k, _ in enumerate(peak_height[1:])
    ]

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(frequencies)
        err[frequencies == 0] = 1

        result = fit_model.fit(frequencies,
                               x=x_val,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(frequencies, x=x_val, params=p)

    if plot:
        plt.figure(figsize=(10, 5))
        plt.errorbar(pnr[1][:-1] + step / 2,
                     pnr[0],
                     yerr=np.sqrt(pnr[0]),
                     linestyle='',
                     ecolor='black',
                     color='black')
        plt.plot(x_val, result.eval(x=x_val))
        [
            plt.scatter(x_val,
                        result.eval_components(x=x_val)['g{}_'.format(k + 1)],
                        marker='.') for k, _ in enumerate(result.components)
        ]
        # plt.axvline(th01,color='black', label='th01')
        plt.legend()
        print(result.fit_report())
    return result
Example #19
    def get_model(self):

        self.x = np.array([0,1,2,6,12,24])
        self.y = np.array(self.norm_vals)
        self.model_flag = None
        self.model_list = []

        # First model with Gaussian curve.
        self.background1 = ExponentialModel(prefix='e1_')
        self.pars1 = self.background1.guess(self.y, x=self.x)
        self.peak1 = GaussianModel(prefix='p1_')
        self.pars1 += self.peak1.guess(self.y, x=self.x)
        self.comp_mod1 = self.peak1 + self.background1
        self.init1 = self.comp_mod1.eval(self.pars1, x=self.x)
        self.comp_out1 = self.comp_mod1.fit(self.y, self.pars1, x=self.x, fit_kws={'nan_policy': 'omit'})
        self.comp_chisq1 = self.comp_out1.chisqr  # read chi-square directly instead of parsing the text report

        # Second model with Voigt curve.
        self.background2 = ExponentialModel(prefix='e2_')
        self.pars2 = self.background2.guess(self.y, x=self.x)
        self.peak2 = VoigtModel(prefix='p2_')
        self.pars2 += self.peak2.guess(self.y, x=self.x)
        self.comp_mod2 = self.peak2 + self.background2
        self.init2 = self.comp_mod2.eval(self.pars2, x=self.x)
        self.comp_out2 = self.comp_mod2.fit(self.y, self.pars2, x=self.x, fit_kws={'nan_policy': 'omit'})
        self.comp_chisq2 = self.comp_out2.chisqr

        # Exponential model for reference
        self.exp_mod = ExponentialModel(prefix='onlye_')
        self.pars = self.exp_mod.guess(self.y, x=self.x)
        self.init = self.exp_mod.eval(self.pars, x=self.x)

        self.exp_out = self.exp_mod.fit(self.y, self.pars, x=self.x, nan_policy='omit')
        self.exp_chisq = self.exp_out.chisqr

        self.model_list = [self.comp_chisq1, self.comp_chisq2, self.exp_chisq]

        if np.count_nonzero(np.isinf(self.comp_out1.best_fit)) == 5 and np.count_nonzero(np.isinf(self.comp_out2.best_fit)):
             self.model_flag = "exponential"
             self.out = self.exp_out

        elif len(self.model_list) == len(set(self.model_list)):

             if min(self.model_list) == self.comp_chisq1:
                 self.model_flag = "Gaussian compound"
                 self.out = self.comp_out1

             elif min(self.model_list) == self.comp_chisq2:
                 self.model_flag = "Voigt compound"
                 self.out = self.comp_out2

             elif min(self.model_list) == self.exp_chisq:
                 self.model_flag = "exponential"
                 self.out = self.exp_out

        elif len(self.model_list) != len(set(self.model_list)):

             if min(self.model_list) == self.comp_chisq1:
                 self.model_flag = "Gaussian compound"
                 self.out = self.comp_out1

             elif min(self.model_list) == self.comp_chisq2:
                 self.model_flag = "Voigt compound"
                 self.out = self.comp_out2

             elif min(self.model_list) == self.exp_chisq:
                 self.model_flag = "exponential"
                 self.out = self.exp_out


             if min(self.model_list) == self.comp_chisq1 and self.comp_chisq1 == self.comp_chisq2:
                 self.model_flag = "Both compounds"
                 self.out = self.comp_out2

             if min(self.model_list) == self.comp_chisq2 and self.comp_chisq2 == self.exp_chisq:
                 self.model_flag = "Voigt compound and exponential"
                 self.out = self.comp_out2

             if min(self.model_list) == self.exp_chisq and self.exp_chisq == self.comp_chisq1:
                 self.model_flag = "Gaussian compound and exponential"
                 self.out = self.comp_out1


        return self.comp_out1, self.comp_chisq1, self.comp_out2, self.comp_chisq2, self.exp_out, self.exp_chisq, self.model_flag
Example #20
def gauss_fit_poiss_ph_region(pnr,
                              min_peak_sep,
                              th01=None,
                              threshold=None,
                              weighted=False,
                              plot=False):
    """
    improve the precision in the location of the peaks by fitting them
    using a sum of Gaussian distributions
    'poiss_ph' naming convention because it only fits the amplitudes to poissonian stats for n>=1
    :param pnr: 2D histogram, output of np.histogram, assumes it's a rectangular function (first bin) + sum of gaussians, but discards the first bin.
    :param min_peak_sep: Minimum distance between each detected peak.
    :param threshold: Normalized threshold, float between [0., 1.]
    :param weighted: if True, it associate a poissonian error to the
        frequencies
    """

    # unpack the histogram into x and y values
    # first bin corresponds to n=0 traces with no photon detection events, thus having zero area.
    f0 = pnr[0][0]  #n=0 freq
    frequencies = pnr[0][1:]

    # match the number of bins to the frequencies, find the step size
    x_val = pnr[1][1:]
    step = np.diff(x_val)[0]
    x0 = pnr[1][0] + step / 2  #n=0 bin
    x_val = x_val[:-1] + step / 2.

    # find a first approximation of the peak location using local differences
    peaks_pos, peak_height = peaks([frequencies, x_val], min_peak_sep,
                                   threshold)
    print('est peak pos = {}\nest peak hts = {}'.format(peaks_pos, peak_height))

    # detect the minimum between the n=0 and n=1 peaks
    if th01 is None:
        th01 = x_val[find_idx(x_val, peaks_pos[0]) +
                     np.argmin(frequencies[(x_val > peaks_pos[0])
                                           & (x_val < peaks_pos[1])])]

    print('th01 = {}'.format(th01))

    # constrain fitting region:
    x_val_ = x_val
    frequencies_ = frequencies

    mask = x_val > th01
    x_val = x_val[mask]
    frequencies = frequencies[mask]
    # filter the peak estimates with a single mask so positions and heights
    # stay aligned (masking peaks_pos first would misalign peak_height)
    peak_mask = peaks_pos > th01
    peaks_pos = peaks_pos[peak_mask]
    peak_height = peak_height[peak_mask]

    # build a fitting model with a number of gaussian distributions matching
    # the number of peaks
    fit_model = np.sum([
        GaussianModel(prefix='g{}_'.format(k + 1))
        for k, _ in enumerate(peaks_pos)
    ])

    # Generate the initial conditions for the fit
    p = Parameters()
    p.add('n_bar', 0.2, min=0)

    p.add('A', np.max(peak_height) * min_peak_sep)
    # p.add('Delta_E', peaks_pos[-1] - peaks_pos[-2])
    p.add('Delta_E', 5)
    # p.add('g1_sigma', min_peak_sep / 5, min=0)
    p.add('sigma_p', min_peak_sep / np.sqrt(2) / np.pi, min=0)
    # n>=1 Centers
    p.add('g1_center', peaks_pos[0], min=0, vary=1)
    p.add('g2_center', peaks_pos[1], min=0)
    for k, j in enumerate(peaks_pos[2:]):
        p.add(
            'g{}_center'.format(k + 3),
            j,
            # expr='g{}_center + Delta_E'.format(k + 1)
        )

    # n>=1 amplitudes
    for k, j in enumerate(peak_height):
        p.add('g{}_amplitude'.format(k + 1),
              j * min_peak_sep / np.sqrt(2),
              expr='A * exp(-n_bar) * n_bar**{} / factorial({})'.format(
                  k + 1, k + 1),
              min=0)

    # n>=1 fixed widths
    for k, _ in enumerate(peak_height):
        p.add('g{}_sigma'.format(k + 1),
              min_peak_sep / np.sqrt(2) / np.pi,
              min=0,
              expr='sigma_p * sqrt({})'.format(k + 1)
              # expr='sigma_p'
              )

    if weighted:
        # generates the poissonian errors, correcting for zero values
        err = np.sqrt(frequencies)
        err[frequencies == 0] = 1

        result = fit_model.fit(frequencies,
                               x=x_val,
                               params=p,
                               weights=1 / err,
                               method='powell')
    else:
        result = fit_model.fit(frequencies, x=x_val, params=p)

    n_bar = result.params.valuesdict()['n_bar']
    print('poissonian probs from n=1,2...= {}'.format(
        [np.exp(-n_bar) * n_bar**(k + 1) / math.factorial(k + 1)
         for k, j in enumerate(peak_height)]))

    if plot:
        plt.figure(figsize=(10, 5))
        plt.errorbar(pnr[1][:-1] + step / 2,
                     pnr[0],
                     yerr=np.sqrt(pnr[0]),
                     linestyle='',
                     ecolor='black',
                     color='black')
        for k, _ in enumerate(result.components):
            plt.plot(x_val,
                     result.eval_components(x=x_val)['g{}_'.format(k + 1)])
        plt.axvline(th01, color='black', label='th01')
        plt.legend()
        print(result.fit_report())
    return result
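
# Hedged usage sketch (synthetic data, not from the original source): simulate
# pulse areas from a photon-number-resolving detector, histogram them, and fit.
# Assumes the peaks() and find_idx() helpers used above are defined in this
# module; the peak spacing (5.0) and widths here are illustrative.
rng = np.random.default_rng(0)
n = rng.poisson(1.5, size=20000)                  # photon number per trace
areas = rng.normal(5.0 * n, 0.5 * np.sqrt(np.maximum(n, 1)))
areas[n == 0] = 0.0                               # n=0 traces have zero area
pnr = np.histogram(areas, bins=150)
result = gauss_fit_poiss_ph_region(pnr, min_peak_sep=5.0, plot=False)
print(result.params['n_bar'].value)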

def per_iteration(pars, iter, resid, *args, **kws):
    if iter < 3 or iter % 10 == 0:
        out = ['== %i ' % iter]
        for key, val in pars.valuesdict().items():
            out.append('%s=%.3f' % (key, val))
        print(', '.join(out))
        print(args, kws)


import numpy as np
from numpy import random
from lmfit.lineshapes import gaussian
from lmfit.models import GaussianModel, LinearModel
import pylab

x = np.linspace(0., 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y = y - .20 * x + 3.333 + random.normal(scale=0.23, size=len(x))

mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')

pars = mod.make_params()
pars['peak_amplitude'].value = 3.0
pars['peak_center'].value = 6.0
pars['peak_sigma'].value = 2.0
pars['bkg_intercept'].value = 0.0
pars['bkg_slope'].value = 0.0

out = mod.fit(y, pars, x=x, iter_cb=per_iteration)

pylab.plot(x, y, 'b--')

# print(' Nfev = ', out.nfev)
print(out.fit_report())
def fit_1peak_1direction(df, pos, cut_length, cut_width, cut_thickness, \
                         energy_window, ix_direction, model_name, \
                         show=False, save=False, outdir='./peaks_reports/fig/', suffix=''):
    """
    To fit the peaks along one direction.
    @paras:
        infile: the dataset file path.
        pos: [H0, K0, L0] the initial postion of the peak
        cut_length, cut_width, cut_thickness, energy_window, ix_direction:
            All these are cutting setting.
            For example, if given
            ----------------------------------
            pos = [1, 2, 3], cut_length=1, cut_width=0.05, cut_thickness=0.2, 
            energy_window=4, ix_direction=2.
            ----------------------------------
            These will make cuts along [0K0] directions, giving the slice as
            H: [0.9, 1.1]            #cut_thickness=1.1-0.9=0.2
            K: [1.5, 2.5, step=0.05] #cut_length=2.5-1.5=1, cut_width=step=0.05
            L: [2.9, 3.1]            #cut_thickness=3.1-2.9=0.2
            energy: [-2, 2]          #energy_window=2-(-2)=4meV
        model_name: the name of the model to fit the peak. Default: Gaussian.
            Options: Gaussian, Lorentzian, Voigt, SkewedVoigt. 
            Only the first letter in the name matters.
        verbose: print/save the fitting curves: Default: False.
    @returns:
        the peak position along that direction
    """
    """
    To gather the information to make cut of the data. Format:
    QE_info: list or numpy array
            [[H_start, H_end, H_step],
             [K_start, K_end, K_step],
             [L_start, L_end, L_step],
             [energy_start, energy_end, energy_step]]
            If there is no step value, or the step value is 0, only one bin
            [start, end) is used for that component.
    """
    QE_info = np.zeros((4, 3))
    QE_info[:3, 0] = pos
    QE_info[:3, 1] = pos
    tmp = 0.5 * np.array(
        [cut_length if i == ix_direction else cut_thickness for i in range(3)])
    QE_info[:3, 0] -= tmp
    QE_info[:3, 1] += tmp
    QE_info[ix_direction, 2] = cut_width
    QE_info[3, 0] = -0.5 * energy_window
    QE_info[3, 1] = 0.5 * energy_window

    # cut
    y = cut(df, QE_info, ignore_energy=True)
    x = get_bin_centers(*QE_info[ix_direction])
    if np.sum(np.isnan(y)) >= 0.5 * y.size:
        return np.nan
    ix_not_nan = np.isfinite(y)  #np.logical_not(np.isnan(y))
    y = y[ix_not_nan]
    x = x[ix_not_nan]

    # fit the peak
    flag = model_name[0].upper()
    if flag == 'G':
        mod = GaussianModel()
    elif flag == 'L':
        mod = LorentzianModel()
    elif flag == 'V':
        mod = VoigtModel()
    elif flag == 'P':
        mod = PseudoVoigtModel()
    elif flag == 'S':
        mod = SkewedVoigtModel()
    else:
        print('cannot identify the model name, using Gaussian model anyway...')
        mod = GaussianModel()
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)
    if not out.success:
        return np.nan

    if show or save:

        x_labels = ["Q_" + chr(120 + i) + " (r.l.u.)" for i in range(3)]
        plt.scatter(x, y, c='b')
        plt.plot(x, out.best_fit, 'r-', label=mod.name+ \
                 '\n$\\vdash$ center={:.3e}'.format(out.params.valuesdict()['center'])+ \
                 '\n$\\vdash$ red-chisqr={:.3e}'.format(out.redchi))
        plt.xlabel(x_labels[ix_direction])
        plt.ylabel("Intensity (arb. units)")
        plt.legend(loc='best')

        if save:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            fname = "peak_" + '_'.join(map(str, np.round(pos).astype(int))) \
                    + "_along_" + chr(120+ix_direction) + suffix
            plt.savefig(outdir + fname, dpi=300)

        if show:
            plt.show()
        plt.close()
    print("Fit result: center =", out.params.valuesdict()['center'])
    print("Fit Statistics: chi-sqr = {:.3e},".format(out.chisqr))
    print("                reduce chi-sqr = {:.3e}".format(out.redchi))
    print("                Akaike info crit = {:.3e}".format(out.aic))
    print("                Bayesian info crit = {:.3e}".format(out.bic))

    return out.params.valuesdict()['center']
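
# Hypothetical usage (assumes a dataset `df` loaded elsewhere plus the cut()
# and get_bin_centers() helpers from this module; values are illustrative):
# center = fit_1peak_1direction(df, pos=[1, 2, 3], cut_length=1,
#                               cut_width=0.05, cut_thickness=0.2,
#                               energy_window=4, ix_direction=1,
#                               model_name='Gaussian', show=True)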
Exemple #23
0
def prepareFittingModels(roiCoordsList, modelType):
    modelList = []
    paramList = []
    index = 1

    # find the global maximum y value across all regions, used to bound the
    # amplitude/height parameters (scanning only roiCoordsList[0] would miss
    # taller peaks in later regions)
    maxYVal = 0
    for region in roiCoordsList:
        if isinstance(region, dict):
            region = [region]
        for entry in region:
            maxYVal = max(maxYVal, entry['y'].max())

    for region in roiCoordsList:
        individualModelsList = []
        individualParamsList = []
        if isinstance(region, dict):
            # If the region is just a single region, make it a list so the for loops pulls a dict rather than a dict entry
            region = [region]
        for entry in region:
            prefixName = 'v' + str(index) + '_'
            index += 1
            # pull info out of region dict
            selectedXVals = entry['x']
            selectedYVals = entry['y']

            mod = None
            if modelType.lower() == 'voigt':
                mod = VoigtModel(prefix=prefixName)
            elif modelType.lower() in ('pseudovoigt', 'psuedovoigt'):
                mod = PseudoVoigtModel(prefix=prefixName)
            elif modelType.lower() == 'lorentzian':
                mod = LorentzianModel(prefix=prefixName)
            elif modelType.lower() == 'gaussian':
                mod = GaussianModel(prefix=prefixName)
            elif modelType.lower() == 'pearsonvii':
                mod = Pearson7Model(prefix=prefixName)

            assert mod, "Entered model type is not supported"
            individualModelsList.append(mod)
            pars = mod.guess(selectedYVals, x=selectedXVals, negative=False)
            pars[prefixName + 'center'].set(min=min(selectedXVals),
                                            max=max(selectedXVals))
            pars[prefixName + 'amplitude'].set(min=0, max=maxYVal)
            pars[prefixName + 'sigma'].set(min=0)
            pars[prefixName + 'height'].set(min=0, max=maxYVal)
            if modelType.lower() == 'gaussian':
                minimumFWHM = 0.01
                # sigma = FWHM / (2 * sqrt(2 * ln 2)) for a Gaussian
                pars[prefixName + 'sigma'].set(
                    min=minimumFWHM / (2 * np.sqrt(2 * np.log(2))))
            if modelType.lower() == 'pearsonvii':
                pars[prefixName + 'expon'].set(
                    min=0.5 + np.finfo(float).eps)  # prevents negative values
            if modelType.lower() == 'voigt':
                pars[prefixName + 'gamma'].set(value=0.3,
                                               vary=True,
                                               expr='',
                                               min=0)
            individualParamsList.append(pars)
        combinedModel = individualModelsList[0]
        combinedParams = individualParamsList[0]
        if len(individualModelsList) > 1:
            for model, params in zip(individualModelsList[1:],
                                     individualParamsList[1:]):
                combinedModel += model
                combinedParams += params
        modelList.append(combinedModel)
        paramList.append(combinedParams)
    return modelList, paramList
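
# Hedged usage sketch (synthetic single-peak ROI; the data and the 'v1_'
# prefix it produces are illustrative):
import numpy as np

x = np.linspace(-5, 5, 201)
roi = {'x': x, 'y': 4 * np.exp(-x**2 / 0.08) + 0.05}
models, params = prepareFittingModels([roi], 'gaussian')
result = models[0].fit(roi['y'], params[0], x=roi['x'])
print(result.params['v1_center'].value)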
Exemple #24
0
from qcodes.plots.qcmatplotlib import MatPlot
from qcodes.plots.pyqtgraph import QtPlot
from scipy.optimize import curve_fit
import scipy.integrate as integrate
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from lmfit.models import LorentzianModel, ConstantModel, GaussianModel

pd_dat = pd.read_csv(
    'Scripts and PPT Summary/CryoRX/2020-06-22/18-15-29_qtt_scan1D/RP1.dat',
    skiprows=[0, 2],
    delimiter='\t')

xval = pd_dat['# "RP1"']
yval = pd_dat['S21mag']

peak = GaussianModel()
offset = ConstantModel()
model = peak + offset

pars = offset.make_params(c=np.median(yval))
# negative amplitude guess: the S21 magnitude feature is presumably a dip
pars += peak.guess(yval, x=xval, amplitude=-0.5)
result = model.fit(yval, pars, x=xval)

print(abs(result.values['height']))

plt.plot(xval, yval)
plt.plot(xval, result.best_fit, 'b--')
plt.show()
Exemple #25
0
xmin1 = 4750.
xmax1 = 5100.
xmin2 = 6350.
xmax2 = 6800.

y1 = y[((x > xmin1) & (x < xmax1)) | ((x > xmin2) & (x < xmax2))]
x1 = x[((x > xmin1) & (x < xmax1)) | ((x > xmin2) & (x < xmax2))]


# exp = ExponentialModel(prefix='exp_')
# pars = exp.guess(y1, x=x1)


offset = 2000. / 2.35 / cspeed * OIIIb

# [SII] 6716 component; the sigma limits correspond to FWHM between 200 and
# 800 km/s (2.35 converts FWHM to sigma, cspeed being c in km/s)
n6716 = GaussianModel(prefix='n6716_')
pars = n6716.guess(y1, x=x1)
pars.update(n6716.make_params())
pars['n6716_center'].set(6716)
pars['n6716_sigma'].set(600. / 2.35 / cspeed * 6716,
                        min=200. / 2.35 / cspeed * 6716,
                        max=800. / 2.35 / cspeed * 6716)
pars['n6716_amplitude'].set(100, min=0)

# [SII] 6731 component: center and width tied to 6716 by the wavelength ratio
n6731 = GaussianModel(prefix='n6731_')
pars.update(n6731.make_params())
pars['n6731_center'].set(expr='n6716_center/6716.*6731.')
pars['n6731_sigma'].set(expr='n6716_sigma/6716.*6731.')
pars['n6731_amplitude'].set(100, min=0)

n6583 = GaussianModel(prefix='n6583_')
pars.update(n6583.make_params())
Exemple #26
0
def spectrum_calibration(channel_width, energy_list, data_2_calibrate):
    import numpy as np
    import matplotlib.pyplot as plt
    #from scipy.optimize import curve_fit
    #from modelling import gauss
    import statsmodels.api as sm
    from lmfit.models import GaussianModel
    from lmfit.models import LinearModel
    '''
    The while loop identifies the largest peak in the spectrum and records
    its position. It then removes the peak by zeroing `channel_width`
    channels on each side of it, and searches for the next largest peak.
    '''

    i = 0
    channel_max_list = []
    gauss_x = []
    gauss_y = []
    fit_channel = []
    while i < len(energy_list):
        channel_max = np.argmax(data_2_calibrate)
        data_left = channel_max - channel_width
        data_right = channel_max + channel_width
        channel_max_list.append(channel_max)
        iterator = data_left
        while iterator < data_right:
            gauss_x.append(iterator)
            gauss_y.append(data_2_calibrate[iterator])
            fit_channel.append(data_2_calibrate[iterator])
            data_2_calibrate[iterator] = 0
            iterator += 1
        i += 1
        x = np.asarray(gauss_x)
        y = np.asarray(gauss_y)
        mod = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line_')
        pars = mod.guess(y, x=x)
        pars.update(line_mod.make_params(intercept=y.min(), slope=0))
        peak_channel = gauss_x[np.argmax(gauss_y)]
        pars['g1_center'].set(peak_channel, min=peak_channel - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(gauss_y), min=max(gauss_y) - 10)
        mod = mod + line_mod
        out = mod.fit(y, pars, x=x)
        gauss_x = []
        gauss_y = []
        fit_channel = []
        #print(out.fit_report(min_correl=10))
        #for key in out.params:
        #    print(key, "=", out.params[key].value, "+/-", out.params[key].stderr)
    '''
    sorting channel number so the correct channel number corresponds with
    the correct energy.
    '''
    channel_number = sorted(channel_max_list, key=int)
    energy = energy_list
    results = sm.OLS(energy, sm.add_constant(channel_number)).fit()

    slope, intercept = np.polyfit(channel_number, energy, 1)

    abline_values = [slope * i + intercept for i in channel_number]
    plt.plot(channel_number, energy, 'ro')
    plt.plot(channel_number, abline_values, 'b')
    plt.xlabel('Channel Number')
    plt.ylabel('Energy [keV]')
    plt.title('Best Fit Line')
    return slope, intercept
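
# Hedged usage sketch (synthetic two-line spectrum; the energies are the
# familiar Cs-137 661.7 keV and K-40 1460.8 keV lines, used here purely as
# illustrative calibration points):
import numpy as np
channels = np.arange(1024)
spectrum = (500. * np.exp(-(channels - 200)**2 / 50.)
            + 300. * np.exp(-(channels - 700)**2 / 50.)
            + np.random.poisson(5, channels.size))
slope, intercept = spectrum_calibration(10, [661.7, 1460.8],
                                        spectrum.astype(float))
print('E(keV) = {:.3f} * channel + {:.3f}'.format(slope, intercept))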
Exemple #27
0
    def singleGauss(self, g1):
        modGauss1 = GaussianModel(prefix='g1_')
        params = modGauss1.make_params(g1_amplitude=g1['amp'],
                                       g1_center=g1['mu'])
        result = modGauss1.fit(self.yData, params, x=self.xData)
        return result, modGauss1
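
# Hypothetical usage from inside the owning class (xData/yData assumed set):
# result, model = self.singleGauss({'amp': 10.0, 'mu': 3.2})
# print(result.params['g1_center'].value)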
Exemple #28
0
def NewFit8(amp1, amp2, amp3, amp4, amp5, amp6, amp7, amp8, mu1, mu2, mu3, mu4,
            mu5, mu6, mu7, mu8, sig1, sig2, sig3, sig4, sig5, sig6, sig7, sig8,
            x, y):
    """Fit the sum of eight Gaussians with the given initial parameters."""
    amps = [amp1, amp2, amp3, amp4, amp5, amp6, amp7, amp8]
    mus = [mu1, mu2, mu3, mu4, mu5, mu6, mu7, mu8]
    sigs = [sig1, sig2, sig3, sig4, sig5, sig6, sig7, sig8]

    # Define the first Gaussian and make an automatic guess of its parameters.
    gaussians = [GaussianModel(prefix='g1_')]
    pars = gaussians[0].guess(y, x=x)

    # Define the remaining Gaussians, updating the shared parameter set.
    for k in range(2, 9):
        gauss = GaussianModel(prefix='g{}_'.format(k))
        gaussians.append(gauss)
        pars.update(gauss.make_params())

    # Set the initial parameter values for every component.
    for k, (amp, mu, sig) in enumerate(zip(amps, mus, sigs), start=1):
        pars['g{}_center'.format(k)].set(mu, vary=True)
        pars['g{}_sigma'.format(k)].set(sig, vary=True)
        pars['g{}_amplitude'.format(k)].set(amp, min=0, vary=True)

    # Make the model as the sum of the Gaussians.
    mod = gaussians[0]
    for gauss in gaussians[1:]:
        mod = mod + gauss

    # Fit, print, and plot the result.
    out = mod.fit(y, pars, x=x)
    print(out.fit_report(min_correl=0.5))
    plt.plot(x, out.best_fit, 'r-', linewidth=1.50)
    plt.show()
    return pars
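
# Hedged usage sketch (synthetic spectrum of eight equally spaced peaks; the
# amplitudes, positions, and widths below are illustrative):
import numpy as np
from lmfit.lineshapes import gaussian
x = np.linspace(0., 90., 901)
mus = [10, 20, 30, 40, 50, 60, 70, 80]
y = sum(gaussian(x, amplitude=5, center=m, sigma=1.0) for m in mus)
y = y + np.random.normal(scale=0.05, size=x.size)
pars = NewFit8(*([5.0] * 8 + mus + [1.0] * 8), x=x, y=y)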
Exemple #29
0
        # run fit
        self.minimize(method=self.method)
        self.best_fit = self.eval(params=self.params, **self.userkws)


test = True
if test:
    import numpy as np
    from lmfit.models import GaussianModel
    xs  = np.arange(1000)
    ys  =  np.exp(-(xs - 200)**2 / 100)
    ys1 = 3*np.exp(-(xs - 800)**2 / 200) + ys
    ys2 = 10*np.exp(-(xs - 500)**2 / 50) + ys
    
    gf = GlobalFit()
    m = GaussianModel(prefix='m_') # common model
    m1 = GaussianModel(prefix='m1_') + m
    m2 = GaussianModel(prefix='m2_') + m
    gf.add_curve('c1', m1, ys1)
    gf.add_curve('c2', m2, ys2)
    gf.tie_all('m_center')
    params = gf.make_params()
    params['m_center'].value = 200
    params['c1_m1_center'].value = 700
    params['c2_m2_center'].value = 600
    print(params)
    gf.fit(params, x=xs)
    print(gf.params)
    
    from matplotlib import pyplot as pl
Exemple #30
0
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-15]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-5]
    emax:      max energy (eV) to use for baseline fit [e0-1]
    emin:      min energy (eV) to use for baseline fit [e0-30]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline [True]


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin-eneg:emax+epos]
        baseline      fitted baseline array over pre-edge peak energies
        mu            baseline-subtracted spectrum over pre-edge peak energies
        dmu           estimated uncertainty in mu from fit
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.mu).sum() / prepeaks.mu.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.mu` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)

    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin, emax, elo, and ehi to pre_edge_baseline")

    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    imin = index_of(energy, emin)
    ilo = index_of(energy, elo)
    ihi = index_of(energy, ehi)
    imax = index_of(energy, emax)

    # build xdat, ydat: data to fit (skipping the pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    # run fit
    result = model.fit(ydat, params, x=xdat)

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]

    # get baseline and resulting mu over edat range
    bline = result.eval(result.params, x=edat)
    mu = norm[imin:imax + 1] - bline

    # uncertainty in mu includes only uncertainties in baseline fit
    dmu = result.eval_uncertainty(result.params, x=edat)

    # estimate centroid and its uncertainty
    cen = (edat * mu).sum() / mu.sum()
    cen_plus = (edat * (mu + dmu)).sum() / (mu + dmu).sum()
    cen_minus = (edat * (mu - dmu)).sum() / (mu - dmu).sum()
    dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    peak_energies = []
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(mu, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           mu=mu,
                           delta_mu=dmu,
                           baseline=bline,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
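
# Hypothetical usage within Larch (assumes pre_edge() was already run on a
# group `dat` with .energy and .norm attributes; the energies are illustrative):
# pre_edge_baseline(dat.energy, norm=dat.norm, group=dat, form='voigt',
#                   elo=7108, ehi=7118)
# print(dat.prepeaks.centroid, '+/-', dat.prepeaks.delta_centroid)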