Example #1
    def __LorentzianFit(self):
        """
        Fitting by Lorentzian

        Lambda function will written in __LorentzianFunc

        Lorentzian parameter will be written in __fit_param

        amplitude: 'amplitude', center frequency: 'center', sigma: 'sigma', fwhm: 'fwhm', height: 'height'
        """

        x, y = self.__freq, self.__intensity / self.__reference_intensity

        mod = LorentzianModel()
        pars = mod.guess(1 - y, x=x)
        out = mod.fit(1 - y, pars, x=x)

        def loren(a, x, x0, sigma):
            # Lorentzian template function
            return a / np.pi * sigma / ((x - x0)**2 + sigma**2)

        self.__LorentzianFunc = lambda x: 1 - \
            loren(out.values['amplitude'], x,
                  out.values['center'], out.values['sigma'])
        self.__fit_param = out.values
        self.__CalcTemp(out.values['center'] * 1000)
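Note: rather than rebuilding the curve by hand from out.values, lmfit's LorentzianModel already exposes the derived parameters (fwhm = 2*sigma, height = amplitude/(pi*sigma)) and can evaluate the fitted model at any x. A minimal self-contained sketch on synthetic data (not the original spectrum):

import numpy as np
from lmfit.models import LorentzianModel

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 501)
y = 1 - 0.8 / np.pi * 1.0 / (x**2 + 1.0) + 0.01 * rng.normal(size=x.size)

mod = LorentzianModel()
out = mod.fit(1 - y, mod.guess(1 - y, x=x), x=x)

# Derived parameters come for free; no hand-written template is needed.
print(out.values['center'], out.values['fwhm'], out.values['height'])
fitted_dip = 1 - out.eval(x=x)  # same curve as __LorentzianFunc above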
Example #2
def calculateQ_1peak(filename, time, taper, lambda0, slope,
                     modulation_coefficient, rg):
    q = readlvm(filename)
    q = q.ravel()
    q = q / taper - 1
    valley = q.min()
    valley_index = np.argmin(q)
    q_valley = q[valley_index - rg:valley_index + rg]
    l_valley = time[valley_index - rg:valley_index + rg]

    q_valley = q_valley * -1
    peaks, peak_info = find_peaks(q_valley, height=-valley)
    #results_half = peak_widths(q_valley, peaks, rel_height=0.5)

    mod = LorentzianModel()
    x = np.asarray(list(range(0, 2 * rg)))
    pars = mod.guess(q_valley, x=x)
    out = mod.fit(q_valley, pars, x=x)

    res = out.fit_report()
    info = res.split("\n")
    variables = parse_info(info, 1)

    h_res, w_res, c_res = variables['height'], variables['fwhm'], variables[
        'center']
    print(h_res, w_res, c_res)
    l = lambda0 + l_valley[int(float(c_res))] * slope * modulation_coefficient
    d_lambda = (l_valley[1] -
                l_valley[0]) * float(w_res) * slope * modulation_coefficient
    Q = l / d_lambda

    return Q, float(h_res) * 100, l
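Note: parse_info above scrapes numbers out of the text of out.fit_report(), which is brittle; the same values are available as typed attributes on the ModelResult. A minimal sketch on synthetic data:

import numpy as np
from lmfit.models import LorentzianModel

x = np.linspace(0, 99, 200)
y = 3.0 / np.pi * 5.0 / ((x - 50.0)**2 + 25.0)  # synthetic inverted valley

mod = LorentzianModel()
out = mod.fit(y, mod.guess(y, x=x), x=x)

# Typed access replaces the report-string parsing:
print(out.params['height'].value, out.params['fwhm'].value,
      out.params['center'].value)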
Example #3
    def check_energy(self, f, s, deg):
        p = int(np.argwhere(s == np.max(s)))
        freq = f[p]
        f_mask = (freq - 5e2 < f) & (f < freq + 5e2)
        x = f[f_mask]
        y = s[f_mask]
        mod = LorentzianModel()
        pars = mod.guess(y, x=x)
        out = mod.fit(y, pars, x=x)
        power = si.simps(out.best_fit, x)

        l = const.c / self.F0
        # Calculate corresponding energy with formula: $ E = 0.5 m_{\mathrm{e}} [f_{\mathrm{r}} \lambda_\Re / (2 \cos\theta)]^2 $
        E_plasma = (0.5 * const.m_e *
                    (freq * l / (2 * np.cos(deg * np.pi / 180)))**2 / const.eV)
        res = 0
        if self.vol == 1:
            if 15.58 < E_plasma < 18.42:
                res = 1
            elif 22.47 < E_plasma < 23.75:
                res = 2
        else:
            if 20.29 < E_plasma < 22.05:
                res = 1
            elif 22.45 < E_plasma < 23.87:
                res = 2
            elif 25.38 < E_plasma < 27.14:
                res = 3
        return power, res, freq
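A worked numeric check of the formula in the comment above, with hypothetical values for the probe frequency F0 (an instance attribute not shown in this fragment), the fitted center frequency, and the scattering angle:

import numpy as np
from scipy import constants as const

F0 = const.c / 532e-9     # hypothetical probe frequency (532 nm light), Hz
freq = 7.0e12             # hypothetical fitted Lorentzian center, Hz
deg = 45.0                # scattering angle, degrees

lam = const.c / F0                                # probe wavelength, m
v = freq * lam / (2 * np.cos(np.deg2rad(deg)))    # scatterer speed, m/s
E_plasma = 0.5 * const.m_e * v**2 / const.eV      # kinetic energy, ~20 eV
print(E_plasma)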
Example #4
 def lorentzian_model_w_lims(self, peak_pos, sigma, min_max_range):
     x, y = self.background_correction()
     lmodel = LorentzianModel(prefix='l1_')  # calling lorentzian model
     pars = lmodel.guess(y, x=x)  # parameters - center, width, height
     pars['l1_center'].set(peak_pos,
                           min=min_max_range[0],
                           max=min_max_range[1])
     pars['l1_sigma'].set(sigma)
     result = lmodel.fit(y, pars, x=x, nan_policy='propagate')
     return result
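The min/max bounds above confine the peak center to a window during the fit. A self-contained sketch of the same pattern on synthetic data:

import numpy as np
from lmfit.models import LorentzianModel

x = np.linspace(0, 100, 501)
y = 2.0 / np.pi * 3.0 / ((x - 40.0)**2 + 9.0)  # peak at x = 40

lmodel = LorentzianModel(prefix='l1_')
pars = lmodel.guess(y, x=x)
pars['l1_center'].set(value=40.0, min=35.0, max=45.0)  # keep center in window
pars['l1_sigma'].set(value=3.0)
result = lmodel.fit(y, pars, x=x, nan_policy='propagate')
print(result.params['l1_center'].value)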
Example #5
def fit():
	global fitx
	global fity
	a.clear()  # clear the axes before replotting
	mod = LorentzianModel()
	pars = mod.guess(fity, x=fitx)
	out = mod.fit(fity, pars, x=fitx)
	a.plot(fitx, fity)
	a.plot(fitx, out.best_fit)  # draw the fitted curve as well
	dataPlot.draw()
Example #6
def params_Lorentzian(x, y):
    mod = LorentzianModel()
    params = mod.guess(y, x=x)
    print(params)
    out = mod.fit(y, params, x=x)
    print(out.fit_report(min_correl=0.3))
    init = mod.eval(params, x=x)
    plt.figure(2)
    plt.plot(x, y, 'b')
    plt.plot(x, init, 'k--')
    plt.plot(x, out.best_fit, 'r-')
Example #7
def lorentzian(x, y):

    # Lorentzian fit to a curve

    x_shifted = x - x.min() # Shifting to 0
    y_shifted = y - y.min() # Shifting to 0
    mod = LorentzianModel() # Setting model type
    pars = mod.guess(y_shifted, x=x_shifted) # Estimating initial parameters
    out = mod.fit(y_shifted, pars, x=x_shifted) # Performing the fit
    # print(out.fit_report(min_correl=0.25)) # Outputting best fit results
    print("Lorentzian FWHM = ", out.params['fwhm'].value) # Outputting only the FWHM
    out.plot() # Plotting fit
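Shifting by y.min() bakes the baseline into the data before fitting. An alternative sketch (not what this example does) is to fit the offset as part of a composite model:

import numpy as np
from lmfit.models import LorentzianModel, ConstantModel

x = np.linspace(-10, 10, 401)
y = 5.0 + 1.0 / np.pi * 2.0 / (x**2 + 4.0)  # peak riding on an offset of 5

peak = LorentzianModel()
offset = ConstantModel()
pars = peak.guess(y - y.min(), x=x)
pars.update(offset.make_params(c=y.min()))

out = (peak + offset).fit(y, pars, x=x)
print("Lorentzian FWHM = ", out.params['fwhm'].value)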
Example #8
    def fitcurve(self, w, f, xrange, nistlines, **kwargs):
        w = np.array(w)[xrange]
        f = np.array(f)[xrange]
        x = np.array(w)
        y = np.array(-f) + np.max(
            np.array(f))  #invert the spectrum upside down

        mod = LorentzianModel()
        pars = mod.guess(y, x=x)
        out = mod.fit(y, pars, x=x)
        report = out.fit_report(min_correl=0.25)
        """extract lorentzian parameters from the report output"""
        center = float(report.split('center:')[1].split(
            '+/-', 1)[0])  #x (wavelength) value of the maximum
        amp = float(report.split('amplitude:')[1].split(
            '+/-', 1)[0])  #y (flux) value of the maximum
        fwhm = float(report.split('fwhm:')[1].split(
            '+/-', 1)[0])  #full width at half maximum
        #get chi-squared value of the fit
        chi_sq = float(
            report.split('reduced chi-square')[0].split(
                'chi-square         = ')[1])
        iterations = int(
            report.split('# data points')[0].split('# function evals   = ')[1])

        #Plot inverted data points, Lorentzian curve, and absorption lines from NIST
        fig = plt.figure(figsize=(6, 4))
        plt.plot(x, y, 'bo', label='Inverted')
        plt.plot(x, out.best_fit, 'g-', label='Best Lorentz fit')
        line_error = []
        for i in range(len(nistlines)):
            line_error.append(center - nistlines[i])
            plt.axvline(x=nistlines[i],
                        ymin=0,
                        ymax=1,
                        linewidth=.5,
                        color='r')
        plt.axvline(x=center - fwhm, ymin=0, ymax=1, linewidth=.5, color='k')
        plt.axvline(x=center + fwhm, ymin=0, ymax=1, linewidth=.5, color='k')

        #Print summary for each fitted curve
        if kwargs.get('showCurv', True):
            plt.show()
            print('Center of function: ', center)
            print('Fit iterations: ', iterations)
            print('Chi-squared: ', chi_sq)
            print('Wavelength range: ', w[0], '-', w[-1])
        else:
            plt.close()  #don't show plot

        return center, amp, fwhm, chi_sq, line_error
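Splitting the report text on '+/-' also discards the uncertainties; a sketch of the same inversion trick on synthetic noisy data, reading values and 1-sigma errors straight from out.params:

import numpy as np
from lmfit.models import LorentzianModel

rng = np.random.default_rng(1)
w = np.linspace(6550.0, 6580.0, 301)  # wavelength grid
f = 100.0 - 30.0 / np.pi * 1.5 / ((w - 6563.0)**2 + 2.25)
f += rng.normal(scale=0.2, size=w.size)

y = -f + np.max(f)  # invert the absorption dip into a peak
mod = LorentzianModel()
out = mod.fit(y, mod.guess(y, x=w), x=w)
for name in ('center', 'amplitude', 'fwhm'):
    par = out.params[name]
    print(name, par.value, '+/-', par.stderr)
print('reduced chi-square:', out.redchi)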
Example #9
    def onePeakLorentzianFit(self):
        try:
            nRow, nCol = self.dockedOpt.fileInfo()

            self.gausFit.binFitData = plab.zeros((nRow, 0))
            self.gausFit.OnePkFitData = plab.zeros(
                (nCol, 6))  # Creates the empty 2D List
            for j in range(nCol):
                yy = self.dockedOpt.TT[:, j]
                xx = plab.arange(0, len(yy))

                x1 = xx[0]
                x2 = xx[-1]
                y1 = yy[0]
                y2 = yy[-1]
                m = (y2 - y1) / (x2 - x1)
                b = y2 - m * x2

                mod = LorentzianModel()
                pars = mod.guess(yy, x=xx)
                mod = mod + LinearModel()
                pars.add('intercept', value=b, vary=True)
                pars.add('slope', value=m, vary=True)
                out = mod.fit(yy, pars, x=xx)

                self.gausFit.OnePkFitData[j, :] = (
                    out.best_values['amplitude'], 0, out.best_values['center'],
                    0, out.best_values['sigma'], 0)

                # Saves fitted data of each fit
                fitData = out.best_fit
                binFit = np.reshape(fitData, (len(fitData), 1))
                self.gausFit.binFitData = np.concatenate(
                    (self.gausFit.binFitData, binFit), axis=1)

                if self.gausFit.continueGraphingEachFit == True:
                    self.gausFit.graphEachFitRawData(xx, yy, out.best_fit, 'L')

            return False
        except Exception:
            return True
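A compact, self-contained sketch of the peak-plus-linear-baseline pattern used above, with the baseline guessed from the endpoints:

import numpy as np
from lmfit.models import LorentzianModel, LinearModel

x = np.arange(200, dtype=float)
y = 0.05 * x + 3.0 + 40.0 / np.pi * 8.0 / ((x - 90.0)**2 + 64.0)

peak = LorentzianModel()
line = LinearModel()
slope = (y[-1] - y[0]) / (x[-1] - x[0])
intercept = y[0] - slope * x[0]
pars = line.make_params(slope=slope, intercept=intercept)
pars.update(peak.guess(y - (slope * x + intercept), x=x))

out = (peak + line).fit(y, pars, x=x)
print(out.best_values['center'], out.best_values['sigma'])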
Example #10
        def onclick(event):
            global X

            plt.clf()
            x = event.xdata
            y = event.ydata
            print('wait...')
            L1 = LorentzianModel(prefix='L1_')
            pars = L1.guess(psd, x=freq)
            pars.update(L1.make_params())
            pars['L1_center'].set(x, min=x - 10, max=x + 10)
            #pars['L1_sigma'].set(y, min=0)
            pars['L1_amplitude'].set(y, min=0)
            out = L1.fit(psd, pars, x=freq)
            X = out.best_fit
            plt.title(str(ordem) + " Lorentzian fit")
            plt.ylabel(r'PSD[ppm$^2$/$\mu$Hz]')
            plt.xlabel(r'Frequency [$\mu$Hz]')
            plt.minorticks_on()
            plt.tick_params(direction='in',
                            which='major',
                            top=True,
                            right=True,
                            left=True,
                            length=8,
                            width=1,
                            labelsize=15)
            plt.tick_params(direction='in',
                            which='minor',
                            top=True,
                            right=True,
                            length=5,
                            width=1,
                            labelsize=15)
            plt.tight_layout()
            plt.loglog(freq, psd, 'k', lw=0.5, alpha=0.5)
            plt.plot(freq, out.best_fit, 'k-', lw=0.5, alpha=0.5)
            plt.xlim(min(freq), max(freq))
            plt.ylim(min(psd), max(psd))
            plt.draw()
            print('Done')
Example #11
def test_convolution():
    r"""Convolution of function with delta dirac should return the function"""

    # Reference Lorentzian parameter values
    amplitude = 42.0
    sigma = 0.042
    center = 0.0003

    c1 = LorentzianModel(prefix='c1_')
    p = c1.make_params(amplitude=amplitude, center=center, sigma=sigma)
    c2 = DeltaDiracModel(prefix='c2_')
    p.update(c2.make_params(amplitude=1.0, center=0.0))

    e = 0.0004 * np.arange(-250, 1500)  # energies in meV
    # convolve Lorentzian with delta Dirac
    y1 = Convolve(c1, c2).eval(params=p, x=e)  # should be the lorentzian
    # reverse order, convolve delta Dirac with Lorentzian
    y2 = Convolve(c2, c1).eval(params=p, x=e)  # should be the lorentzian

    # We will fit a Lorentzian model against datasets y1 and y2
    m = LorentzianModel()
    all_params = 'amplitude sigma center'.split()
    for y in (y1, y2):
        params = m.guess(y, x=e)
        # Set initial model Lorentzian parameters far from optimal solution
        params['amplitude'].set(value=amplitude * 10)
        params['sigma'].set(value=sigma * 4)
        params['center'].set(value=center * 7)

        # fit Lorentzian model against dataset y
        r = m.fit(y, params, x=e)

        # Compare the reference Lorentzian parameters against
        # parameters of the fitted model
        assert_allclose([amplitude, sigma, center],
                        [r.params[p].value for p in all_params],
                        rtol=0.01,
                        atol=0.00001)
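DeltaDiracModel and Convolve appear to come from the surrounding project rather than lmfit itself. The identity this test relies on can be checked with plain numpy (a unit-area impulse at the kernel's central sample represents zero shift):

import numpy as np

e = np.linspace(-0.1, 0.6, 2001)  # odd number of points
de = e[1] - e[0]
lor = 42.0 / np.pi * 0.042 / ((e - 0.0003)**2 + 0.042**2)

delta = np.zeros_like(e)
delta[e.size // 2] = 1.0 / de  # unit-area discrete delta, zero shift

conv = np.convolve(lor, delta, mode='same') * de
assert np.allclose(conv, lor)  # convolution with a delta is the identity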
Example #12
def fit_lorentzian(y: np.ndarray, x: np.ndarray) -> np.ndarray:
    """Fits profile to lorentzian. Used with np.apply_along_axis.

    Parameters
    ----------
    y
        y values of profile to be fitted.
        Units: none.
    x
        x values of profiles to be fitted
        Units: Hz.

    Returns
    -------
    result_params
       A numpy array of the returning parameters
       ['Center', 'HWHM', 'max height', 'chi sq'].
       Units: [same as x, same as x, None, None].
    """
    model = LorentzianModel()

    center_guess = x[y.argmax()]
    HWHM_guess = x[y.argmax()] / 1000

    params = model.make_params(amplitude=y.max() * np.pi * HWHM_guess,
                               center=center_guess,
                               sigma=HWHM_guess)

    result = model.fit(y, params, x=x)
    result_params = np.array([
        result.params["center"].value,
        result.params["sigma"].value,
        result.params["height"].value,
        result.chisqr,
    ])
    return result_params
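The amplitude guess above follows from lmfit's Lorentzian parameterization, f(x) = (A/pi) * sigma / ((x - x0)**2 + sigma**2), whose peak height is A/(pi*sigma) and whose FWHM is 2*sigma. A quick numeric check:

import numpy as np
from lmfit.models import LorentzianModel

mod = LorentzianModel()
pars = mod.make_params(amplitude=6.0, center=0.0, sigma=2.0)
x = np.linspace(-20, 20, 100001)
y = mod.eval(pars, x=x)
print(y.max(), 6.0 / (np.pi * 2.0))  # both ~0.9549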
Example #13
            if len(x) == len(y) == 3:
                continue
            else:

                g_model = GaussianModel()

                g_pars = g_model.guess(y, x=x)
                g_out = g_model.fit(y, g_pars, x=x)
                g_qc = g_out.redchi

                # print(g_out.fit_report(min_correl=0.25))

                l_model = LorentzianModel()

                l_pars = l_model.guess(y, x=x)
                l_out = l_model.fit(y, l_pars, x=x)
                l_qc = l_out.redchi

                # print(l_out.fit_report(min_correl=0.25))

                v_model = VoigtModel()

                v_pars = v_model.guess(y, x=x)
                v_out = v_model.fit(y, v_pars, x=x)
                v_qc = v_out.redchi

                # print(v_out.fit_report(min_correl=0.25))

                # other models to try

                do_model = DampedOscillatorModel()
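A minimal sketch of the model-comparison idea in this fragment: fit several line shapes to the same data and keep the one with the lowest reduced chi-square:

import numpy as np
from lmfit.models import GaussianModel, LorentzianModel, VoigtModel

rng = np.random.default_rng(2)
x = np.linspace(-10, 10, 201)
y = 1.0 / np.pi * 1.5 / (x**2 + 2.25) + rng.normal(scale=0.002, size=x.size)

fits = {}
for model in (GaussianModel(), LorentzianModel(), VoigtModel()):
    out = model.fit(y, model.guess(y, x=x), x=x)
    fits[type(model).__name__] = out.redchi

print(fits, '->', min(fits, key=fits.get))  # Lorentzian should win here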
Example #14
def residuals(force_pickrange=False):
    datafolder = filedialog.askopenfilenames(
        initialdir="C:\\Users\\Josh\\IdeaProjects\\OpticalPumping",
        title="Select data for bulk plotting")
    for filename in datafolder:
        if 'data' in filename:
            global dat3
            name_ext = filename.split('/')[-1]
            name_no_ending = name_ext.split('.csv')[0]
            parts = name_no_ending.split('_')
            print(parts)

            dat1 = read_csv(
                "C:\\Users\\Josh\\IdeaProjects\\OpticalPumping\\Sweep_dat\\{}".
                format(name_ext),
                names=['xdat', 'ydat'])
            dat1 = dat1[np.abs(dat1['xdat'] - dat1['xdat'].mean()) <= (
                3 * dat1['xdat'].std())]
            xdat = np.array(dat1['xdat'])
            ydat = np.array(dat1['ydat'])
            if force_pickrange:
                fig1, ax1 = plt.subplots()
                plt.title('Pick Ranges for Exponential decay fit')
                Figure1, = ax1.plot(xdat, ydat, '.')
                print(xdat[5], xdat[-5])
                plt.xlim([xdat[5], xdat[-5]])
                Sel3 = RangeTool(xdat, ydat, Figure1, ax1, name_no_ending)
                fullscreen()
                plt.show()
                dat3 = Sel3.return_range()
            if not force_pickrange:
                try:
                    dat3 = read_csv(
                        "C:\\Users\\Josh\\IdeaProjects\\OpticalPumping\\Sweep_ranges\\{}"
                        .format(name_ext),
                        names=[
                            'Lower Bound', 'LowerIndex', 'Upper Bound',
                            'UpperIndex'
                        ])
                except FileNotFoundError:
                    fig1, ax1 = plt.subplots()
                    plt.title('Pick Ranges for Exponential decay fit')
                    Figure1, = ax1.plot(xdat, ydat, '.')
                    print(xdat[5], xdat[-5])
                    plt.xlim([xdat[5], xdat[-5]])
                    Sel3 = RangeTool(xdat, ydat, Figure1, ax1, name_no_ending)
                    fullscreen()
                    plt.show()
                    dat3 = Sel3.return_range()

            mdl = LorentzianModel()
            try:
                lowerindex = int(dat3.at[0, 'LowerIndex'])
                upperindex = int(dat3.at[0, 'UpperIndex'])
            except ValueError:
                pass
            try:
                params = mdl.guess(data=ydat[lowerindex:upperindex],
                                   x=xdat[lowerindex:upperindex])
                result = mdl.fit(ydat[lowerindex:upperindex],
                                 params,
                                 x=xdat[lowerindex:upperindex])
                resultdata = mdl.eval(x=xdat[lowerindex:upperindex],
                                      params=result.params)
                # print(result.fit_report())
                MultiPlot(xdat[lowerindex:upperindex],
                          ydat[lowerindex:upperindex], resultdata, xdat, ydat,
                          name_no_ending, parts[1])
            except UnboundLocalError:
                pass
Example #15
def calculateQ_2peaks(filename, time, taper, lambda0, slope,
                      modulation_coefficient, rg):
    q = readlvm(filename)
    q = q.ravel()
    q = q / taper - 1
    valley = q.min()
    valley_index = np.argmin(q)
    max_height = -1 * q[valley_index]
    q_valley = q[valley_index - rg:valley_index + rg]
    l_valley = time[valley_index - rg:valley_index + rg]

    q_valley = q_valley * -1
    peaks, peak_info = find_peaks(q_valley,
                                  height=max_height * 0.6,
                                  prominence=0.05,
                                  distance=50)
    #results_half = peak_widths(q_valley, peaks, rel_height=0.5)

    if len(peaks) != 2:
        print("Wrong peaks with num:", len(peaks))
        return None, None, None, None, None, None

    x = np.asarray(list(range(0, 2 * rg)))
    y = q_valley

    # One peak guess to get width
    g_mod = LorentzianModel()
    g_pars = g_mod.guess(y, x=x)
    g_out = g_mod.fit(y, g_pars, x=x)
    g_res = g_out.fit_report()
    g_info = g_res.split("\n")
    g_variables = parse_info(g_info, 1)
    guessedWidth = float(g_variables['fwhm']) / 2

    exp_mod = ExponentialModel(prefix='exp_')
    pars = exp_mod.guess(y, x=x)

    lorenz1 = LorentzianModel(prefix='l1_')
    pars.update(lorenz1.make_params())
    pars['l1_center'].set(peaks[0])
    pars['l1_sigma'].set(guessedWidth)
    pars['l1_amplitude'].set(np.pi * guessedWidth * q_valley[peaks[0]])

    lorenz2 = LorentzianModel(prefix='l2_')
    pars.update(lorenz2.make_params())
    pars['l2_center'].set(peaks[1])
    pars['l2_sigma'].set(guessedWidth)
    pars['l2_amplitude'].set(np.pi * guessedWidth * q_valley[peaks[1]])

    mod = lorenz1 + lorenz2 + exp_mod
    init = mod.eval(pars, x=x)
    out = mod.fit(y, pars, x=x)
    res = out.fit_report()
    info = res.split("\n")
    variables = parse_info(info, 2)

    l1_h_res, l1_w_res, l1_c_res = variables['l1_height'], variables[
        'l1_fwhm'], variables['l1_center']
    print(l1_h_res, l1_w_res, l1_c_res)
    l1 = lambda0 + l_valley[int(
        float(l1_c_res))] * slope * modulation_coefficient
    d_lambda1 = (l_valley[1] - l_valley[0]
                 ) * float(l1_w_res) * slope * modulation_coefficient
    Q1 = l1 / d_lambda1

    l2_h_res, l2_w_res, l2_c_res = variables['l2_height'], variables[
        'l2_fwhm'], variables['l2_center']
    print(l2_h_res, l2_w_res, l2_c_res)
    l2 = lambda0 + l_valley[int(
        float(l2_c_res))] * slope * modulation_coefficient
    d_lambda2 = (l_valley[1] - l_valley[0]
                 ) * float(l2_w_res) * slope * modulation_coefficient
    Q2 = l2 / d_lambda2

    return Q1, float(l1_h_res) * 100, l1, Q2, float(l2_h_res) * 100, l2
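A compact sketch of the two-Lorentzian pattern above on synthetic data, reading the prefixed results from out.params instead of parsing the fit report:

import numpy as np
from lmfit.models import LorentzianModel

x = np.arange(400, dtype=float)
y = (4.0 / np.pi * 6.0 / ((x - 150.0)**2 + 36.0)
     + 3.0 / np.pi * 6.0 / ((x - 260.0)**2 + 36.0))

l1 = LorentzianModel(prefix='l1_')
l2 = LorentzianModel(prefix='l2_')
pars = l1.make_params(center=150, sigma=6, amplitude=4)
pars.update(l2.make_params(center=260, sigma=6, amplitude=3))

out = (l1 + l2).fit(y, pars, x=x)
for pre in ('l1_', 'l2_'):
    print(pre, out.params[pre + 'center'].value,
          out.params[pre + 'fwhm'].value, out.params[pre + 'height'].value)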
Example #16
def FIT_PS(kic):
    """Essa rotina fita curvas Lorenzianas e Gaussianas no Power Spectrum somente com o kic da estrela"""
    path = 'TEMP/' + str(kic) + '/'
    file = np.loadtxt(str(path) + 'PS_' + str(kic) + '.txt')
    f = file[:, 0]
    p = file[:, 1]

    def fit_gauss(freq, psd):
        global X
        X = [], []
        fig = plt.figure(figsize=(8, 5), dpi=130)
        plt.loglog(freq, psd, 'k', lw=0.5, alpha=0.5)
        plt.title("Select the gaussian fit region")
        plt.ylabel(r'PSD[ppm$^2$/$\mu$Hz]')
        plt.xlabel(r'Frequency [$\mu$Hz]')
        plt.xlim(min(freq), max(freq))
        plt.ylim(min(psd), max(psd))

        def onclick(event):
            global X
            x = event.xdata
            X = np.append(X, x)
            if len(X) == 1:
                plt.plot((x, x), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))
                plt.draw()
            if len(X) == 2:
                plt.fill_betweenx((min(psd), max(psd)), (X[-2], X[-2]),
                                  (X[-1], X[-1]),
                                  color='red',
                                  alpha=0.2)
                plt.plot((X[-1], X[-1]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))
                plt.draw()
            if len(X) > 2:
                plt.clf()
                plt.title("Selecione a regiao do fit gaussiano")
                plt.ylabel('PSD[ppm$^2$/$\mu$Hz]')
                plt.xlabel('Frequency [$\mu$Hz]')
                plt.xlim(min(freq), max(freq))
                plt.ylim(min(psd), max(psd))

                plt.tight_layout()
                plt.loglog(freq, psd, 'k', lw=0.5, alpha=0.5)
                plt.plot((X[-2], X[-2]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.plot((X[-1], X[-1]), (min(psd), max(psd)),
                         'r--',
                         alpha=0.5,
                         lw=0.8)
                plt.fill_betweenx((min(psd), max(psd)), (X[-2], X[-2]),
                                  (X[-1], X[-1]),
                                  color='red',
                                  alpha=0.2)
                plt.draw()
            print('Last click: x = ', x)

        fig.canvas.mpl_connect('button_press_event', onclick)

        plt.tight_layout()
        plt.show()
        plt.clf()

        dados = (X[-2], X[-1])
        return min(dados), max(dados)

    def gauss(x, y):
        print(
            'Select the gaussian fit region as many times as necessary')
        l1, l2 = fit_gauss(x, y)
        index1 = np.where(abs(x - l1) == min(abs(x - l1)))[0]
        index1 = int(index1)
        index2 = np.where(abs(x - l2) == min(abs(x - l2)))[0]
        index2 = int(index2)
        print(index1, index2)
        x1 = x[index1:index2]
        y1 = y[index1:index2]
        mod = GaussianModel()
        pars = mod.guess(y1, x=x1)
        out = mod.fit(y1, pars, x=x1)
        xgauss = copy.copy(x1)
        ygauss = copy.copy(out.best_fit)
        #-#######
        x0 = x[0:index1]
        y0 = np.zeros(len(y[0:index1]))
        x2 = x[index2:]
        y2 = np.zeros(len(y[index2:]))
        x1 = np.append(x0, x1)
        x1 = np.append(x1, x2)
        y1 = np.append(y0, out.best_fit)
        y1 = np.append(y1, y2)
        print(out.fit_report(min_correl=0.25))

        mod = GaussianModel()
        pars = mod.guess(y1, x=x1)
        out = mod.fit(y1, pars, x=x1)

        #-#######

        return x1, out.best_fit, xgauss, ygauss

    y = p * 1.0e12
    x = f
    xx, yy, xgauss, ygauss = gauss(x, y)

    fiti = get_fit(x, y, 'Select the first')
    fiti2 = get_fit(x, y, 'Select the second')

    L1 = LorentzianModel(prefix='L1_')
    pars = L1.guess(y, x=x)
    out = L1.fit(y, pars, x=x)
    result = out.minimize('least_squares')
    plt.clf()
    plt.close('all')
    fig = plt.figure(figsize=(8, 5), dpi=130)
    plt.style.use('classic')
    plt.loglog(f, y, "#ff7f0e", lw=0.5)
    plt.plot(x, fiti, 'k--', lw=0.5)
    plt.plot(x, fiti2, 'k--', lw=0.5)
    plt.plot((x[0], x[-1]), (y[-1] / 2, y[-1] / 2), 'b--', lw=0.5)
    plt.plot(xgauss, ygauss, 'k--', lw=0.5)
    plt.plot(x, fiti + fiti2 - ((fiti + fiti2) / 2) + yy, 'r-', lw=0.8)
    plt.title('KIC ' + str(kic))
    plt.ylabel(r'PSD[ppm$^2$/$\mu$Hz]')
    plt.xlabel(r'Frequency [$\mu$Hz]')
    plt.ylim(1.0e-4, max(y))
    plt.xlim(min(f), max(f))
    plt.tight_layout()
    plt.savefig(str(path) + 'PS_KIC' + str(kic) + '_fit.png', dpi=270)
    plt.show()
Example #17
def main():
    regionname = sys.argv[1]  ## parameter passed
    short = regionname.replace(" ", "").lower()
    appName = config['common']['appName']
    spark = s.spark_session(appName)
    spark = s.setSparkConfBQ(spark)
    # Get data from BigQuery table
    start_date = "201001"
    end_date = "202001"
    lst = (spark.sql(
        "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') ")
           ).collect()
    print("\nStarted at")
    uf.println(lst)
    # Model predictions
    read_df = s.loadTableFromBQ(spark, config['GCPVariables']['sourceDataset'],
                                config['GCPVariables']['sourceTable'])
    df_10 = read_df.filter(F.date_format('Date',"yyyyMM").cast("Integer").between(f'{start_date}', f'{end_date}') & (lower(col("regionname")) == f'{regionname}'.lower())). \
            select(F.date_format('Date',"yyyyMM").cast("Integer").alias("Date") \
                 , round(col("flatprice")).alias("flatprice") \
                 , round(col("terracedprice")).alias("terracedprice")
                 , round(col("semidetachedprice")).alias("semidetachedprice")
                 , round(col("detachedprice")).alias("detachedprice"))
    print(df_10.toPandas().columns.tolist())
    p_dfm = df_10.toPandas()  # converting spark DF to Pandas DF

    # Non-Linear Least-Squares Minimization and Curve Fitting
    # Define model to be Lorentzian and deploy it
    model = LorentzianModel()
    n = len(p_dfm.columns)
    for i in range(n):
        if (p_dfm.columns[i] != 'Date'):  # yyyyMM is x axis in integer
            # it goes through the loop and plots individual average curves one by one and then prints a report for each y value
            vcolumn = p_dfm.columns[i]
            print(vcolumn)
            params = model.guess(p_dfm[vcolumn], x=p_dfm['Date'])
            result = model.fit(p_dfm[vcolumn], params, x=p_dfm['Date'])
            # plot the data points, initial fit and the best fit
            plt.plot(p_dfm['Date'], p_dfm[vcolumn], 'bo', label='data')
            plt.plot(p_dfm['Date'],
                     result.init_fit,
                     'k--',
                     label='initial fit')
            plt.plot(p_dfm['Date'], result.best_fit, 'r-', label='best fit')
            plt.legend(loc='upper left')
            plt.xlabel("Year/Month", fontdict=config['plot_fonts']['font'])
            plt.text(0.35,
                     0.55,
                     "Fit Based on Non-Linear Lorentzian Model",
                     transform=plt.gca().transAxes,
                     color="grey",
                     fontsize=9)
            if vcolumn == "flatprice": property = "Flat"
            if vcolumn == "terracedprice": property = "Terraced"
            if vcolumn == "semidetachedprice": property = "semi-detached"
            if vcolumn == "detachedprice": property = "detached"
            plt.ylabel(f"""{property} house prices in millions/GBP""",
                       fontdict=config['plot_fonts']['font'])
            plt.title(
                f"""Monthly {property} price fluctuations in {regionname}""",
                fontdict=config['plot_fonts']['font'])
            plt.xlim(200901, 202101)
            print(result.fit_report())
            plt.show()
            plt.close()
    lst = (spark.sql(
        "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') ")
           ).collect()
    print("\nFinished at")
    uf.println(lst)
Example #18
 def lorentzian_model(self):
     x, y = self.background_correction()
     lmodel = LorentzianModel(prefix='l1_')  # calling lorentzian model
     pars = lmodel.guess(y, x=x)  # parameters - center, width, height
     result = lmodel.fit(y, pars, x=x, nan_policy='propagate')
     return result
Example #19
    # Fit Gaussian
    #n = len(x)                          #the number of data
    #mean = sum(x*y)/n                   #note this correction
    #sigma = sum(y*(x-mean)**2)/n        #note this correction
    mean = 200
    sigma = 50
    popt, pcov = curve_fit(gaus, x, y, p0=[1, mean, sigma, 0.01], maxfev=1000)

    # Spline fitting
    #spline = interp1d(x, y, kind='cubic')

    # Fit Lorentzian
    model = LorentzianModel()
    params = model.guess(y, x=x)
    result = model.fit(y, params, x=x)

    # Logistic function
    x2 = x.copy()
    y2 = y.copy()
    y2[y.argmax():] = y.max()
    popt2, pcov2 = curve_fit(logifunc, x2, y2, p0=[300, 150, 0.1, 0])

    x3 = x.copy()
    y3 = y.copy()
    p = np.polyfit(x3, y3, 3)

    #result.plot_fit()
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plt.plot(x, y, 'b+:', label='data')
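This fragment calls gaus and logifunc without defining them; plausible (hypothetical) definitions consistent with the p0 shapes used above:

import numpy as np

def gaus(x, a, x0, sigma, c):
    # Gaussian with amplitude a, center x0, width sigma, offset c (hypothetical)
    return a * np.exp(-(x - x0)**2 / (2 * sigma**2)) + c

def logifunc(x, L, x0, k, c):
    # Logistic with plateau L, midpoint x0, steepness k, offset c (hypothetical)
    return L / (1 + np.exp(-k * (x - x0))) + c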
Example #20
def fit_pin(
    img_pin:    np.ndarray, 
    img_white:  np.ndarray, 
    side_mount: bool=False,
    ) -> float:
    """
    Description
    -----------
    Use a Lorentzian peak fit to locate the center of the peak.

    Parameters
    ----------
    img_pin: np.ndarray
        Tomo image containing a pin
    img_white: np.ndarray
        White field image, which should have the same FOV as img_pin, minus
        the pin
    side_mount: bool
        If the pin is mounted sideways (very rare due to stability issues),
        toggle this option to True.

    Returns
    -------
    float
        The sub-pixel position of the center of the Lorentzian peak that best
        fits the profile of the pin
    """
    _ax = 1 if side_mount else 0
    
    _pin = _safe_read_img(img_pin  ).astype(float)
    _bg  = _safe_read_img(img_white).astype(float)
    
    # detect corners
    cnrs = np.array(detect_slit_corners(_pin))
    l = int(cnrs[:,1].min())+13
    r = int(cnrs[:,1].max())-13
    t = int(cnrs[:,0].min())-13
    b = int(cnrs[:,0].max())+13
    
    _pf_pin = np.average(_pin, axis=_ax)[t:b] if side_mount else np.average(_pin, axis=_ax)[l:r]
    _pf_bg  = np.average(_bg,  axis=_ax)[t:b] if side_mount else np.average(_bg,  axis=_ax)[l:r]
    
    # rescale bg to match the pin image, to counter:
    # 1. beam intensity fluctuation
    # 2. exposure time change
    # 3. other artifacts that lead to sudden changes in image intensity
    _pf_bg = (_pf_bg-_pf_bg.min())/(_pf_bg.max()-_pf_bg.min())*(_pf_pin.max()-_pf_pin.min()) + _pf_pin.min()
    
    _pf = (_pf_bg**2 - _pf_pin**2)/_pf_bg**2
    _pf[_pf<0] = 0
    _pf = np.power(_pf/_pf.max(), 5)
    
    _mod = LorentzianModel(prefix='pin_')
    _fit = _mod.fit(_pf, x=np.arange(_pf.shape[0]), pin_center= np.argmax(_pf))
    
    if 0<_fit.best_values['pin_center']<_pf.shape[0]:
        return _fit.best_values['pin_center']+t if side_mount else _fit.best_values['pin_center']+l
    else:
        warnings.warn('Using Edge detection as backup, less accurate, might fail')
        peak = get_pin_tip(_safe_read_img(img_pin).astype(float))
        return peak[0] if side_mount else peak[1]
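The fit above seeds pin_center through a keyword argument: Model.fit accepts parameter values as keywords when no Parameters object is passed, falling back to the line-shape defaults for the rest. A minimal sketch:

import numpy as np
from lmfit.models import LorentzianModel

x = np.arange(200, dtype=float)
pf = 1.0 / np.pi * 4.0 / ((x - 120.0)**2 + 16.0)  # synthetic pin profile

mod = LorentzianModel(prefix='pin_')
fit = mod.fit(pf, x=x, pin_center=float(np.argmax(pf)))
print(fit.best_values['pin_center'])  # ~120.0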
Example #21
    def main():
        appName = "ukhouseprices"
        spark = s.spark_session(appName)
        spark.sparkContext._conf.setAll(v.settings)
        sc = s.sparkcontext()
        #
        # Get data from Hive table
        regionname = "Kensington and Chelsea"
        tableName = "ukhouseprices"
        fullyQualifiedTableName = v.DSDB + "." + tableName
        summaryTableName = v.DSDB + "." + "summary"
        start_date = "2010"
        end_date = "2020"
        lst = (spark.sql(
            "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') "
        )).collect()
        print("\nStarted at")
        uf.println(lst)
        # Model predictions
        spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
        #summary_df = spark.sql(f"""SELECT cast(date_format(datetaken, "yyyyMM") as int) as datetaken, flatprice, terracedprice, semidetachedprice, detachedprice FROM {summaryTableName}""")
        summary_df = spark.sql(
            f"""SELECT cast(Year as int) as year, AVGFlatPricePerYear, AVGTerracedPricePerYear, AVGSemiDetachedPricePerYear, AVGDetachedPricePerYear FROM {v.DSDB}.yearlyhouseprices"""
        )
        df_10 = summary_df.filter(
            col("year").between(f'{start_date}', f'{end_date}'))
        print(df_10.toPandas().columns.tolist())

        # show pandas column list ['Year', 'AVGPricePerYear', 'AVGFlatPricePerYear', 'AVGTerracedPricePerYear', 'AVGSemiDetachedPricePerYear', 'AVGDetachedPricePerYear']
        p_dfm = df_10.toPandas()  # converting spark DF to Pandas DF
        data = p_dfm.values

        # Non-Linear Least-Squares Minimization and Curve Fitting
        model = LorentzianModel()
        n = len(p_dfm.columns)
        for i in range(n):
            if p_dfm.columns[i] != 'year':  # year is x axis in integer
                # it goes through the loop and plots individual average curves one by one and then prints a report for each y value
                vcolumn = p_dfm.columns[i]
                print(vcolumn)
                params = model.guess(p_dfm[vcolumn], x=p_dfm['year'])
                result = model.fit(p_dfm[vcolumn], params, x=p_dfm['year'])
                result.plot_fit()

                # do linear regression here
                # Prepare data for Machine Learning.And we need two columns only — features and label(p_dfm.columns[i]]):
                inputCols = ['year']
                vectorAssembler = VectorAssembler(inputCols=inputCols,
                                                  outputCol='features')
                vhouse_df = vectorAssembler.transform(df_10)
                vhouse_df = vhouse_df.select(
                    ['features', 'AVGFlatPricePerYear'])
                vhouse_df.show(20)
                if vcolumn == "AVGFlatPricePerYear":
                    plt.xlabel("Year", fontdict=v.font)
                    plt.ylabel("Flat house prices in millions/GBP",
                               fontdict=v.font)
                    plt.title(
                        f"""Flat price fluctuations in {regionname} for the past 10 years """,
                        fontdict=v.font)
                    plt.text(0.35,
                             0.45,
                             "Best-fit based on Non-Linear Lorentzian Model",
                             transform=plt.gca().transAxes,
                             color="grey",
                             fontsize=10)
                    print(result.fit_report())
                    plt.xlim(left=2009)
                    plt.xlim(right=2022)
                    plt.show()
                    plt.close()
                elif vcolumn == "AVGTerracedPricePerYear":
                    plt.xlabel("Year", fontdict=v.font)
                    plt.ylabel("Terraced house prices in millions/GBP",
                               fontdict=v.font)
                    plt.title(
                        f"""Terraced house price fluctuations in {regionname} for the past 10 years """,
                        fontdict=v.font)
                    plt.text(0.35,
                             0.45,
                             "Best-fit based on Non-Linear Lorentzian Model",
                             transform=plt.gca().transAxes,
                             color="grey",
                             fontsize=10)
                    print(result.fit_report())
                    plt.show()
                    plt.close()
                elif vcolumn == "AVGSemiDetachedPricePerYear":
                    plt.xlabel("Year", fontdict=v.font)
                    plt.ylabel("semi-detached house prices in millions/GBP",
                               fontdict=v.font)
                    plt.title(
                        f"""semi-detached house price fluctuations in {regionname} for the past 10 years """,
                        fontdict=v.font)
                    plt.text(0.35,
                             0.45,
                             "Best-fit based on Non-Linear Lorentzian Model",
                             transform=plt.gca().transAxes,
                             color="grey",
                             fontsize=10)
                    print(result.fit_report())
                    plt.show()
                    plt.close()
                elif vcolumn == "AVGDetachedPricePerYear":
                    plt.xlabel("Year", fontdict=v.font)
                    plt.ylabel("detached house prices in millions/GBP",
                               fontdict=v.font)
                    plt.title(
                        f"""detached house price fluctuations in {regionname} for the past 10 years """,
                        fontdict=v.font)
                    plt.text(0.35,
                             0.45,
                             "Best-fit based on Non-Linear Lorentzian Model",
                             transform=plt.gca().transAxes,
                             color="grey",
                             fontsize=10)
                    print(result.fit_report())
                    plt.show()
                    plt.close()

        p_df = df_10.select('AVGFlatPricePerYear', 'AVGTerracedPricePerYear',
                            'AVGSemiDetachedPricePerYear',
                            'AVGDetachedPricePerYear').toPandas().describe()
        print(p_df)
        #axs = scatter_matrix(p_df, figsize=(10, 10))
        # Describe returns a DF where count,mean, min, std,max... are values of the index
        y = p_df.loc[['min', 'mean', 'max']]
        #y = p_df.loc[['averageprice', 'flatprice']]
        ax = y.plot(linewidth=2, colormap='jet', marker='.', markersize=20)
        plt.grid(True)
        plt.xlabel("UK House Price Index, January 2020", fontdict=v.font)
        plt.ylabel("Property Prices in millions/GBP", fontdict=v.font)
        plt.title(
            f"""Property price fluctuations in {regionname} for the past 10 years """,
            fontdict=v.font)
        plt.legend(p_df.columns)
        plt.show()
        plt.close()
        lst = (spark.sql(
            "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') "
        )).collect()
        print("\nFinished at")
        uf.println(lst)
Example #22
    col("datetaken").between(f'{start_date}', f'{end_date}'))
print(df_10.toPandas().columns.tolist())
p_dfm = df_10.toPandas()  # converting spark DF to Pandas DF

# Non-Linear Least-Squares Minimization and Curve Fitting

# Define model to be Lorentzian and deploy it
model = LorentzianModel()
n = len(p_dfm.columns)
for i in range(n):
    if p_dfm.columns[i] != 'datetaken':  # yyyyMM is x axis in integer
        # it goes through the loop and plots individual average curves one by one and then prints a report for each y value
        vcolumn = p_dfm.columns[i]
        print(vcolumn)
        params = model.guess(p_dfm[vcolumn], x=p_dfm['datetaken'])
        result = model.fit(p_dfm[vcolumn], params, x=p_dfm['datetaken'])
        result.plot_fit()
        plt.margins(0.15)
        plt.subplots_adjust(bottom=0.25)
        plt.xticks(rotation=90)
        plt.xlabel("year/month", fontdict=v.font)
        plt.text(0.35,
                 0.45,
                 "Best-fit based on Non-Linear Lorentzian Model",
                 transform=plt.gca().transAxes,
                 color="grey",
                 fontsize=9)
        plt.xlim(left=200900)
        plt.xlim(right=202100)
        if vcolumn == "flatprice": property = "Flat"
        if vcolumn == "terracedprice": property = "Terraced"
Example #23
def main():
    regionname = sys.argv[1]  ## parameter passed
    short = regionname.replace(" ", "").lower()
    print(f"""Getting plots for {regionname}""")
    appName = "ukhouseprices"
    spark = s.spark_session(appName)
    sc = s.sparkcontext()
    #
    # Get data from BigQuery table
    summaryTableName = v.fullyQualifiedoutputTableId
    start_date = "201001"
    end_date = "202001"
    lst = (spark.sql(
        "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') ")
           ).collect()
    print("\nStarted at")
    uf.println(lst)
    # Model predictions
    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
    # read data from the Bigquery table summary
    print("\nreading data from " + v.fullyQualifiedoutputTableId)

    summary_df = spark.read. \
                  format("bigquery"). \
                  option("credentialsFile",v.jsonKeyFile). \
                  option("project", v.projectId). \
                  option("parentProject", v.projectId). \
                  option("dataset", v.targetDataset). \
                  option("table", v.targetTable). \
        load()
    df_10 = summary_df.filter(F.col("Date").between(f'{start_date}', f'{end_date}')). \
        select(F.date_format('Date',"yyyyMM").cast("Integer").alias("date"), 'flatprice', 'terracedprice', 'semidetachedprice', 'detachedprice')
    df_10.printSchema()
    print(df_10.toPandas().columns.tolist())
    p_dfm = df_10.toPandas()  # converting spark DF to Pandas DF
    # Non-Linear Least-Squares Minimization and Curve Fitting

    # Define model to be Lorentzian and deploy it
    model = LorentzianModel()
    n = len(p_dfm.columns)
    for i in range(n):
        if p_dfm.columns[i] != "date":  # yyyyMM is x axis in integer
            # it goes through the loop and plots individual average curves one by one and then prints a report for each y value
            vcolumn = p_dfm.columns[i]
            print(vcolumn)
            params = model.guess(p_dfm[vcolumn], x=p_dfm['date'])
            result = model.fit(p_dfm[vcolumn], params, x=p_dfm['date'])
            result.plot_fit()
            plt.margins(0.15)
            plt.subplots_adjust(bottom=0.25)
            plt.xticks(rotation=90)
            plt.xlabel("year/month", fontdict=v.font)
            plt.text(0.35,
                     0.45,
                     "Best-fit based on Non-Linear Lorentzian Model",
                     transform=plt.gca().transAxes,
                     color="grey",
                     fontsize=9)
            plt.xlim(left=200900)
            plt.xlim(right=202100)
            if vcolumn == "flatprice": property = "Flat"
            if vcolumn == "terracedprice": property = "Terraced"
            if vcolumn == "semidetachedprice": property = "semi-detached"
            if vcolumn == "detachedprice": property = "detached"
            plt.ylabel(f"""{property} house prices in millions/GBP""",
                       fontdict=v.font)
            plt.title(
                f"""Monthly {property} prices fluctuations in {regionname}""",
                fontdict=v.font)
            print(result.fit_report())
            plt.show()
            plt.close()

    lst = (spark.sql(
        "SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') ")
           ).collect()
    print("\nFinished at")
    uf.println(lst)
Example #24
def plot_data(data):
    signal_format = 'hist'  # 'line' or 'hist' or None
    Total_SM_label = False  # for Total SM black line in plot and legend
    plot_label = r'$Z \rightarrow ll$'
    signal_label = plot_label

    signal = None
    for s in ZBosonSamples.samples.keys():
        if s not in stack_order and s != 'data': signal = s

    for x_variable, hist in ZBosonHistograms.hist_dict.items():

        h_bin_width = hist['bin_width']
        h_num_bins = hist['num_bins']
        h_xrange_min = hist['xrange_min']
        h_xlabel = hist['xlabel']
        h_log_y = hist['log_y']
        h_y_label_x_position = hist['y_label_x_position']
        h_legend_loc = hist['legend_loc']
        h_log_top_margin = hist[
            'log_top_margin']  # to decrease the separation between data and the top of the figure, remove a 0
        h_linear_top_margin = hist[
            'linear_top_margin']  # to decrease the separation between data and the top of the figure, pick a number closer to 1

        bins = [h_xrange_min + x * h_bin_width for x in range(h_num_bins + 1)]
        bin_centres = [
            h_xrange_min + h_bin_width / 2 + x * h_bin_width
            for x in range(h_num_bins)
        ]

        if store_histograms:
            stored_histos = {}

        if load_histograms:  # not doing line for now
            npzfile = np.load(f'histograms/{x_variable}_hist_{fraction}.npz')
            # load bins
            loaded_bins = npzfile['bins']
            if not np.array_equal(bins, loaded_bins):
                print('Bins mismatch. That\'s a problem')
                raise Exception

            # load data
            data_x = npzfile['data']
            data_x_errors = np.sqrt(data_x)
            # load weighted signal
            signal_x_reshaped = npzfile[signal]
            signal_color = ZBosonSamples.samples[signal]['color']
            # load backgrounds
            mc_x_heights_list = []
            # mc_weights = []
            mc_colors = []
            mc_labels = []
            mc_x_tot = np.zeros(len(bin_centres))
            for s in stack_order:
                if not s in npzfile: continue
                mc_labels.append(s)
                # mc_x.append(data[s][x_variable].values)
                mc_colors.append(ZBosonSamples.samples[s]['color'])
                # mc_weights.append(data[s].totalWeight.values)
                mc_x_heights = npzfile[s]
                mc_x_heights_list.append(mc_x_heights)
                mc_x_tot = np.add(mc_x_tot, mc_x_heights)
            mc_x_err = np.sqrt(mc_x_tot)

        else:
            # ======== This creates histograms for the raw data events ======== #
            # no weights necessary (it's data)
            data_x, _ = np.histogram(data['data'][x_variable].values,
                                     bins=bins)
            data_x_errors = np.sqrt(data_x)
            if store_histograms:
                stored_histos[
                    'data'] = data_x  # saving histograms for later loading

            # ======== This creates histograms for signal simulation (Z->ll) ======== #
            # need to consider the event weights here
            signal_x = None
            if signal_format == 'line':
                signal_x, _ = np.histogram(
                    data[signal][x_variable].values,
                    bins=bins,
                    weights=data[signal].totalWeight.values)
            elif signal_format == 'hist':
                signal_x = data[signal][x_variable].values
                signal_weights = data[signal].totalWeight.values
                signal_color = ZBosonSamples.samples[signal]['color']
                signal_x_reshaped, _ = np.histogram(
                    data[signal][x_variable].values,
                    bins=bins,
                    weights=data[signal].totalWeight.values)
                if store_histograms:
                    stored_histos[
                        signal] = signal_x_reshaped  # saving histograms for later loading

            # ======== This creates histograms for all of the background simulation ======== #
            # weights are also necessary here, since we produce an arbitrary number of MC events
            mc_x_heights_list = []
            mc_weights = []
            mc_colors = []
            mc_labels = []
            mc_x_tot = np.zeros(len(bin_centres))

            for s in stack_order:
                if not s in data: continue
                if data[s].empty: continue
                mc_labels.append(s)
                # mc_x.append(data[s][x_variable].values)
                mc_colors.append(ZBosonSamples.samples[s]['color'])
                mc_weights.append(data[s].totalWeight.values)
                mc_x_heights, _ = np.histogram(
                    data[s][x_variable].values,
                    bins=bins,
                    weights=data[s].totalWeight.values)  #mc_heights?
                mc_x_heights_list.append(mc_x_heights)
                mc_x_tot = np.add(mc_x_tot, mc_x_heights)
                if store_histograms:
                    stored_histos[
                        s] = mc_x_heights  #saving histograms for later loading

            mc_x_err = np.sqrt(mc_x_tot)

        data_x_without_bkg = data_x - mc_x_tot

        # data fit

        # get rid of zero errors (maybe messy) : TODO a better way to do this?
        for i, e in enumerate(data_x_errors):
            if e == 0: data_x_errors[i] = np.inf
        if 0 in data_x_errors:
            print('please don\'t divide by zero')
            raise Exception

        bin_centres_array = np.asarray(bin_centres)

        # *************
        # Models
        # *************

        doniach_mod = DoniachModel()
        pars_doniach = doniach_mod.guess(data_x_without_bkg,
                                         x=bin_centres_array,
                                         amplitude=2100000 * fraction,
                                         center=90.5,
                                         sigma=2.3,
                                         height=10000 * fraction / 0.01,
                                         gamma=0)
        doniach = doniach_mod.fit(data_x_without_bkg,
                                  pars_doniach,
                                  x=bin_centres_array,
                                  weights=1 / data_x_errors)
        params_dict_doniach = doniach.params.valuesdict()

        gaussian_mod = GaussianModel()
        pars_gaussian = gaussian_mod.guess(data_x_without_bkg,
                                           x=bin_centres_array,
                                           amplitude=6000000 * fraction,
                                           center=90.5,
                                           sigma=3)
        gaussian = gaussian_mod.fit(data_x_without_bkg,
                                    pars_gaussian,
                                    x=bin_centres_array,
                                    weights=1 / data_x_errors)
        params_dict_gaussian = gaussian.params.valuesdict()

        lorentzian_mod = LorentzianModel()
        pars = lorentzian_mod.guess(data_x_without_bkg,
                                    x=bin_centres_array,
                                    amplitude=6000000 * fraction,
                                    center=90.5,
                                    sigma=2.9,
                                    gamma=1)
        lorentzian = lorentzian_mod.fit(data_x_without_bkg,
                                        pars,
                                        x=bin_centres_array,
                                        weights=1 / data_x_errors)
        params_dict_lorentzian = lorentzian.params.valuesdict()

        voigt_mod = VoigtModel()
        pars = voigt_mod.guess(data_x_without_bkg,
                               x=bin_centres_array,
                               amplitude=6800000 * fraction,
                               center=90.5,
                               sigma=1.7)
        voigt = voigt_mod.fit(data_x_without_bkg,
                              pars,
                              x=bin_centres_array,
                              weights=1 / data_x_errors)
        params_dict_voigt = voigt.params.valuesdict()

        voigt_mod_2 = VoigtModel()
        polynomial = PolynomialModel(2)
        pars = voigt_mod_2.guess(data_x_without_bkg,
                                 x=bin_centres_array,
                                 amplitude=6800000 * fraction,
                                 center=90.5,
                                 sigma=1.7)
        pars += polynomial.guess(data_x_without_bkg,
                                 x=bin_centres_array,
                                 c0=data_x_without_bkg.max(),
                                 c1=0,
                                 c2=0)
        voigt_poly_mod = voigt_mod_2 + polynomial
        voigt_poly = voigt_poly_mod.fit(data_x_without_bkg,
                                        pars,
                                        x=bin_centres_array,
                                        weights=1 / data_x_errors)
        params_dict_voigt_poly = voigt_poly.params.valuesdict()

        if store_histograms:
            # save all histograms in npz format. different file for each variable. bins are common
            os.makedirs('histograms', exist_ok=True)
            np.savez(f'histograms/{x_variable}_hist.npz',
                     bins=bins,
                     **stored_histos)
            # ======== Now we start doing the fit ======== #

        # *************
        # Main plot
        # *************
        plt.clf()
        plt.axes([0.1, 0.3, 0.85, 0.65])  # (left, bottom, width, height)
        main_axes = plt.gca()
        main_axes.errorbar(x=bin_centres,
                           y=data_x,
                           yerr=data_x_errors,
                           fmt='ko',
                           label='Data')
        # this effectively makes a stacked histogram
        bottoms = np.zeros_like(bin_centres)
        for mc_x_height, mc_color, mc_label in zip(mc_x_heights_list,
                                                   mc_colors, mc_labels):
            main_axes.bar(bin_centres,
                          mc_x_height,
                          bottom=bottoms,
                          color=mc_color,
                          label=mc_label,
                          width=h_bin_width * 1.01)
            bottoms = np.add(bottoms, mc_x_height)

        main_axes.plot(bin_centres, doniach.best_fit, '-r', label='Doniach')
        main_axes.plot(bin_centres, gaussian.best_fit, '-g', label='Gaussian')
        main_axes.plot(bin_centres,
                       lorentzian.best_fit,
                       '-y',
                       label='Lorentzian')
        main_axes.plot(bin_centres, voigt.best_fit, '--', label='Voigt')
        main_axes.plot(bin_centres,
                       voigt_poly.best_fit,
                       '-v',
                       label='Voigt and Polynomial')

        if Total_SM_label:
            totalSM_handle, = main_axes.step(bins,
                                             np.insert(mc_x_tot, 0,
                                                       mc_x_tot[0]),
                                             color='black')
        if signal_format == 'line':
            main_axes.step(bins,
                           np.insert(signal_x, 0, signal_x[0]),
                           color=ZBosonSamples.samples[signal]['color'],
                           linestyle='--',
                           label=signal)
        elif signal_format == 'hist':
            main_axes.bar(bin_centres,
                          signal_x_reshaped,
                          bottom=bottoms,
                          color=signal_color,
                          label=signal,
                          width=h_bin_width * 1.01)
            bottoms = np.add(bottoms, signal_x_reshaped)
        main_axes.bar(bin_centres,
                      2 * mc_x_err,
                      bottom=bottoms - mc_x_err,
                      alpha=0.5,
                      color='none',
                      hatch="////",
                      width=h_bin_width * 1.01,
                      label='Stat. Unc.')

        mc_x_tot = bottoms

        main_axes.set_xlim(left=h_xrange_min, right=bins[-1])
        main_axes.xaxis.set_minor_locator(
            AutoMinorLocator())  # separation of x axis minor ticks
        main_axes.tick_params(which='both',
                              direction='in',
                              top=True,
                              labeltop=False,
                              labelbottom=False,
                              right=True,
                              labelright=False)

        if h_log_y:
            main_axes.set_yscale('log')
            smallest_contribution = mc_x_heights_list[
                0]  # TODO: mc_heights or mc_x_heights
            smallest_contribution.sort()
            bottom = smallest_contribution[-2]
            if bottom == 0: bottom = 0.001  # log doesn't like zero
            top = np.amax(data_x) * h_log_top_margin
            main_axes.set_ylim(bottom=bottom, top=top)
            main_axes.yaxis.set_major_formatter(CustomTicker())
            locmin = LogLocator(base=10.0,
                                subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
                                      0.9),
                                numticks=12)
            main_axes.yaxis.set_minor_locator(locmin)
        else:
            main_axes.set_ylim(
                bottom=0,
                top=(np.amax(data_x) + math.sqrt(np.amax(data_x))) *
                h_linear_top_margin)
            main_axes.yaxis.set_minor_locator(AutoMinorLocator())
            main_axes.yaxis.get_major_ticks()[0].set_visible(False)

        plt.text(0.015,
                 0.97,
                 'ATLAS Open Data',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 fontsize=13)
        plt.text(0.015,
                 0.9,
                 'for education',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 style='italic',
                 fontsize=8)
        plt.text(0.015,
                 0.86,
                 r'$\sqrt{s}=13\,\mathrm{TeV},\;\int L\,dt=$' +
                 str(lumi_used) + r'$\,\mathrm{fb}^{-1}$',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes)
        plt.text(0.015,
                 0.78,
                 plot_label,
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes)
        plt.text(0.015,
                 0.72,
                 r'$m_Z = $' + str(round(params_dict_doniach['center'], 4)) +
                 ' GeV',
                 ha="left",
                 va="top",
                 family='sans-serif',
                 transform=main_axes.transAxes,
                 fontsize=10)

        # Create new legend handles but use the colors from the existing ones
        handles, labels = main_axes.get_legend_handles_labels()
        if signal_format == 'line':
            handles[labels.index(signal)] = Line2D(
                [], [],
                c=ZBosonSamples.samples[signal]['color'],
                linestyle='dashed')
        uncertainty_handle = mpatches.Patch(facecolor='none', hatch='////')
        if Total_SM_label:
            handles.append((totalSM_handle, uncertainty_handle))
            labels.append('Total SM')
        else:
            handles.append(uncertainty_handle)
            labels.append('Stat. Unc.')

        # specify order within legend
        new_handles = [
            handles[labels.index('Data')], handles[labels.index('Doniach')],
            handles[labels.index('Gaussian')],
            handles[labels.index('Lorentzian')],
            handles[labels.index('Voigt')],
            handles[labels.index('Voigt and Polynomial')]
        ]
        new_labels = [
            'Data', 'Doniach', 'Gaussian', 'Lorentzian', 'Voigt',
            'Voigt and Polynomial'
        ]
        for s in reversed(stack_order):
            if s not in labels:
                continue
            new_handles.append(handles[labels.index(s)])
            new_labels.append(s)
        if signal is not None:
            new_handles.append(handles[labels.index(signal)])
            new_labels.append(signal_label)
        if Total_SM_label:
            new_handles.append(handles[labels.index('Total SM')])
            new_labels.append('Total SM')
        else:
            new_handles.append(handles[labels.index('Stat. Unc.')])
            new_labels.append('Stat. Unc.')
        main_axes.legend(handles=new_handles,
                         labels=new_labels,
                         frameon=False,
                         loc=h_legend_loc,
                         fontsize='x-small')

        # *************
        # Data / MC plot
        # *************

        plt.axes([0.1, 0.1, 0.85, 0.2])  # (left, bottom, width, height)
        ratio_axes = plt.gca()
        ratio_axes.yaxis.set_major_locator(
            MaxNLocator(nbins='auto', symmetric=True))
        ratio_axes.errorbar(
            x=bin_centres, y=data_x / signal_x_reshaped, fmt='ko'
        )  # TODO: yerr=data_x_errors produce error bars that are too big
        ratio_axes.set_xlim(left=h_xrange_min, right=bins[-1])
        ratio_axes.plot(bins, np.ones(len(bins)), color='k')
        ratio_axes.xaxis.set_minor_locator(
            AutoMinorLocator())  # separation of x axis minor ticks
        ratio_axes.xaxis.set_label_coords(
            0.9, -0.2)  # (x,y) of x axis label # 0.2 down from x axis
        ratio_axes.set_xlabel(h_xlabel, fontname='sans-serif', fontsize=11)
        ratio_axes.set_ylim(bottom=0, top=2)
        ratio_axes.set_yticks([0, 1])
        ratio_axes.tick_params(which='both',
                               direction='in',
                               top=True,
                               labeltop=False,
                               right=True,
                               labelright=False)
        ratio_axes.yaxis.set_minor_locator(AutoMinorLocator())
        ratio_axes.set_ylabel(r'Data / Pred',
                              fontname='sans-serif',
                              x=1,
                              fontsize=11)

        # Generic features for both plots
        main_axes.yaxis.set_label_coords(h_y_label_x_position, 1)
        ratio_axes.yaxis.set_label_coords(h_y_label_x_position, 0.5)

        plt.savefig("ZBoson_" + x_variable + ".pdf", bbox_inches='tight')

        # ========== Statistics ==========
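        # Note: mychisqr is a helper defined earlier in the original file
        # (see the sketch after this example); lmfit's result.nfree is the
        # number of data points minus the number of varied parameters, so
        # chisqr / nfree below is the reduced chi-square.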

        # ========== Doniach ==========
        chisqr_doniach = mychisqr(doniach.residual, doniach.best_fit)
        redchisqr_doniach = chisqr_doniach / doniach.nfree
        center_doniach = params_dict_doniach['center']
        sigma_doniach = params_dict_doniach['sigma']

        rel_unc_center_doniach = doniach.params[
            'center'].stderr / doniach.params['center'].value
        rel_unc_sigma_doniach = doniach.params[
            'sigma'].stderr / doniach.params['sigma'].value

        # ========== Gaussian ==========
        chisqr_gaussian = mychisqr(gaussian.residual, gaussian.best_fit)
        redchisqr_gaussian = chisqr_gaussian / gaussian.nfree
        center_gaussian = params_dict_gaussian['center']
        sigma_gaussian = params_dict_gaussian['sigma']

        rel_unc_center_gaussian = gaussian.params[
            'center'].stderr / gaussian.params['center'].value
        rel_unc_sigma_gaussian = gaussian.params[
            'sigma'].stderr / gaussian.params['sigma'].value

        # ========== Lorentzian ==========
        chisqr_lorentzian = mychisqr(lorentzian.residual, lorentzian.best_fit)
        redchisqr_lorentzian = chisqr_lorentzian / lorentzian.nfree
        center_lorentzian = params_dict_lorentzian['center']
        sigma_lorentzian = params_dict_lorentzian['sigma']

        rel_unc_center_lorentzian = lorentzian.params[
            'center'].stderr / lorentzian.params['center'].value
        rel_unc_sigma_lorentzian = lorentzian.params[
            'sigma'].stderr / lorentzian.params['sigma'].value

        # ========== Voigt ==========
        chisqr_voigt = mychisqr(voigt.residual, voigt.best_fit)
        redchisqr_voigt = chisqr_voigt / voigt.nfree
        center_voigt = params_dict_voigt['center']
        sigma_voigt = params_dict_voigt['sigma']

        rel_unc_center_voigt = voigt.params['center'].stderr / voigt.params[
            'center'].value
        rel_unc_sigma_voigt = voigt.params['sigma'].stderr / voigt.params[
            'sigma'].value

        # ========== Voigt and Polynomial ==========
        chisqr_voigt_poly = mychisqr(voigt_poly.residual, voigt_poly.best_fit)
        redchisqr_voigt_poly = chisqr_voigt_poly / voigt_poly.nfree
        center_voigt_poly = params_dict_voigt_poly['center']
        sigma_voigt_poly = params_dict_voigt_poly['sigma']

        rel_unc_center_voigt_poly = voigt_poly.params[
            'center'].stderr / voigt_poly.params['center'].value
        rel_unc_sigma_voigt_poly = voigt_poly.params[
            'sigma'].stderr / voigt_poly.params['sigma'].value

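        # NB: the 'center' and 'sigma' entries below store the *relative
        # uncertainties* computed above, not the fitted central values.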
        df_dict = {
            'fraction': [fraction],
            'luminosity': [lumi_used],
            'doniach chisqr': [chisqr_doniach],
            'doniach redchisqr': [redchisqr_doniach],
            'doniach center': [rel_unc_center_doniach],
            'doniach sigma': [rel_unc_sigma_doniach],
            'gaussian chisqr': [chisqr_gaussian],
            'gaussian redchisqr': [redchisqr_gaussian],
            'gaussian center': [rel_unc_center_gaussian],
            'gaussian sigma': [rel_unc_sigma_gaussian],
            'lorentzian chisqr': [chisqr_lorentzian],
            'lorentzian redchisqr': [redchisqr_lorentzian],
            'lorentzian center': [rel_unc_center_lorentzian],
            'lorentzian sigma': [rel_unc_sigma_lorentzian],
            'voigt chisqr': [chisqr_voigt],
            'voigt redchisqr': [redchisqr_voigt],
            'voigt center': [rel_unc_center_voigt],
            'voigt sigma': [rel_unc_sigma_voigt],
            'voigt poly chisqr': [chisqr_voigt_poly],
            'voigt poly redchisqr': [redchisqr_voigt_poly],
            'voigt poly center': [rel_unc_center_voigt_poly],
            'voigt poly sigma': [rel_unc_sigma_voigt_poly]
        }

        temp = pd.DataFrame(df_dict)

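        # Append this run's fit statistics to a running CSV.  Note that
        # pd.read_csv raises FileNotFoundError if fit_results.csv does not
        # exist yet, so the file must be created before the first run.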
        fit_results = pd.read_csv('fit_results.csv')

        fit_results_concat = pd.concat([fit_results, temp])

        fit_results_concat.to_csv('fit_results.csv', index=False)

        print("=====================================================")
        print("Statistics for the Doniach Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_doniach))
        print("chi^2/dof = " + str(redchisqr_doniach))
        print("center = " + str(center_doniach))
        print("sigma = " + str(sigma_doniach))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_doniach))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_doniach))

        print("\n")
        print("=====================================================")
        print("Statistics for the Gaussian Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_gaussian))
        print("chi^2/dof = " + str(redchisqr_gaussian))
        print("center = " + str(center_gaussian))
        print("sigma = " + str(sigma_gaussian))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_gaussian))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_gaussian))

        print("\n")
        print("=====================================================")
        print("Statistics for the Lorentzian Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_lorentzian))
        print("chi^2/dof = " + str(redchisqr_lorentzian))
        print("center = " + str(center_lorentzian))
        print("sigma = " + str(sigma_lorentzian))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_lorentzian))
        print("Relative Uncertainty of Sigma = " +
              str(rel_unc_sigma_lorentzian))

        print("\n")
        print("=====================================================")
        print("Statistics for the Voigt Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_voigt))
        print("chi^2/dof = " + str(redchisqr_voigt))
        print("center = " + str(center_voigt))
        print("sigma = " + str(sigma_voigt))
        print("Relative Uncertainty of Center = " + str(rel_unc_center_voigt))
        print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_voigt))

        print("\n")
        print("=====================================================")
        print("Statistics for the Voigt and Polynomial Model: ")
        print("\n")
        print("chi^2 = " + str(chisqr_voigt_poly))
        print("chi^2/dof = " + str(redchisqr_voigt_poly))
        print("center = " + str(center_voigt_poly))
        print("sigma = " + str(sigma_voigt_poly))
        print("Relative Uncertainty of Center = " +
              str(rel_unc_center_voigt_poly))
        print("Relative Uncertainty of Sigma = " +
              str(rel_unc_sigma_voigt_poly))

        # ========= Plotting Residuals =========

        # The five residual plots are identical apart from the fit result,
        # so draw them in a single loop.
        residual_plots = [('Doniach', doniach, 'doniach'),
                          ('Gaussian', gaussian, 'gaussian'),
                          ('Lorentzian', lorentzian, 'lorentzian'),
                          ('Voigt', voigt, 'voigt'),
                          ('Voigt and Polynomial', voigt_poly, 'voigt_poly')]

        for model_name, fit_result, file_tag in residual_plots:
            plt.clf()
            plt.axes([0.1, 0.3, 0.85, 0.65])  # (left, bottom, width, height)
            main_axes = plt.gca()

            main_axes.set_title(model_name + " Model Residuals")
            main_axes.errorbar(x=bin_centres, y=fit_result.residual, fmt='ko')

            main_axes.set_xlim(left=h_xrange_min, right=bins[-1])
            main_axes.xaxis.set_minor_locator(
                AutoMinorLocator())  # separation of x axis minor ticks
            main_axes.tick_params(which='both',
                                  direction='in',
                                  top=True,
                                  labeltop=False,
                                  right=True,
                                  labelright=False)

            main_axes.set_xlabel(r'$M_Z$ [GeV]')
            main_axes.xaxis.get_major_ticks()[0].set_visible(False)

            main_axes.set_ylim(bottom=1.05 * fit_result.residual.min(),
                               top=1.05 * fit_result.residual.max())
            main_axes.yaxis.set_minor_locator(AutoMinorLocator())
            main_axes.yaxis.get_major_ticks()[0].set_visible(False)
            main_axes.set_ylabel("Residual")

            plt.savefig("plots/" + file_tag + "_residuals.pdf",
                        bbox_inches='tight')

    if load_histograms: return None, None
    return signal_x, mc_x_tot
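
The statistics section above calls a mychisqr helper defined earlier in the original file. From its call sites, mychisqr(residual, best_fit), it appears to be a Pearson-style chi-square; the definition below is an inference from that usage, not the author's confirmed implementation:

import numpy as np

def mychisqr(residual, expected):
    # Pearson-style chi-square: squared residuals weighted by the fitted
    # (expected) bin contents.  Assumed definition, inferred from usage.
    return np.sum(residual**2 / expected)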
Exemplo n.º 25
0
from lmfit.models import LorentzianModel


def fittingLoretzian(x, y):  # fits a Lorentzian curve
    mod = LorentzianModel()
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)
    print(out.fit_report(min_correl=0.25))
    return out.best_fit
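
A quick usage sketch for fittingLoretzian with synthetic noisy Lorentzian data (the peak parameters and noise level here are arbitrary):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 201)
y = 3 / np.pi * 0.8 / ((x - 0.4)**2 + 0.8**2)   # Lorentzian: a=3, x0=0.4, sigma=0.8
y += np.random.normal(scale=0.02, size=x.size)  # add some noise

best = fittingLoretzian(x, y)
plt.plot(x, y, 'o', x, best, '-')
plt.show()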
Exemplo n.º 26
0
import matplotlib.pyplot as plt
import pandas as pd

from lmfit.models import LorentzianModel

dframe = pd.read_csv('peak.csv')

model = LorentzianModel()
params = model.guess(dframe['y'], x=dframe['x'])

result = model.fit(dframe['y'], params, x=dframe['x'])

print(result.fit_report())
result.plot_fit()
plt.show()
Exemplo n.º 27
0
            Matrix_results_name = ub_wind_path(Matrix_results_name, system='wind')
            xls = pd.ExcelFile(Matrix_results_name)
            sheets = xls.sheet_names
            for sh in sheets:
                Matrix_results = pd.read_excel(Matrix_results_name, sheet_name=sh)
                df = pd.DataFrame(Matrix_results.iloc[:180, :])  # just the quadrant
                for TR in df.columns:
                    data = df[TR]
                    X = data.index.values
                    Y = data.values
                    mod = LorentzianModel()
                    # mod = GaussianModel()
                    pars = mod.guess(Y, x=X)
                    out = mod.fit(Y, pars, x=X)
                    # Evaluate the *fitted* curve; the original used
                    # mod.eval(pars, x=X), i.e. the initial guess, and left
                    # `out` unused, which looks like a bug.
                    Y_lorenz = mod.eval(out.params, x=X)
                    # decoding-angle error from the position of the fitted peak
                    error = (90 - np.where(Y_lorenz == max(Y_lorenz))[0][0]) / 2
                    results.append([error, TR, CONDITION, SUBJECT_USE_ANALYSIS,
                                    sh[-1], brain_region])

df = pd.DataFrame(np.array(results)) 
df.columns = ['error', 'TR', 'CONDITION', 'subject', 'session', 'ROI']
df['TR'] = df.TR.astype(float)
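
One lmfit detail worth spelling out from the snippet above: mod.eval(pars, x=X) evaluates the model at the initial guess, while the fitted curve comes from out.params (or equivalently out.best_fit). A short illustration:

Y_guess = mod.eval(pars, x=X)      # curve built from the initial guess
Y_fit = mod.eval(out.params, x=X)  # curve built from the fitted parameters
# out.best_fit holds the same fitted curve evaluated at the fit's own x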
Exemplo n.º 29
0
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as plt
from lmfit.models import LorentzianModel

# `projection` (1-D array) and `nm_per_px` come from earlier in the script
N = projection.shape[0]
T = nm_per_px  # sample spacing
y = projection
yf = scipy.fftpack.fft(y)
xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)  # N // 2: count must be an int

fig1, ax = plt.subplots(1)
thegraphy = 2.0/N * np.abs(yf[:N//2])
fromj = 9
toj = 17
ax.plot(xf[:50], thegraphy[:50], '-o')
mod = LorentzianModel()  # `mod` was never defined in this fragment; a Lorentzian is assumed
y = thegraphy[fromj:toj]
x = xf[fromj:toj]
pars = mod.guess(y, x=x)
out = mod.fit(y, pars, x=x)
x2 = np.linspace(np.min(x), np.max(x), 300)
y2 = mod.eval(out.params, x=x2)
plt.plot(x2, y2, '--', alpha=1)
print(out.fit_report(min_correl=0.25))

fromj = 18
toj = 25
y = thegraphy[fromj:toj]
x = xf[fromj:toj]
pars = mod.guess(y, x=x)
out = mod.fit(y, pars, x=x)
x2 = np.linspace(np.min(x), np.max(x), 300)
params2 = out.params
# params2['center'] = 0.15390835*1.3
y2 = mod.eval(params2, x=x2)
Exemplo n.º 30
0
from lmfit.models import LorentzianModel


def fit_lorentzian(x, y):
    mod = LorentzianModel()

    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)
    return out
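
A short usage sketch for fit_lorentzian, showing how to read the fitted peak parameters off the returned ModelResult (x and y are assumed to be 1-D arrays holding the spectrum):

out = fit_lorentzian(x, y)
print(out.params['center'].value, '+/-', out.params['center'].stderr)
print(out.params['fwhm'].value)    # for a Lorentzian, fwhm = 2 * sigma
print(out.params['height'].value)  # peak height = amplitude / (pi * sigma)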