Example #1
def find_fit_sigmoid(x, y):
    model_gompertz = lm.models.Model(gompertz)
    params_gompertz = lm.Parameters()
    params_gompertz.add('asymptote', value=1E-3, min=1E-8)
    params_gompertz.add('displacement', value=1E-3, min=1E-8)
    params_gompertz.add('step_center', value=1E-3, min=1E-8)

    result_gompertz = model_gompertz.fit(y, params_gompertz, x=x)

    step_mod = StepModel(form='erf', prefix='step_')
    line_mod = LinearModel(prefix='line_')

    params_stln = line_mod.make_params(intercept=y.min(), slope=0)
    params_stln += step_mod.guess(y, x=x, center=90)

    model_stln = step_mod + line_mod
    result_stln = model_stln.fit(y, params_stln, x=x)

    # Return whichever model achieved the lower chi-square
    if result_stln.chisqr < result_gompertz.chisqr:
        return result_stln, model_stln
    return result_gompertz, model_gompertz
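The gompertz function referenced above is not part of the snippet; a minimal sketch of a compatible definition, assuming a standard three-parameter Gompertz curve (the mapping of the parameter names onto the usual a, b, c is a guess):

import numpy as np

def gompertz(x, asymptote, displacement, step_center):
    # Assumed Gompertz form a * exp(-b * exp(-c * x)); only the parameter
    # names are taken from find_fit_sigmoid above.
    return asymptote * np.exp(-displacement * np.exp(-step_center * x))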
Example #2
def make_model(peak_positions,
               fwhm=0.05,
               max_fwhm=0.5,
               pos_range=0.5,
               amplitude=1000.):
    n_peaks = len(peak_positions)
    pars = Parameters()

    bg = LinearModel(prefix='bg_')
    pars.update(bg.make_params(slope=0, intercept=0))

    mod = bg
    #pars['bg_intercept'].set(vary=True)
    #pars['bg_slope'].set(vary=True)

    for i in range(n_peaks):
        prefix = 'pk{}_'.format(i)
        peak = PseudoVoigtModel(prefix=prefix)
        # Create this peak's parameters, then constrain them below
        pars.update(peak.make_params())
        pars[prefix + 'center'].set(peak_positions[i],
                                    min=peak_positions[i] - pos_range,
                                    max=peak_positions[i] + pos_range,
                                    vary=True)
        pars[prefix + 'sigma'].set(fwhm, min=0., max=max_fwhm, vary=True)  # note: the fwhm argument seeds sigma
        pars[prefix + 'amplitude'].set(amplitude, min=0., vary=True)
        pars[prefix + 'fraction'].set(0.0, min=0., max=1., vary=True)
        mod += peak
    return mod, pars
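A hypothetical usage sketch for make_model, fitting two peaks to synthetic data; the imports shown are the ones the excerpt itself relies on, and the peak positions are invented for illustration:

import numpy as np
from lmfit import Parameters
from lmfit.models import LinearModel, PseudoVoigtModel

mod, pars = make_model([1.0, 2.5])
x = np.linspace(0, 4, 500)
np.random.seed(1)
y = mod.eval(pars, x=x) + np.random.normal(scale=5.0, size=x.size)
result = mod.fit(y, pars, x=x)
print(result.fit_report())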
Example #3
def rotational_temperature_analysis(L, E_upper):
    """
    Function that will perform a rotational temperature analysis. This will perform a least-squares fit of log(L),
    which is related to the theoretical line strength and integrated flux, and the upper state energy for the same
    transition.

    Parameters
    ----------
    L - 1D array
        Value L related to the line and theoretical line strength
    E_upper - 1D array
        The upper state energy in wavenumbers.

    Returns
    -------
    ModelResult - object
        Result of the least-squares fit
    """
    # Convert the upper state energy (avoid mutating the caller's array)
    E_upper = E_upper * units.kbcm
    logL = np.log(L)
    model = LinearModel()
    params = model.make_params()
    result = model.fit(logL, params, x=E_upper)
    return result
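units.kbcm and the calling context are not shown; the core of the analysis is just a linear fit of log(L) against E_upper, which can be checked on synthetic data:

import numpy as np
from lmfit.models import LinearModel

E_upper = np.linspace(10.0, 200.0, 20)   # upper-state energies (arbitrary units)
L = 50.0 * np.exp(-E_upper / 100.0)      # synthetic intensities: log(L) is linear in E_upper
model = LinearModel()
result = model.fit(np.log(L), model.make_params(slope=-0.01, intercept=4.0), x=E_upper)
print(result.params['slope'].value)      # recovers ~ -0.01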
Example #4
def correct_data(xData, yData, startIdx=0, endIdx=-1):
    y = yData
    # start x at zero
    x = xData - xData[0]

    # linear model
    lr = LinearModel()
    params = lr.make_params()

    # fit model
    xSelection = x[startIdx:endIdx]
    ySelection = y[startIdx:endIdx]
    outParams = lr.fit(ySelection, params, x=xSelection)

    # construct corrected data
    linear_trend = x * outParams.best_values['slope'] + outParams.best_values[
        'intercept']
    yCorrected = y - linear_trend + y[0]

    result = {
        "correctedData":
        json.loads(
            pd.DataFrame({
                "x": xData.tolist(),
                "y": yCorrected.tolist()
            }).to_json(orient='records'))
    }
    return result
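A hypothetical round trip for correct_data on data with a known linear drift (the imports are the ones the excerpt relies on):

import json
import numpy as np
import pandas as pd
from lmfit.models import LinearModel

x = np.linspace(0.0, 10.0, 200)
y = np.sin(2.0 * np.pi * x) + 0.3 * x + 1.0   # oscillation plus linear drift
out = correct_data(x, y)
print(out["correctedData"][:2])                # [{'x': ..., 'y': ...}, ...]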
Example #5
def lmDDOFit(xdata,
             ydata,
             params,
             ctr_range=1.2,
             amp_range=3,
             sig_range=6,
             weightexponential=0):

    x = xdata
    y = ydata
    #Define a linear model and a Damped Oscillator Model
    line_mod = LinearModel(prefix='line_')
    ddo_mod = DampedOscillatorModel(prefix='ddo_')
    #Initial Pars for Linear Model
    pars = line_mod.make_params(intercept=0, slope=0)
    pars['line_intercept'].set(0, vary=True)
    pars['line_slope'].set(0, vary=True)
    #Extend param list to use multiple peaks. Currently unused.
    peaks = []
    #Add fit parameters, Center, Amplitude, and Sigma
    for i in range(len(params) // 3):
        peaks.append(DampedOscillatorModel(prefix='ddo' + str(i) + '_'))
        pars.update(peaks[i].make_params())
        ctr = params[3 * i]
        amp = params[3 * i + 1]
        sig = params[3 * i + 2]
        pars['ddo' + str(i) + '_center'].set(ctr,
                                             min=ctr / ctr_range,
                                             max=ctr * ctr_range)
        pars['ddo' + str(i) + '_amplitude'].set(amp,
                                                min=amp / amp_range,
                                                max=amp * amp_range)
        pars['ddo' + str(i) + '_sigma'].set(sig,
                                            min=sig / sig_range,
                                            max=sig * sig_range)
    # Create the full model: linear background plus all peaks
    mod = line_mod
    for i in range(len(peaks)):
        mod = mod + peaks[i]

    # Initialize fit
    init = mod.eval(pars, x=x)
    # Do the fit. The weight exponential can weight the points proportional to
    # the amplitude of the y point, so points on a peak can be given more weight.
    out = mod.fit(y, pars, x=x, weights=y**weightexponential)
    #Get the fit parameters
    fittedsigma = out.params['ddo0_sigma'].value
    fittedAmp = out.params['ddo0_amplitude'].value
    fittedCenter = out.params['ddo0_center'].value
    fittedIntercept = out.params['line_intercept'].value
    fittedSlope = out.params['line_slope'].value
    fittedQ = 1 / (2 * fittedsigma)
    # Return the output fit as well as a list of the important fit parameters
    return out, [
        fittedCenter, fittedAmp, fittedsigma, fittedQ, fittedIntercept,
        fittedSlope
    ]
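A synthetic one-peak exercise of lmDDOFit; params packs [center, amplitude, sigma] triples, and the values here are invented:

import numpy as np
from lmfit.models import DampedOscillatorModel, LinearModel

x = np.linspace(0.5, 2.0, 400)
y = DampedOscillatorModel().eval(x=x, center=1.0, amplitude=1.0, sigma=0.05) + 0.01
out, fitted = lmDDOFit(x, y, [1.0, 1.0, 0.05])
print(fitted)   # [center, amplitude, sigma, Q, intercept, slope]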
Example #6
def gauss_peak_fit(energy_data, cnts_data, energy_spectrum, channel_width):
    '''
    gauss_peak_fit takes an input spectrum, finds the photopeaks, fits a
    Gaussian curve to each one, and returns the amplitude and sigma of the
    Gaussian peaks. Make sure the spectrum is calibrated first.

    sigma_list, amplitude_list = gauss_peak_fit(energy_data, cnts_data, energy_spectrum, channel_width)

    energy_data: .energies_kev that has been calibrated from becquerel
    cnts_data: .cps_vals from the becquerel spectrum
    energy_spectrum: an array of gamma energies generated from gamma_energies
    channel_width: width of the peak for analysis purposes
    '''
    sigma_list = []
    amplitude_list = []
    for erg in energy_spectrum:
        x_loc = list(
            filter(lambda x: (erg - 3) < energy_data[x] < (erg + 3),
                   range(len(energy_data))))
        x_loc_pk = range(int(x_loc[0] - 5), int(x_loc[0] + 5))
        pk_cnt = np.argmax(cnts_data[x_loc_pk])
        ch_width = range(int(x_loc_pk[pk_cnt] - channel_width),
                         int(x_loc_pk[pk_cnt] + channel_width))

        calibration = energy_data[ch_width]
        real_y_gauss = cnts_data[ch_width]
        x = np.asarray(calibration)
        real_y = np.asarray(real_y_gauss)

        mod_gauss = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line_')
        pars = mod_gauss.guess(real_y, x=x)
        pars.update(line_mod.make_params(intercept=real_y.min(), slope=0))
        pars['g1_center'].set(x[np.argmax(real_y)],
                              min=x[np.argmax(real_y)] - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(real_y), min=max(real_y) - 10)
        mod = mod_gauss + line_mod
        out = mod.fit(real_y, pars, x=x)

        # print(out.fit_report(min_correl=10))
        sigma_list.append(out.params['g1_sigma'].value)
        amplitude_list.append(out.params['g1_amplitude'].value)

    return sigma_list, amplitude_list
Example #7
    def fitting_math(
        self,
        xfile: List[str],
        yfile: List[str],
        flag: int = 1,
    ) -> Any:
        """PeakLogic.fitting_math() fits the data to a cosh and a
        gaussian, then subtracts the cosh to find peak current.."""

        try:
            center: float = self.app.peak_center_.get()
            x: "np.ndarray[Any, np.dtype[np.float64]]" = np.array(
                xfile, dtype=np.float64)
            y: "np.ndarray[Any, np.dtype[np.float64]]" = np.array(
                yfile, dtype=np.float64)

            # cut out outliers
            passingx: "np.ndarray[Any, np.dtype[np.float64]]"
            passingy: "np.ndarray[Any, np.dtype[np.float64]]"
            passingx, passingy = self.trunc_edges(xfile, yfile)

            rough_peak_positions = [min(passingx), center]

            min_y = float(min(passingy))
            model = LinearModel(prefix="Background")
            params = model.make_params()
            # The prefix has no underscore, so the parameter names concatenate:
            params["Backgroundslope"].set(0, min=0)
            params["Backgroundintercept"].set(0, min=min_y)

            for i, cen in enumerate(rough_peak_positions):
                peak, pars = self.add_lz_peak(f"Peak_{i+1}", cen)
                model = model + peak
                params.update(pars)

            _ = model.eval(params, x=passingx)
            result = model.fit(passingy, params, x=passingx)
            comps = result.eval_components()

            ip = float(max(comps["Peak_2"]))

            if flag == 1:
                return ip
            if flag == 0:
                return (
                    x,
                    y,
                    result.best_fit,
                    comps["Background"],
                    comps["Peak_1"],
                    comps["Peak_2"],
                    ip,
                    passingx,
                )

        except Exception:  # pragma: no cover
            print("Error Fitting")
            print(sys.exc_info())
            return -1
Example #8
    def lin_and_multi_gaussian(self, numOfComponents, cList, sList, aList, lS, lI, limits):
        """All lists should be the same length"""
        gList = []

        if self.xAxis == 'wave' and self.initVals == 'vel':
            cList = vel_to_wave(self.restWave, vel=np.array(cList), flux=0)[0]
            sList = vel_to_wave(self.restWave, vel=np.array(sList), flux=0, delta=True)[0]
            aList = vel_to_wave(self.restWave, vel=0, flux=np.array(aList))[1]
        elif self.xAxis == 'vel' and self.initVals == 'wave':
            cList = wave_to_vel(self.restWave, wave=np.array(cList), flux=0)[0]
            sList = wave_to_vel(self.restWave, wave=np.array(sList), flux=0, delta=True)[0]
            aList = wave_to_vel(self.restWave, wave=0, flux=np.array(aList))[1]

        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)

        for i in range(numOfComponents):
            cLimit = limits['c'][i] if isinstance(limits['c'], list) else limits['c']
            sLimit = limits['s'][i] if isinstance(limits['s'], list) else limits['s']
            aLimit = limits['a'][i] if isinstance(limits['a'], list) else limits['a']
            lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
            prefix = 'g{0}_'.format(i+1)
            gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
        gList = np.array(gList)
        mod = lin + gList.sum()

        init = mod.eval(self.linGaussParams, x=self.x)
        out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
        with open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName,
                               "{0}_Log.txt".format(self.rp.regionName)), "a") as f:
            print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
            print(out.fit_report())
            f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
            f.write(out.fit_report())
        components = out.eval_components()

        if not hasattr(self.rp, 'plotResiduals'):
            self.rp.plotResiduals = True
        self.plot_emission_line(numOfComponents, components, out, self.rp.plotResiduals, init=init, scaleFlux=self.rp.scaleFlux)

        self._get_amplitude(numOfComponents, out)

        return out, components
Example #9
def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
    # create data to be fitted
    np.random.seed(88)
    x = np.linspace(0, 10, 101)
    y = intercept + x * slope
    y = y + np.random.normal(size=len(x), scale=noise)

    model = LinearModel()
    params = model.make_params(intercept=intercept, slope=slope)

    return x, y, model, params
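The helper only builds the pieces; a short sketch of how they fit together (assuming numpy and lmfit.models.LinearModel are imported, as the excerpt requires):

x, y, model, params = get_linearmodel(slope=0.8, intercept=0.5, noise=1.5)
result = model.fit(y, params, x=x)
print(result.params['slope'].value, result.params['intercept'].value)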
Example #10
def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
    # create data to be fitted
    np.random.seed(88)
    x = np.linspace(0, 10, 101)
    y = intercept + x*slope
    y = y + np.random.normal(size=len(x), scale=noise)

    model = LinearModel()
    params = model.make_params(intercept=intercept, slope=slope)

    return x, y, model, params
Example #11
def fitsample(data, theta_initial, theta_final):
    x = data[:, 0]
    y = data[:, 1]
    m = (x > theta_initial) & (x < theta_final)
    x_fit = x[m]
    y_fit = y[m]

    # Despite the 'pv1_' prefix, this is a (true) Voigt profile, not a pseudo-Voigt
    pseudovoigt1 = VoigtModel(prefix='pv1_')
    pars = pseudovoigt1.make_params()
    pars['pv1_center'].set(13.5, min=13.4, max=13.6)
    pars['pv1_sigma'].set(0.05, min=0.01, max=0.1)
    pars['pv1_amplitude'].set(70, min=1, max=100)
    # pars['pv1_fraction'].set(0.5)  # 'fraction' only exists on PseudoVoigtModel

    lorentz2 = LorentzianModel(prefix='lor2_')
    pars.update(lorentz2.make_params())
    pars['lor2_center'].set(13.60, min=13.4, max=13.9)
    pars['lor2_sigma'].set(0.1, min=0.01)
    pars['lor2_amplitude'].set(10, min=1, max=50)

    line1 = LinearModel(prefix='l1_')
    pars.update(line1.make_params())
    pars['l1_slope'].set(0)
    pars['l1_intercept'].set(240, min=200, max=280)

    mod = pseudovoigt1 + lorentz2 + line1

    result = mod.fit(y_fit, pars, x=x_fit)

    #print(result.fit_report())    
    pv1_pos = result.params['pv1_center'].value
    pv1_height = result.params['pv1_height'].value
    lor2_pos = result.params['lor2_center'].value
    lor2_height = result.params['lor2_height'].value
    #peak_area = pars['gau1_fwhm'].value*peak_amp
    #plt.xlim([theta_initial, theta_final])
    #plt.ylim([100, 500])
    #plt.semilogy(x_fit, y_fit, 'bo')
    
    #plt.semilogy (x_fit, result.init_fit, 'k--')    
    #plt.semilogy(x_fit, result.best_fit, 'r-')
    #plt.show()
    return pv1_pos, pv1_height, lor2_pos, lor2_height
Example #12
def plot_gauss(energy_data, cnts_data, energy_spectrum, channel_width):
    '''
    plot_gauss takes an input spectrum and plots the gaussian fit to the photopeaks
    Make sure the spectrum is calibrated first.
    plot_gauss(energy_data, cnts_data, energy_spectrum, channel_width)
    energy_data: .energies_kev that has been calibrated from becquerel
    cnts_data: .cps_vals from becquerel spectrum
    energy_spectrum: an array of gamma energies generated from gamma_energies
    channel_width: width of the peak for analysis purposes
    '''
    for erg in energy_spectrum:
        x_loc = list(
            filter(lambda x: (erg - 3) < energy_data[x] < (erg + 3),
                   range(len(energy_data))))
        x_loc_pk = range(int(x_loc[0] - 5), int(x_loc[0] + 5))
        pk_cnt = np.argmax(cnts_data[x_loc_pk])
        ch_width = range(int(x_loc_pk[pk_cnt] - channel_width),
                         int(x_loc_pk[pk_cnt] + channel_width))

        calibration = energy_data[ch_width]
        real_y_gauss = cnts_data[ch_width]
        x = np.asarray(calibration)
        real_y = np.asarray(real_y_gauss)

        mod_gauss = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line_')
        pars = mod_gauss.guess(real_y, x=x)
        pars.update(line_mod.make_params(intercept=real_y.min(), slope=0))
        pars['g1_center'].set(x[np.argmax(real_y)],
                              min=x[np.argmax(real_y)] - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(real_y), min=max(real_y) - 10)
        mod = mod_gauss + line_mod
        out = mod.fit(real_y, pars, x=x)

        plt.figure()
        plt.plot(x, real_y)
        plt.plot(x, out.best_fit, 'k--')
        max_y = np.argmax(real_y)  # index of the maximum y value
        plt.title('Gaussian fit around %0.1f keV' % x[max_y])
        plt.xlabel('Energy (keV)')
        plt.ylabel('CPS')
        plt.show()
Example #13
    def _setup_model(self):
        lin = LinearModel(prefix='Lin_')

        model = lin

        psi_min, psi_max = self._f.get_domain()[0], self._f.get_domain()[-1]
        slope = (self._f(psi_max) - self._f(psi_min)) / (psi_max - psi_min)

        lin.set_param_hint('intercept', value=self._f(0))
        lin.set_param_hint('slope', value=slope)

        params = lin.make_params()

        return model, params
Example #14
def fit_adev(tau, adev, err_lo, err_high):

    fit_tau_over = 499

    # If there are at least 2 datapoints to fit at large tau_values, fit them
    if len(tau[np.where(tau > fit_tau_over)]) >= 2:

        # TODO: take into account asymmetric errorbars
        weights = (err_lo + err_high) / 2  # take naive 1-std errorbar average

        # only fit long tau values, taking the matching adev and weight entries
        mask = np.asarray(tau) > fit_tau_over
        x = np.asarray(tau)[mask]
        y = np.asarray(adev)[mask]
        w = np.asarray(weights)[mask]

        # Fit straight line on a log10 scale
        w_log = np.log10(np.e) * (w / y)  # error propagation for log10(y +- w); uses the linear y
        x = np.log10(x)
        y = np.log10(y)

        # Weighted Least Squares fit; ax + b
        model = LinearModel()

        params = model.make_params()
        params['intercept'].max = -10
        params['intercept'].value = -15
        params['intercept'].min = -19
        params['intercept'].brute_step = 0.005
        params['slope'].value = -0.5  # assume white noise dominates on the fitting range
        params['slope'].vary = False  # ...so we keep this parameter fixed

        res = model.fit(y, params, weights=1 / w_log**2, x=x)

        a = res.values['slope']
        b = res.values['intercept']

        x_smooth = np.logspace(0, 5, 20)
        return x_smooth, 10**(res.eval(x=np.log10(x_smooth))), a, b

    # Else if there are not enough large tau_values to fit, return empty arrays
    else:

        return [], [], 0.5, -1
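A synthetic exercise of fit_adev; the numbers are chosen only so the tau > 499 branch runs and the intercept bounds are satisfied:

import numpy as np
from lmfit.models import LinearModel

tau = np.logspace(0, 4, 30)
adev = 1e-14 / np.sqrt(tau)     # white-noise slope of -0.5 on a log-log scale
err = 0.1 * adev
x_smooth, fit_curve, a, b = fit_adev(tau, adev, err, err)
print(a, b)                     # slope fixed at -0.5, intercept near -14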
Example #15
    def _setup_model(self):
        peak_k1 = self._peaks[0](prefix='Peak_KAlpha1_')
        peak_k2 = self._peaks[1](prefix='Peak_KAlpha2_')
        bkgrd = LinearModel(prefix='Background_')
        # bkgrd_2 = GaussianModel(prefix='Background_Gauss_')

        model = peak_k1 + peak_k2 + bkgrd  # + bkgrd_2

        self.apply_parameter_hints(model)

        params = bkgrd.make_params()
        # params = peak_k1.make_params()
        params.update(peak_k1.make_params())
        params.update(peak_k2.make_params())
        # params.update(bkgrd_2.make_params())

        return model, params
Example #16
def test_final_parameter_values():
    model = LinearModel()
    params = model.make_params()
    params['intercept'].set(value=-1, min=-20, max=0)
    params['slope'].set(value=1, min=-100, max=400)

    np.random.seed(78281)
    x = np.linspace(0, 9, 10)
    y = x * 1.34 - 4.5 + np.random.normal(scale=0.05, size=x.size)

    result = model.fit(y, x=x, method='nelder', params=params)

    assert_almost_equal(result.chisqr, 0.014625543, decimal=6)
    assert_almost_equal(result.params['intercept'].value,
                        -4.511087126,
                        decimal=6)
    assert_almost_equal(result.params['slope'].value, 1.339685514, decimal=6)
Example #17
def peakfit(xvals, yvals, yerrors=None):
    """
    Fit peak to scans
    """

    peak_mod = VoigtModel()
    # peak_mod = GaussianModel()
    bkg_mod = LinearModel()

    pars = peak_mod.guess(yvals, x=xvals)
    pars += bkg_mod.make_params(intercept=np.min(yvals), slope=0)
    # pars['gamma'].set(value=0.7, vary=True, expr='') # don't fix gamma

    mod = peak_mod + bkg_mod
    out = mod.fit(yvals, pars, x=xvals, weights=yerrors)

    return out
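A minimal synthetic check of peakfit, assuming a Voigt peak on a sloping background:

import numpy as np
from lmfit.models import LinearModel, VoigtModel

np.random.seed(3)
xvals = np.linspace(-5.0, 5.0, 401)
yvals = (VoigtModel().eval(x=xvals, center=0.0, amplitude=10.0, sigma=0.4)
         + 0.5 * xvals + 2.0 + np.random.normal(scale=0.05, size=xvals.size))
out = peakfit(xvals, yvals)
print(out.params['center'].value)   # should recover ~0.0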
Example #18
File: plot.py | Project: RFlehr/AccQ
    def calculateSlope(self, x, y, numPoints=50):  # note: numPoints is unused; self.__regPoints sets the window
        nop = self.__regPoints
        if nop > len(x):
            _x = x
            _y = y
        else:
            _x = x[-nop:]
            _y = y[-nop:]
        mod = LinearModel()
        pars = mod.make_params()
        pars['slope'].set(0.0)
        pars['intercept'].set(0.0)
        out = mod.fit(_y, pars, x=_x)
        slope = str("{0:.3f}".format(out.best_values['slope'] * 1000))
        self.returnSlope.emit(slope)
        _time = self.setTimeLabel(_x)
        self.__Regres.setData(_time, out.best_fit)
Example #19
def lmDDOFit(xdata, ydata, params, ctr_range=1.2, amp_range=3, sig_range=6, weightexponential=0):
    x = xdata
    y = ydata
    # Define a linear model and a Damped Oscillator Model
    line_mod = LinearModel(prefix='line_')
    ddo_mod = DampedOscillatorModel(prefix='ddo_')
    # Initial pars for the linear model
    pars = line_mod.make_params(intercept=0, slope=0)
    pars['line_intercept'].set(0, vary=True)
    pars['line_slope'].set(0, vary=True)
    # Extend param list to use multiple peaks. Currently unused.
    peaks = []
    # Add fit parameters: center, amplitude, and sigma
    for i in range(len(params) // 3):
        peaks.append(DampedOscillatorModel(prefix='ddo' + str(i) + '_'))
        pars.update(peaks[i].make_params())
        ctr = params[3*i]
        amp = params[3*i + 1]
        sig = params[3*i + 2]
        pars['ddo' + str(i) + '_center'].set(ctr, min=ctr/ctr_range, max=ctr*ctr_range)
        pars['ddo' + str(i) + '_amplitude'].set(amp, min=amp/amp_range, max=amp*amp_range)
        pars['ddo' + str(i) + '_sigma'].set(sig, min=sig/sig_range, max=sig*sig_range)
    # Create the full model: linear background plus all peaks
    mod = line_mod
    for i in range(len(peaks)):
        mod = mod + peaks[i]
    # Initialize fit
    init = mod.eval(pars, x=x)
    # Do the fit. The weight exponential can weight the points proportional to
    # the amplitude of the y point, so points on a peak can be given more weight.
    out = mod.fit(y, pars, x=x, weights=y**weightexponential)
#Get the fit parameters
    fittedsigma = out.params['ddo0_sigma'].value
    fittedAmp = out.params['ddo0_amplitude'].value
    fittedCenter = out.params['ddo0_center'].value
    fittedIntercept = out.params['line_intercept'].value
    fittedSlope = out.params['line_slope'].value
    fittedQ=1/(2*fittedsigma)
    # Return the output fit as well as a list of the important fit parameters
    return out, [fittedCenter, fittedAmp, fittedsigma, fittedQ, fittedIntercept, fittedSlope]
Example #20
    def _create_model(self, num_comps=1, guess=True):
        lin = LinearModel()
        lin.set_param_hint('slope', vary=False)
        lin.set_param_hint('intercept', vary=False)
        pars = lin.make_params(slope=0, intercept=0)
        mod = lin

        for i in range(num_comps):
            g = MyGaussianModel(prefix='g{}_'.format(i + 1))
            g.set_param_hint('amplitude', min=0.)

            if guess:
                pars.update(g.guess(self['flux'], self['vel'], w=2))
            else:
                pars.update(
                    g.make_params(amplitude=0.15, center=0, sigma=200 / 2.35))

            mod = mod + g

        return mod, pars
Example #21
def measure_line_index_recover_spectrum(wave, params, norm=False):
    """ recover the fitted line profile from params

    Parameters
    ----------
    wave: array-like
        the wavelength to which the recovered flux correspond

    params: 5-element tuple
        the 5 elements are, in order:
        mod_linear_slope
        mod_linear_intercept
        mod_gauss_amplitude
        mod_gauss_center
        mod_gauss_sigma

    norm: bool
        if False, return only the normalized Gaussian profile (1 - Gaussian);
        if True, multiply it by the linear continuum model

    """
    from lmfit.models import LinearModel, GaussianModel
    mod_linear = LinearModel(prefix='mod_linear_')
    mod_gauss = GaussianModel(prefix='mod_gauss_')
    par_linear = mod_linear.make_params()
    par_gauss = mod_gauss.make_params()
    par_linear['mod_linear_slope'].value = params[0]
    par_linear['mod_linear_intercept'].value = params[1]
    par_gauss['mod_gauss_amplitude'].value = params[2]
    par_gauss['mod_gauss_center'].value = params[3]
    par_gauss['mod_gauss_sigma'].value = params[4]
    if not norm:
        flux = 1 - mod_gauss.eval(params=par_gauss, x=wave)
    else:
        flux = \
            (1 - mod_gauss.eval(params=par_gauss, x=wave)) * \
            mod_linear.eval(params=par_linear, x=wave)
    return flux
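A hypothetical call recovering a normalized absorption profile; the packed parameter values are invented for illustration:

import numpy as np

wave = np.linspace(6540.0, 6580.0, 400)
# (linear slope, linear intercept, gauss amplitude, gauss center, gauss sigma)
params = (0.0, 1.0, 2.5, 6563.0, 1.2)
flux = measure_line_index_recover_spectrum(wave, params, norm=False)
print(flux.min())   # absorption dip below the unit continuum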
Example #22
def measure_line_index_recover_spectrum(wave, params, norm=False):
    """ recover the fitted line profile from params

    Parameters
    ----------
    wave: array-like
        the wavelength to which the recovered flux correspond

    params: 5-element tuple
        the 5 elements are, in order:
        mod_linear_slope
        mod_linear_intercept
        mod_gauss_amplitude
        mod_gauss_center
        mod_gauss_sigma

    norm: bool
        if False, return only the normalized Gaussian profile (1 - Gaussian);
        if True, multiply it by the linear continuum model

    """
    from lmfit.models import LinearModel, GaussianModel
    mod_linear = LinearModel(prefix='mod_linear_')
    mod_gauss = GaussianModel(prefix='mod_gauss_')
    par_linear = mod_linear.make_params()
    par_gauss = mod_gauss.make_params()
    par_linear['mod_linear_slope'].value = params[0]
    par_linear['mod_linear_intercept'].value = params[1]
    par_gauss['mod_gauss_amplitude'].value = params[2]
    par_gauss['mod_gauss_center'].value = params[3]
    par_gauss['mod_gauss_sigma'].value = params[4]
    if not norm:
        flux = 1 - mod_gauss.eval(params=par_gauss, x=wave)
    else:
        flux = \
            (1 - mod_gauss.eval(params=par_gauss, x=wave)) * \
            mod_linear.eval(params=par_linear, x=wave)
    return flux
Example #23
def fit(filename):

    kB_wn = 0.69503477  # cm-1/K

    data = np.loadtxt(filename)

    trans = transitions()
    trans = assign(data, trans)
    trans = sorted(trans, key=lambda t: t.E)

    xxx = [t.E for t in trans]
    y_exp = [t.Y / (t.A * t.g) for t in trans]
    y_exp = list(np.log(y_exp))

    model = LinearModel()
    pars = model.make_params(intercept=5.0, slope=-1.0 / (kB_wn * 300))

    out = model.fit(y_exp, pars, x=xxx)

    y_calc = out.best_fit
    T = -1 / (kB_wn * out.params['slope'])

    return xxx, y_exp, y_calc, T, trans
Example #24
File: fit.py | Project: lnls-sol/py4syn-old
def fitGauss(xarray, yarray):
    """
    This function mixes a Linear Model with a Gaussian Model (LMFit).

    See also: `Lmfit Documentation <http://cars9.uchicago.edu/software/python/lmfit/>`_

    Parameters
    ----------
    xarray : array
        X data
    yarray : array
        Y data

    Returns
    -------
    peak value: `float`
    peak position: `float`
    min value: `float`
    min position: `float`
    fwhm: `float`
    fwhm position: `float`
    center of mass: `float`
    fit_Y: `array`
    fit_result: `ModelFit`


    Examples
    --------
    >>> import pylab as pl
    >>> data = 'testdata.txt'
    >>> X = pl.loadtxt(data);
    >>> x = X[:,0];
    >>> y = X[:,7];
    >>>
    >>> pkv, pkp, minv, minp, fwhm, fwhmp, com = fitGauss(x, y)
    >>> print("Peak ", pkv, " at ", pkp)
    >>> print("Min ", minv, " at ", minp)
    >>> print("Fwhm ", fwhm, " at ", fwhmp)
    >>> print("COM = ", com)
    >>>
    """
    from lmfit.models import GaussianModel, LinearModel

    y = yarray
    x = xarray

    gaussMod = GaussianModel()
    linMod = LinearModel()
    pars = linMod.guess(y, x=x)       # initial line through the data
    pars += gaussMod.guess(y, x=x)    # initial Gaussian estimate

    mod = gaussMod + linMod

    fwhm = 0
    fwhm_position = 0

    try:
        result = mod.fit(y, pars, x=x)
        fwhm = result.values['fwhm']
        fwhm_position = result.values['center']
    except Exception:
        result = None

    peak_position = xarray[np.argmax(y)]
    peak = np.max(y)

    minv_position = x[np.argmin(y)]
    minv = np.min(y)

    COM = (np.multiply(x, y).sum()) / y.sum()

    return (peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM,
            result)
Example #25
def get_wavelength_from_std_tth(x, y, d_spacings, ns, plot=False):
    """
    Return the wavelength from a two theta scan of a standard

    Parameters
    ----------
    x: ndarray
        the two theta coordinates
    y: ndarray
        the detector intensity
    d_spacings: ndarray
        the dspacings of the standard
    ns: ndarray
        the multiplicity of the reflection
    plot: bool
        If true plot some of the intermediate data
    Returns
    -------
    float:
        The average wavelength
    float:
        The standard deviation of the wavelength
    """
    l, r, c = find_peaks(y, sides=12)
    n_sym_peaks = len(c) // 2
    lmfit_centers = []
    for lidx, ridx, peak_center in zip(l, r, c):
        suby = y[lidx:ridx]
        subx = x[lidx:ridx]
        mod1 = VoigtModel()
        mod2 = LinearModel()
        pars1 = mod1.guess(suby, x=subx)
        pars2 = mod2.make_params(slope=0, intercept=0)
        mod = mod1 + mod2
        pars = pars1 + pars2
        out = mod.fit(suby, pars, x=subx)
        lmfit_centers.append(out.values['center'])
        if plot:
            plt.plot(subx, out.best_fit, '--')
            plt.plot(subx, suby - out.best_fit, '.')
    lmfit_centers = np.asarray(lmfit_centers)
    if plot:
        plt.plot(x, y, 'b')
        plt.plot(x[c], y[c], 'ro')
        plt.plot(x, np.zeros(x.shape), 'k.')
        plt.show()

    offset = []
    for i in range(0, n_sym_peaks):
        o = (np.abs(lmfit_centers[i]) -
             np.abs(lmfit_centers[2 * n_sym_peaks - i - 1])) / 2.
        # print(o)
        offset.append(o)
    print('predicted offset {}'.format(np.median(offset)))
    lmfit_centers += np.median(offset)
    print(lmfit_centers)
    wavelengths = []
    l_peaks = lmfit_centers[lmfit_centers < 0.]
    r_peaks = lmfit_centers[lmfit_centers > 0.]
    for peak_set in [r_peaks, l_peaks[::-1]]:
        for peak_center, d, n in zip(peak_set, d_spacings, ns):
            tth = np.deg2rad(np.abs(peak_center))
            wavelengths.append(lamda_from_bragg(tth, d, n))
    return np.average(wavelengths), np.std(wavelengths), np.median(offset)
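find_peaks and lamda_from_bragg come from the surrounding project and are not shown; a sketch of lamda_from_bragg consistent with Bragg's law, assuming its first argument is the full scattering angle 2-theta in radians:

import numpy as np

def lamda_from_bragg(tth, d, n):
    # Bragg's law: n * lambda = 2 * d * sin(theta), with theta = tth / 2
    return 2.0 * d * np.sin(tth / 2.0) / n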
Example #26
def spectrum_calibration(channel_width, energy_list, data_2_calibrate):
    '''
    The while loop goes through and identifies the largest peak in the
    spectrum and it records the position of that peak. It then removes
    the peak by removing 10 channels from the right and left of the peak.
    The code will then search for the next largest position.
    '''

    i = 0
    channel_max_list = []
    gauss_x = []
    gauss_y = []
    fit_channel = []
    channel_std_list = []
    while i < len(energy_list):
        channel_max = np.argmax(data_2_calibrate)
        #channel_max_list.append(channel_max)
        data_left = channel_max - channel_width
        data_right = channel_max + channel_width
        '''
        Instead of deleting the items from the list. I am placing them to
        zero. The while loop iterates over the peak and sets it to zero.
        '''
        iterator = data_left
        while iterator < (data_right):
            gauss_x.append(iterator)
            gauss_y.append(data_2_calibrate[iterator])
            x = np.asarray(gauss_x)
            y = np.asarray(gauss_y)
            fit_channel.append(data_2_calibrate[iterator])
            data_2_calibrate[iterator] = 0
            iterator += 1
        i += 1
        '''
        information for plotting the Gaussian function.
        '''
        mod = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line_')
        pars = mod.guess(y, x=x)
        pars.update(line_mod.make_params(intercept=y.min(), slope=0))
        pars['g1_center'].set(gauss_x[np.argmax(gauss_y)],
                              min=gauss_x[np.argmax(gauss_y)] - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(gauss_y), min=max(gauss_y) - 10)
        mod = mod + line_mod
        out = mod.fit(y, pars, x=x)
        center = out.params['g1_center'].value
        center_std = out.params['g1_center'].stderr
        channel_max_list.append(center)
        channel_std_list.append(center_std)
        gauss_x = []
        gauss_y = []
        fit_channel = []
    '''
    Sort the fitted centers so each channel stays paired with its
    uncertainty and corresponds to the correct energy.
    '''
    paired = sorted(zip(channel_max_list, channel_std_list))
    channel_number = [c for c, _ in paired]
    channel_std = [s for _, s in paired]
    energy = energy_list
    results = sm.OLS(energy, sm.add_constant(channel_number)).fit()

    slope, intercept = np.polyfit(channel_number, energy, 1)

    abline_values = [slope * i + intercept for i in channel_number]
    plt.figure()
    plt.errorbar(channel_number,
                 energy,
                 channel_std,
                 marker='o',
                 linestyle='none')
    plt.plot(channel_number, abline_values)
    plt.xlabel('Channel Number')
    plt.ylabel('Energy [keV]')
    plt.title('Best Fit Line $y=%3.7sx+%3.7s$' % (slope, intercept),
              fontsize=16)
    plt.savefig('../images/best_fit_line.png')
    return slope, intercept
Example #27
for i, fname in enumerate(filenames):
    # Find temperature
    split_name = fname.split('_')
    for part in split_name:
        try:
            c_temp = int(part)
        except ValueError:
            pass
    invT[i] = 1000.0 / (c_temp + 273.15)
    yprime = pd.read_csv(fname,
                         header=0,
                         index_col='# f',
                         usecols=['# f', 'realY'])
    yprime = yprime * thickness / area
    ax1.loglog(yprime.index, yprime, '.', c=colors[i])
    sigmas[i] = np.log10(yprime[yprime.index < 50].median())
    ax1.axhline(10**sigmas[i], c='r')
    ax2.plot(invT[i], sigmas[i], 'o', c=colors[i])
    plt.savefig('test_{:02d}.png'.format(i), dpi=96)

m2 = LinearModel()
# Initialize parameters for linear model
p2 = m2.make_params()
# Fit Arrhenius
out2 = m2.fit(sigmas, p2, x=invT)
EA = out2.values['slope'] * 8.6173303e-5 * -1000 / np.log10(np.e)
ax2.plot(invT, out2.best_fit, 'r-', label=r'E$_A$ = {:.3f} eV'.format(EA))
ax2.legend()
plt.show()
plt.savefig('impedance_activation.png', dpi=96)
Example #28
def correlate_spectra(obs_flx, obs_wvl, ref_flx, ref_wvl, plot=None):

    # convert spectra sampling to logspace
    obs_flux_res_log, _ = spectra_logspace(obs_flx, obs_wvl)
    ref_flux_sub_log, wvl_log = spectra_logspace(ref_flx, ref_wvl)
    wvl_step = ref_wvl[1] - ref_wvl[0]

    # correlate the two spectra
    min_flux = 1.1
    ref_flux_sub_log[ref_flux_sub_log > min_flux] = 1.
    obs_flux_res_log[obs_flux_res_log > min_flux] = 1.
    corr_res = correlate(1. - ref_flux_sub_log,
                         1. - obs_flux_res_log,
                         mode='same',
                         method='fft')

    # create a correlation subset that will actually be analysed
    corr_w_size_wide = 135  # perform rough search of the local peak
    corr_w_size = 35  # narrow down to the exact location of the CC peak
    corr_c_off = np.int64(len(corr_res) / 2.)
    corr_c_off += np.nanargmax(
        corr_res[corr_c_off - corr_w_size_wide:corr_c_off +
                 corr_w_size_wide]) - corr_w_size_wide
    corr_pos_min = corr_c_off - corr_w_size
    corr_pos_max = corr_c_off + corr_w_size

    # if plot is not None:
    #     plt.plot(corr_res, lw=1)
    #     plt.axvline(corr_pos_min, color='black')
    #     plt.axvline(corr_pos_max, color='black')
    #     plt.savefig(plot+'_1.png', dpi=300)
    #     plt.close()

    # print corr_pos_min, corr_pos_max
    corr_res_sub = corr_res[corr_pos_min:corr_pos_max]
    corr_res_sub -= np.median(corr_res_sub)
    corr_res_sub_x = np.arange(len(corr_res_sub))

    # analyze correlation function by fitting gaussian/voigt/lorentzian distribution to it
    peak_model = GaussianModel()
    additional_model = LinearModel()  # ConstantModel()
    parameters = additional_model.make_params(
        intercept=np.nanmin(corr_res_sub), slope=0)
    parameters += peak_model.guess(corr_res_sub, x=corr_res_sub_x)
    fit_model = peak_model + additional_model
    corr_fit_res = fit_model.fit(corr_res_sub, parameters, x=corr_res_sub_x)
    corr_center = corr_fit_res.params['center'].value
    corr_center_max = corr_res_sub_x[np.argmax(corr_res_sub)]

    # determine the actual shift
    idx_no_shift = np.int32(len(corr_res) / 2.)
    idx_center = corr_c_off - corr_w_size + corr_center
    idx_center_max = corr_c_off - corr_w_size + corr_center_max
    log_shift_px = idx_no_shift - idx_center
    log_shift_px_max = idx_no_shift - idx_center_max
    log_shift_wvl = log_shift_px * wvl_step

    wvl_log_new = wvl_log - log_shift_wvl
    rv_shifts = (wvl_log_new[1:] - wvl_log_new[:-1]
                 ) / wvl_log_new[:-1] * 299792.458 * log_shift_px
    rv_shifts_max = (wvl_log_new[1:] - wvl_log_new[:-1]
                     ) / wvl_log_new[:-1] * 299792.458 * log_shift_px_max
    rv_shifts_5 = (wvl_log_new[1:] -
                   wvl_log_new[:-1]) / wvl_log_new[:-1] * 299792.458 * 5

    if plot is not None:
        plt.plot(corr_res_sub, lw=1, color='C0')
        plt.axvline(corr_center_max, color='C0')
        plt.plot(corr_fit_res.best_fit, lw=1, color='C1')
        plt.axvline(corr_center, color='C1')
        plt.title(
            u'RV max: {:.2f}, RV fit: {:.2f}, $\Delta$RV per 5: {:.2f}, center wvl {:.1f}'
            .format(np.nanmedian(rv_shifts_max), np.nanmedian(rv_shifts),
                    np.nanmedian(rv_shifts_5), np.nanmean(obs_wvl)))
        plt.savefig(plot + '_2.png', dpi=200)
        plt.close()

    if log_shift_wvl < 5.:
        # return np.nanmedian(rv_shifts_max), np.nanmedian(ref_wvl)
        return np.nanmedian(rv_shifts), np.nanmedian(ref_wvl)
    else:
        # something went wrong
        return np.nan, np.nanmedian(ref_wvl)
Example #29
                               amplitude=amplitude[1],
                               sigma=sigma[1])
    pars += L2_mod.make_params(center=center[2],
                               amplitude=amplitude[2],
                               sigma=sigma[2])
    pars += L3_mod.make_params(center=center[3],
                               amplitude=amplitude[3],
                               sigma=sigma[3])
    pars += L4_mod.make_params(center=center[4],
                               amplitude=amplitude[4],
                               sigma=sigma[4])
    pars += L5_mod.make_params(center=center[5],
                               amplitude=amplitude[5],
                               sigma=sigma[5])

    pars += c_mod.make_params(intercept=c, slope=slope)
    # pars += c_mod.make_params(c=c)

    # Define the full model
    mod = L0_mod + c_mod + L1_mod + L2_mod + L3_mod + L4_mod + L5_mod  # + L3_mod
    # Fit
    out = mod.fit(y, pars, x=x)

    # Write to the fit log file
    fit_log.write("\n\n\n\n\n\n Fitted from: %s %s \n" %
                  ("./19-04-26/", filename))
    now = datetime.datetime.now()
    fit_log.write(
        "At: %i/%i/%i, %i:%i:%i \n" %
        (now.day, now.month, now.year, now.hour, now.minute, now.second))
    fit_log.write(out.fit_report(min_correl=0.2))
Example #30
# <examples/doc_builtinmodels_stepmodel.py>
import matplotlib.pyplot as plt
import numpy as np

from lmfit.models import LinearModel, StepModel

x = np.linspace(0, 10, 201)
y = np.ones_like(x)
y[:48] = 0.0
y[48:77] = np.arange(77-48)/(77.0-48)
np.random.seed(0)
y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0 + 2.22*x

step_mod = StepModel(form='erf', prefix='step_')
line_mod = LinearModel(prefix='line_')

pars = line_mod.make_params(intercept=y.min(), slope=0)
pars += step_mod.guess(y, x=x, center=2.5)

mod = step_mod + line_mod
out = mod.fit(y, pars, x=x)

print(out.fit_report())

plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--')
plt.plot(x, out.best_fit, 'r-')
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
Example #31
    def cHbeta_from_log(self, line_df, line_labels='all', temp=10000.0, den=100.0, ref_wave='H1_4861A',
                        comp_mode='auto', plot_address=False):

        # Use all hydrogen lines if none are defined
        if line_labels == 'all':
            idcs_H1 = line_df.ion == 'H1'
            line_labels = line_df.loc[idcs_H1].index.values
        assert line_labels.size > 0, f'- ERROR: No H1 ion transition lines were found in log. Check dataframe data.'

        # Loop through the input lines
        assert ref_wave in line_df.index, f'- ERROR: {ref_wave} not found in input lines log dataframe for c(Hbeta) calculation'

        # Label the lines which are found in the lines log
        idcs_lines = line_df.index.isin(line_labels) & (line_df.intg_flux > 0) & (line_df.gauss_flux > 0)
        line_labels = line_df.loc[idcs_lines].index.values
        ion_ref, waves_ref, latexLabels_ref = label_decomposition(ref_wave, scalar_output=True)
        ion_array, waves_array, latexLabels_array = label_decomposition(line_labels)

        # Observed ratios
        if comp_mode == 'auto':
            Href_flux, Href_err = line_df.loc[ref_wave, 'intg_flux'], line_df.loc[ref_wave, 'intg_err']
            obsFlux, obsErr = np.empty(line_labels.size), np.empty(line_labels.size)
            slice_df = line_df.loc[idcs_lines]
            idcs_intg = slice_df.blended_label == 'None'
            obsFlux[idcs_intg] = slice_df.loc[idcs_intg, 'intg_flux'].values
            obsErr[idcs_intg] = slice_df.loc[idcs_intg, 'intg_err'].values
            obsFlux[~idcs_intg] = slice_df.loc[~idcs_intg, 'gauss_flux'].values
            obsErr[~idcs_intg] = slice_df.loc[~idcs_intg, 'gauss_err'].values
            obsRatio_uarray = unumpy.uarray(obsFlux, obsErr) / ufloat(Href_flux, Href_err) # TODO unumpy this with your own model

        elif comp_mode == 'gauss':
            Href_flux, Href_err = line_df.loc[ref_wave, 'gauss_flux'], line_df.loc[ref_wave, 'gauss_err']
            obsFlux, obsErr = line_df.loc[idcs_lines, 'gauss_flux'], line_df.loc[idcs_lines, 'gauss_err']
            obsRatio_uarray = unumpy.uarray(obsFlux, obsErr) / ufloat(Href_flux, Href_err)

        else:
            Href_flux, Href_err = line_df.loc[ref_wave, 'intg_flux'], line_df.loc[ref_wave, 'intg_err']
            obsFlux, obsErr = line_df.loc[idcs_lines, 'intg_flux'], line_df.loc[idcs_lines, 'intg_err']
            obsRatio_uarray = unumpy.uarray(obsFlux, obsErr) / ufloat(Href_flux, Href_err)

        assert not np.any(np.isnan(obsFlux)), '- ERROR: nan entry in input fluxes for c(Hbeta) calculation'
        assert not np.any(np.isnan(obsErr)), '- ERROR: nan entry in input uncertainties for c(Hbeta) calculation'

        # Theoretical ratios
        H1 = pn.RecAtom('H', 1)
        refEmis = H1.getEmissivity(tem=temp, den=den, wave=waves_ref)
        emisIterable = (H1.getEmissivity(tem=temp, den=den, wave=wave) for wave in waves_array)
        linesEmis = np.fromiter(emisIterable, float)
        theoRatios = linesEmis / refEmis

        # Reddening law
        rc = pn.RedCorr(R_V=self.R_v, law=self.red_curve)
        Xx_ref, Xx = rc.X(waves_ref), rc.X(waves_array)
        f_lines = Xx / Xx_ref - 1
        f_ref = Xx_ref / Xx_ref - 1

        # cHbeta linear fit values
        x_values = f_lines - f_ref
        y_values = np.log10(theoRatios) - unumpy.log10(obsRatio_uarray)

        # Perform fit
        lineModel = LinearModel()
        y_nom, y_std = unumpy.nominal_values(y_values), unumpy.std_devs(y_values)
        pars = lineModel.make_params(intercept=y_nom.min(), slope=0)
        output = lineModel.fit(y_nom, pars, x=x_values, weights=1 / np.sqrt(y_std))
        cHbeta, cHbeta_err = output.params['slope'].value, output.params['slope'].stderr
        intercept, intercept_err = output.params['intercept'].value, output.params['intercept'].stderr

        if plot_address:

            STANDARD_PLOT = {'figure.figsize': (14, 7), 'axes.titlesize': 12, 'axes.labelsize': 14,
                             'legend.fontsize': 10, 'xtick.labelsize': 10, 'ytick.labelsize': 10}

            axes_dict = {'xlabel': r'$f_{\lambda} - f_{H\beta}$',
                         'ylabel': r'$ \left(\frac{I_{\lambda}}{I_{H\beta}}\right)_{Theo} - \left(\frac{F_{\lambda}}{F_{H\beta}}\right)_{Obs}$',
                         'title': 'Logarithmic extinction coefficient calculation'}

            rcParams.update(STANDARD_PLOT)

            fig, ax = plt.subplots(figsize=(8, 4))
            fig.subplots_adjust(bottom=-0.7)

            # Data ratios
            err_points = ax.errorbar(x_values, y_nom, y_std, fmt='o')

            # Linear fitting
            linear_fit = cHbeta * x_values + intercept
            linear_label = r'$c(H\beta)={:.2f}\pm{:.2f}$'.format(cHbeta, cHbeta_err)
            ax.plot(x_values, linear_fit, linestyle='--', label=linear_label)
            ax.update(axes_dict)

            # Legend
            ax.legend(loc='best')
            ax.set_ylim(-0.5, 0.5)

            # Generate plot
            plt.tight_layout()
            if isinstance(plot_address, (str, pathlib.WindowsPath, pathlib.PosixPath)):
                # crs = mplcursors.cursor(ax, hover=True)
                # crs.connect("add", lambda sel: sel.annotation.set_text(sel.annotation))
                plt.savefig(plot_address, dpi=200, bbox_inches='tight')
            else:
                mplcursors.cursor(ax).connect("add", lambda sel: sel.annotation.set_text(latexLabels_array[sel.target.index]))
                plt.show()

        return cHbeta, cHbeta_err
Example #32
# <examples/doc_builtinmodels_stepmodel.py>
import matplotlib.pyplot as plt
import numpy as np

from lmfit.models import LinearModel, StepModel

x = np.linspace(0, 10, 201)
y = np.ones_like(x)
y[:48] = 0.0
y[48:77] = np.arange(77-48)/(77.0-48)
np.random.seed(0)
y = 110.2 * (y + 9e-3*np.random.randn(x.size)) + 12.0 + 2.22*x

step_mod = StepModel(form='erf', prefix='step_')
line_mod = LinearModel(prefix='line_')

pars = line_mod.make_params(intercept=y.min(), slope=0)
pars += step_mod.guess(y, x=x, center=2.5)

mod = step_mod + line_mod
out = mod.fit(y, pars, x=x)

print(out.fit_report())

plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--', label='initial fit')
plt.plot(x, out.best_fit, 'r-', label='best fit')
plt.legend(loc='best')
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
Example #33
def correlate_spectra(obs_flx, obs_wvl, ref_flx, ref_wvl,
                      plot=False, plot_path='plot.png', cont_value=1.):
    """

    :param obs_flx:
    :param obs_wvl:
    :param ref_flx:
    :param ref_wvl:
    :param plot:
    :param plot_path:
    :param cont_value:
    :return:
    """
    # convert spectra sampling to logspace
    obs_flux_res_log, _ = spectra_logspace(obs_flx, obs_wvl)
    ref_flux_sub_log, wvl_log = spectra_logspace(ref_flx, ref_wvl)
    wvl_step = ref_wvl[1] - ref_wvl[0]

    # fill missing values
    obs_flux_res_log[~np.isfinite(obs_flux_res_log)] = cont_value
    ref_flux_sub_log[~np.isfinite(ref_flux_sub_log)] = cont_value

    # correlate the two spectra
    # min_flux = 0.95
    # set near continuum spectral wiggles to the continuum level
    noRV_flux_mask = np.std(ref_flux_sub_log - cont_value)
    # print('No RV flux level:', noRV_flux_mask)
    ref_flux_sub_log[np.abs(ref_flux_sub_log - cont_value) < noRV_flux_mask] = cont_value
    # obs_flux_res_log[np.abs(obs_flux_res_log - cont_value) < noRV_flux_mask] = cont_value
    corr_res = correlate(cont_value - ref_flux_sub_log, cont_value - obs_flux_res_log,
                         mode='same', method='fft')
    # normalize correlation by the number of involved wavelength bins
    # corr_res /= len(corr_res)

    # create a correlation subset that will actually be analysed
    corr_w_size_wide = 130  # perform rough search of the local peak
    corr_w_size = 60  # narrow down to the exact location of the CC peak
    corr_c_off = np.int64(len(corr_res) / 2.)
    corr_c_off += np.nanargmax(corr_res[corr_c_off - corr_w_size_wide: corr_c_off + corr_w_size_wide]) - corr_w_size_wide
    corr_pos_min = corr_c_off - corr_w_size
    corr_pos_max = corr_c_off + corr_w_size

    if plot:
        w_multi = 6.
        x_corr_res = np.arange(len(corr_res))
        idx = (x_corr_res - corr_c_off) < w_multi*corr_w_size_wide
        plt.plot(x_corr_res[idx], corr_res[idx], lw=1)
        plt.axvline(corr_pos_min, color='black')
        plt.axvline(corr_pos_max, color='black')
        plt.xlim(corr_c_off - w_multi*corr_w_size_wide, corr_c_off + w_multi*corr_w_size_wide)
        plt.ylim(np.min(corr_res[idx])-1, np.max(corr_res[idx])+1)
        plt.tight_layout()
        plt.savefig(plot_path[:-4] + '_corr1.png', dpi=200)
        plt.close()

    # print corr_pos_min, corr_pos_max
    corr_res_sub = corr_res[corr_pos_min:corr_pos_max]
    corr_res_sub -= np.median(corr_res_sub)
    corr_res_sub_x = np.arange(len(corr_res_sub))

    # analyze correlation function by fitting gaussian/voigt/lorentzian distribution to it
    peak_model = GaussianModel()
    additional_model = LinearModel()  # ConstantModel()
    parameters = additional_model.make_params(intercept=np.nanmin(corr_res_sub), slope=0)
    parameters += peak_model.guess(corr_res_sub, x=corr_res_sub_x)
    fit_model = peak_model + additional_model
    corr_fit_res = fit_model.fit(corr_res_sub, parameters, x=corr_res_sub_x)
    corr_center = corr_fit_res.params['center'].value
    corr_center_max = corr_res_sub_x[np.argmax(corr_res_sub)]

    # determine the actual shift
    idx_no_shift = np.int32(len(corr_res) / 2.)
    idx_center = corr_c_off - corr_w_size + corr_center
    idx_center_max = corr_c_off - corr_w_size + corr_center_max
    log_shift_px = idx_no_shift - idx_center
    log_shift_px_max = idx_no_shift - idx_center_max
    log_shift_wvl = log_shift_px * wvl_step

    wvl_log_new = wvl_log - log_shift_wvl
    rv_shifts = (wvl_log_new[1:] - wvl_log_new[:-1]) / wvl_log_new[:-1] * c_val * log_shift_px
    rv_shifts_max = (wvl_log_new[1:] - wvl_log_new[:-1]) / wvl_log_new[:-1] * c_val * log_shift_px_max
    rv_shifts_5 = (wvl_log_new[1:] - wvl_log_new[:-1]) / wvl_log_new[:-1] * c_val * 5

    if plot:
        plt.plot(corr_res_sub, lw=1, color='C0')
        plt.axvline(corr_center_max, color='C0', label='max', ls='--')
        plt.plot(corr_fit_res.best_fit, lw=1, color='C1')
        plt.axvline(corr_center, color='C1', label='fit', ls='--')
        plt.legend()
        plt.title(u'RV max: {:.2f}, RV fit: {:.2f}, $\Delta$RV per 5: {:.2f}, center wvl {:.1f} \n chi2: {:.1f}, sigma: {:.1f}, amplitude: {:.1f}, slope: {:.2f}, $\Delta$y slope: {:.1f}'.format(np.nanmedian(rv_shifts_max), np.nanmedian(rv_shifts), np.nanmedian(rv_shifts_5), np.nanmean(obs_wvl), corr_fit_res.chisqr, corr_fit_res.params['sigma'].value, corr_fit_res.params['amplitude'].value, corr_fit_res.params['slope'].value, corr_fit_res.params['slope'].value*corr_w_size*2))
        plt.tight_layout()
        plt.savefig(plot_path[:-4] + '_corr2.png', dpi=200)
        plt.close()

    if log_shift_wvl < 5.:
        # return np.nanmedian(rv_shifts_max)
        return np.nanmedian(rv_shifts), np.nanmedian(ref_wvl), corr_fit_res.chisqr
    else:
        # something went wrong
        print('    Large wvl shift detected.')
        return np.nan, np.nanmedian(ref_wvl), np.nan
Example #34
def spectrum_calibration(channel_width, energy_list, data_2_calibrate):
    import numpy as np
    import matplotlib.pyplot as plt
    #from scipy.optimize import curve_fit
    #from modelling import gauss
    import statsmodels.api as sm
    from lmfit.models import GaussianModel
    from lmfit.models import LinearModel
    '''
    The while loop goes through and identifies the largest peak in the
    spectrum and it records the position of that peak. It then removes
    the peak by removing 10 channels from the right and left of the peak.
    The code will then search for the next largest position.
    '''

    i = 0
    channel_max_list = []
    gauss_x = []
    gauss_y = []
    fit_channel = []
    while i < len(energy_list):
        channel_max = np.argmax(data_2_calibrate)
        data_left = channel_max - channel_width
        data_right = channel_max + channel_width
        channel_max_list.append(channel_max)
        iterator = data_left
        while iterator < (data_right):
            gauss_x.append(iterator)
            gauss_y.append(data_2_calibrate[iterator])
            x = np.asarray(gauss_x)
            y = np.asarray(gauss_y)
            fit_channel.append(data_2_calibrate[iterator])
            data_2_calibrate[iterator] = 0
            iterator += 1
        i += 1
        mod = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line_')
        pars = mod.guess(y, x=x)
        pars.update(line_mod.make_params(intercept=y.min(), slope=0))
        pars['g1_center'].set(gauss_x[np.argmax(gauss_y)],
                              min=gauss_x[np.argmax(gauss_y)] - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(gauss_y), min=max(gauss_y) - 10)
        mod = mod + line_mod
        out = mod.fit(y, pars, x=x)
        gauss_x = []
        gauss_y = []
        fit_channel = []
        #print(out.fit_report(min_correl=10))
        #for key in out.params:
        #    print(key, "=", out.params[key].value, "+/-", out.params[key].stderr)
    '''
    sorting channel number so the correct channel number corresponds with
    the correct energy.
    '''
    channel_number = sorted(channel_max_list, key=int)
    energy = energy_list
    results = sm.OLS(energy, sm.add_constant(channel_number)).fit()

    slope, intercept = np.polyfit(channel_number, energy, 1)

    abline_values = [slope * i + intercept for i in channel_number]
    #plt.plot(channel_number,energy, 'ro')
    #plt.plot(channel_number, abline_values, 'b')
    #plt.xlabel('Channel Number')
    #plt.ylabel('Energy [keV]')
    #plt.title('Best Fit Line')
    return slope, intercept
Example #35
def fitModel(x, y, t1, t2, t3, t4, t5, t6, n, c1, c2, c3, c4, c5, c6, chck1,
             chck2, chck3):
    fitType1 = t1
    fitType2 = t2
    fitType3 = t3
    fitType4 = t4
    fitType5 = t5
    fitType6 = t6
    numPk = n
    cen = (c1, c2, c3, c4, c5, c6)
    fitstats = chck1
    eqWidth = chck2
    eqBeta = chck3

    Lin1 = LinearModel(prefix='BackG_')
    pars = Lin1.make_params()
    pars['BackG_slope'].set(0)  #, min=-0.001, max=0.001)
    pars['BackG_intercept'].set(2e7, min=0)

    # Map fit-type names onto model classes rather than repeating if/elif chains
    peak_models = {
        'Lorentzian': LorentzianModel,
        'Gaussian': GaussianModel,
        'PseudoVoigt': PseudoVoigtModel,
        'GND': gndModel,
    }
    pk1 = peak_models[fitType1](prefix='Peak1_')
    pk2 = peak_models[fitType2](prefix='Peak2_')
    pk3 = peak_models[fitType3](prefix='Peak3_')
    pk4 = peak_models[fitType4](prefix='Peak4_')
    pk5 = peak_models[fitType5](prefix='Peak5_')
    pk6 = peak_models[fitType6](prefix='Peak6_')

    pars.update(pk1.make_params())
    pars['Peak1_center'].set(cen[0], min=cen[0] - 10, max=cen[0] + 10)
    pars['Peak1_sigma'].set(20, min=0.01, max=50)
    pars['Peak1_amplitude'].set(1e7, min=0)
    if fitType1 == 'GND':
        pars['Peak1_beta'].set(1.5, min=1, max=2)

    if numPk >= 2:
        pars.update(pk2.make_params())
        pars['Peak2_center'].set(cen[1], min=cen[1] - 10, max=cen[1] + 10)
        pars['Peak2_amplitude'].set(1e7, min=0)
        if eqWidth == 1:
            pars['Peak2_sigma'].set(expr='Peak1_sigma')
        elif eqWidth == 0:
            pars['Peak2_sigma'].set(30, min=0.01, max=50)
        if fitType2 == 'GND':
            if eqBeta == 1:
                pars['Peak2_beta'].set(expr='Peak1_beta')
            elif eqBeta == 0:
                pars['Peak2_beta'].set(1.5, min=1, max=2)

    if numPk >= 3:
        pars.update(pk3.make_params())
        pars['Peak3_center'].set(cen[2], min=cen[2] - 10, max=cen[2] + 10)
        pars['Peak3_amplitude'].set(1e7, min=0)
        if eqWidth == 1:
            pars['Peak3_sigma'].set(expr='Peak1_sigma')
        elif eqWidth == 0:
            pars['Peak3_sigma'].set(30, min=0.01, max=50)
        if fitType3 == 'GND':
            if eqBeta == 1:
                pars['Peak3_beta'].set(expr='Peak1_beta')
            elif eqBeta == 0:
                pars['Peak3_beta'].set(1.5, min=1, max=2)

    if numPk >= 4:
        pars.update(pk4.make_params())
        pars['Peak4_center'].set(cen[3], min=cen[3] - 10, max=cen[3] + 10)
        pars['Peak4_sigma'].set(15, min=0.01, max=50)
        pars['Peak4_amplitude'].set(1e7, min=0)
        if fitType4 == 'GND':
            pars['Peak4_beta'].set(1.5, min=1, max=2)

    if numPk >= 5:
        pars.update(pk5.make_params())
        pars['Peak5_center'].set(cen[4], min=cen[4] - 10, max=cen[4] + 10)
        pars['Peak5_sigma'].set(15, min=0.01, max=50)
        pars['Peak5_amplitude'].set(1e7, min=0)
        if fitType5 == 'GND':
            pars['Peak5_beta'].set(1.5, min=1, max=2)

    if numPk == 6:
        pars.update(pk6.make_params())
        pars['Peak6_center'].set(cen[5], min=cen[5] - 10, max=cen[5] + 10)
        pars['Peak6_sigma'].set(15, min=0.01, max=50)
        pars['Peak6_amplitude'].set(1e7, min=0)
        if fitType6 == 'GND':
            pars['Peak6_beta'].set(1.5, min=1, max=2)

    # Model definition: background plus the first numPk peaks
    # (the original chain started at numPk == 2 and never added pk1 for a
    # single-peak fit)
    pkModel = Lin1 + pk1
    for extra_pk in (pk2, pk3, pk4, pk5, pk6)[:numPk - 1]:
        pkModel += extra_pk

    out = pkModel.fit(y, pars, x=x, weights=1.0 / y)

    if fitstats == 1:
        print('\n', out.fit_report(show_correl=False))

    plt.figure(dpi=150, figsize=(3.5, 2.8))
    lwid = 2
    #plt.title('Radial Intensity distribution',fontsize=16)
    plt.plot(x, y, label='data', lw=2)
    plt.plot(x, out.best_fit, 'r-', lw=lwid, label='fit')
    plt.xlabel('Angle (\xb0)', fontsize=16)
    plt.ylabel('Intensity (a.u.)', fontsize=16)
    plt.xticks([0, 90, 180, 270, 360], fontsize=14)
    plt.locator_params('y', nbins=6)
    plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
    plt.yticks(fontsize=14)
    plt.tight_layout()
    plt.minorticks_on()
    #plt.legend(fontsize=10)

    # Evaluate the individual components once; also used below to subtract
    # the background from the best fit.
    comps = out.eval_components(x=x)

    plot_components = True
    if plot_components:
        # Overlay each fitted peak (plus background) and the background itself
        styles = ('c--', 'b--', 'y--', 'g--', 'm--', 'k--')
        for i in range(numPk):
            plt.plot(x, comps['Peak{}_'.format(i + 1)] + comps['BackG_'],
                     styles[i], lw=lwid)
        plt.plot(x, comps['BackG_'], 'r--', lw=lwid)
        #plt.title('Radial Intensity Distribution', fontsize=14)
        #plt.xlabel('Angle (\xb0)', fontsize=28)
        #plt.ylabel('Intensity (a.u.)', fontsize=28)
    #plt.show()

    yBestFit = out.best_fit - comps['BackG_']

    return out, yBestFit
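
A minimal usage sketch for fitModel, assuming numpy, matplotlib, and the lmfit model classes it references are already in scope; the two-peak spectrum and every number below are illustrative only.

import numpy as np

x = np.linspace(0, 360, 721)
y = (2e7                                                # flat background level
     + 5e7 * np.exp(-0.5 * ((x - 90.0) / 15.0) ** 2)    # peak near 90 degrees
     + 3e7 * np.exp(-0.5 * ((x - 270.0) / 20.0) ** 2))  # peak near 270 degrees

# Fit two Gaussian peaks; the unused slots (t3..t6, c3..c6) still need
# placeholder values because the signature is fixed at six peaks.
out, yBestFit = fitModel(x, y,
                         'Gaussian', 'Gaussian', 'Gaussian',
                         'Gaussian', 'Gaussian', 'Gaussian',
                         n=2, c1=90, c2=270, c3=0, c4=0, c5=0, c6=0,
                         chck1=1, chck2=0, chck3=0)
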
def compute_cHbeta(line_df,
                   reddening_curve,
                   R_v,
                   temp=10000.0,
                   den=100.0,
                   ref_wave='H1_4861A',
                   compMode='auto'):

    assert ref_wave in line_df.index, f'- ERROR: Reference line {ref_wave} is not in input dataframe index'

    # Create hydrogen recombination atom for emissivities calculation
    H1 = pn.RecAtom('H', 1)

    # Use all the lines from the input data frame
    line_labels = line_df.index.values
    ion_ref, waves_ref, latexLabels_ref = label_decomposition(
        ref_wave, scalar_output=True)
    ion_array, waves_array, latexLabels_array = label_decomposition(
        line_labels)

    # Mode 1: Distinguish between single (intg_flux) and  blended (gauss_flux) lines
    if compMode == 'auto':
        Href_flux, Href_err = line_df.loc[ref_wave,
                                          'intg_flux'], line_df.loc[ref_wave,
                                                                    'intg_err']

        obsFlux, obsErr = np.empty(line_labels.size), np.empty(
            line_labels.size)
        idcs_intg = (line_df.blended == 'None')

        obsFlux[idcs_intg], obsErr[idcs_intg] = line_df.loc[
            idcs_intg, ['intg_flux', 'intg_err']].values
        obsFlux[~idcs_intg], obsErr[~idcs_intg] = line_df.loc[
            ~idcs_intg, ['gauss_flux', 'gauss_err']].values

    # Mode 2: Use always the gaussian flux
    elif compMode == 'gauss':
        Href_flux, Href_err = line_df.loc[ref_wave, 'gauss_flux'], line_df.loc[
            ref_wave, 'gauss_err']
        obsFlux, obsErr = line_df['gauss_flux'].values, line_df[
            'gauss_err'].values

    else:
        raise ValueError(f'- ERROR: unrecognised compMode "{compMode}"')

    # Ratio propagating the uncertainty between the lines
    obsFlux_norm = obsFlux / Href_flux
    obsErr_norm = obsFlux_norm * np.sqrt(
        np.square(obsErr / obsFlux) + np.square(Href_err / Href_flux))

    assert not np.any(np.isnan(obsFlux)), \
        '- ERROR: nan entry in input fluxes for c(Hbeta) calculation'
    assert not np.any(np.isnan(obsErr)), \
        '- ERROR: nan entry in input uncertainties for c(Hbeta) calculation'

    # Theoretical ratios
    refEmis = H1.getEmissivity(tem=temp, den=den, wave=waves_ref)
    emisIterable = (H1.getEmissivity(tem=temp, den=den, wave=wave)
                    for wave in waves_array)
    linesEmis = np.fromiter(emisIterable, float)
    theoRatios = linesEmis / refEmis

    # Reddening law
    rc = pn.RedCorr(R_V=R_v, law=reddening_curve)
    Xx_ref, Xx = rc.X(waves_ref), rc.X(waves_array)
    f_lines = Xx / Xx_ref - 1
    f_ref = Xx_ref / Xx_ref - 1

    # cHbeta slope fit axes
    x_fred = f_lines - f_ref
    y_flux = np.log10(theoRatios) - np.log10(obsFlux_norm)
    y_err = (obsErr_norm / obsFlux_norm) * (1.0 / np.log(10))

    # Perform fit
    lineModel = LinearModel()
    pars = lineModel.make_params(intercept=y_flux.min(), slope=0)
    output = lineModel.fit(y_flux, pars, x=x_fred, weights=1 / np.sqrt(y_err))
    cHbeta, cHbeta_err = output.params['slope'].value, output.params[
        'slope'].stderr
    intercept, intercept_err = output.params['intercept'].value, output.params[
        'intercept'].stderr

    # Store the results
    output_dict = dict(cHbeta=cHbeta,
                       cHbeta_err=cHbeta_err,
                       intercept=intercept,
                       intercept_err=intercept_err,
                       obsRecomb=obsFlux_norm,
                       obsRecombErr=obsErr_norm,
                       y=y_flux,
                       y_err=y_err,
                       x=x_fred,
                       line_labels=latexLabels_array,
                       ref_line=latexLabels_ref)

    return output_dict
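
To see the c(Hbeta) slope extraction in isolation, here is a minimal self-contained sketch: the (x, y) points are made up and stand in for f(lambda) - f(Hbeta) and the log flux ratios, so the chosen true slope of 0.45 plays the role of c(Hbeta).

import numpy as np
from lmfit.models import LinearModel

rng = np.random.default_rng(0)
x_fred = np.array([-0.30, -0.10, 0.00, 0.20, 0.50])
y_flux = 0.45 * x_fred + rng.normal(0.0, 0.01, x_fred.size)  # true slope 0.45

# Weighted linear fit; the slope estimate recovers c(Hbeta)
model = LinearModel()
pars = model.make_params(slope=0, intercept=0)
fit = model.fit(y_flux, pars, x=x_fred)
print(fit.params['slope'].value, '+/-', fit.params['slope'].stderr)
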