Example #1
def find_fit_sigmoid(x, y):
    model_gompertz = lm.models.Model(gompertz)
    params_gompertz = lm.Parameters()
    params_gompertz.add('asymptote', value=1E-3, min=1E-8)
    params_gompertz.add('displacement', value=1E-3, min=1E-8)
    params_gompertz.add('step_center', value=1E-3, min=1E-8)

    result_gompertz = model_gompertz.fit(y, params_gompertz, x=x)

    step_mod = StepModel(form='erf', prefix='step_')
    line_mod = LinearModel(prefix='line_')

    params_stln = line_mod.make_params(intercept=y.min(), slope=0)
    params_stln += step_mod.guess(y, x=x, center=90)

    model_stln = step_mod + line_mod
    result_stln = model_stln.fit(y, params_stln, x=x)

    ret_result = None
    ret_model = None

    if result_stln.chisqr < result_gompertz.chisqr:
        ret_result = result_stln
        ret_model = model_stln
    else:
        ret_result = result_gompertz
        ret_model = model_gompertz

    return ret_result, ret_model
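The snippet above assumes a user-defined gompertz function (and lmfit imported as lm), neither of which is shown. A minimal sketch of one plausible parameterization follows; it is an assumption, not the original project's definition.

import numpy as np

def gompertz(x, asymptote, displacement, step_center):
    # Standard Gompertz growth form; the third argument is treated here as a
    # rate-like term. The original project's exact parameterization may differ.
    return asymptote * np.exp(-displacement * np.exp(-step_center * x))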
Example #2
def correct_data(xData, yData, startIdx=0, endIdx=-1):
    y = yData
    # start x at zero
    x = xData - xData[0]

    # linear model
    lr = LinearModel()
    params = lr.make_params()

    # fit model
    xSelection = x[startIdx:endIdx]
    ySelection = y[startIdx:endIdx]
    outParams = lr.fit(ySelection, params, x=xSelection)

    # construct corrected data
    linear_trend = x * outParams.best_values['slope'] + outParams.best_values[
        'intercept']
    yCorrected = y - linear_trend + y[0]

    result = {
        "correctedData":
        json.loads(
            pd.DataFrame({
                "x": xData.tolist(),
                "y": yCorrected.tolist()
            }).to_json(orient='records'))
    }
    return result
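A minimal usage sketch (not from the original source), assuming correct_data is defined as above and that its module already imports json, pandas as pd, numpy as np and lmfit's LinearModel:

import numpy as np

xData = np.linspace(0.0, 10.0, 101)
yData = np.sin(xData) + 0.3 * xData           # signal riding on a linear drift
result = correct_data(xData, yData, startIdx=0, endIdx=20)
print(result["correctedData"][:3])            # list of {"x": ..., "y": ...} records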
Example #3
def make_model(peak_positions,
               fwhm=0.05,
               max_fwhm=0.5,
               pos_range=0.5,
               amplitude=1000.):
    n_peaks = len(peak_positions)
    pars = Parameters()

    bg = LinearModel(prefix='bg_')
    pars.update(bg.make_params(slope=0, intercept=0))

    mod = bg
    #pars['bg_intercept'].set(vary=True)
    #pars['bg_slope'].set(vary=True)

    for i in range(n_peaks):
        prefix = 'pk{}_'.format(i)
        peak = PseudoVoigtModel(prefix=prefix)
        # Set this zero
        pars.update(peak.make_params())
        pars[prefix + 'center'].set(peak_positions[i],
                                    min=peak_positions[i] - pos_range,
                                    max=peak_positions[i] + pos_range,
                                    vary=True)
        pars[prefix + 'sigma'].set(fwhm, min=0., max=max_fwhm, vary=True)
        pars[prefix + 'amplitude'].set(amplitude, min=0., vary=True)
        pars[prefix + 'fraction'].set(0.0, min=0., max=1., vary=True)
        mod += peak
    return mod, pars
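A minimal usage sketch (assumed, not part of the original project): build a one-peak model on synthetic data and fit it, assuming the imports used above (Parameters, LinearModel, PseudoVoigtModel from lmfit) are in place:

import numpy as np

x = np.linspace(0.0, 10.0, 500)
y = 5.0 + 0.2 * x + 300.0 * np.exp(-0.5 * ((x - 4.0) / 0.05) ** 2)

mod, pars = make_model([4.0], fwhm=0.05, amplitude=300.0)
result = mod.fit(y, pars, x=x)
print(result.params['pk0_center'].value)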
Example #4
def line_fit(x, y, errors=True):
    """
    Simple helper function that speeds up line fitting. Uses lmfit
    
    Parameters
    ----------
    x, y (float)
        Sample length x, y arrays of data to be fitted

    Returns
    -------
    Returns slope and intercept, with uncertainties (uses the uncertainties package if available)
    Also returns fit object out, can be dropped
    """
    from lmfit.models import LinearModel
    mod = LinearModel()
    par = mod.guess(y, x=x)
    out = mod.fit(y, par, x=x)

    s = out.params['slope']
    i = out.params['intercept']

    if errors:
        try:
            from uncertainties import ufloat
            return ufloat(s.value, s.stderr), ufloat(i.value, i.stderr), out
        except Exception:
            return s.value, s.stderr, i.value, i.stderr, out
    else:
        return s.value, i.value, out
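A minimal usage sketch (assumed): fit noisy straight-line data; errors=False keeps the return signature fixed whether or not the uncertainties package is installed:

import numpy as np

x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

slope, intercept, out = line_fit(x, y, errors=False)
print(slope, intercept)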
Example #5
def line_fit(x, y, errors=True):
    """
    Simple helper function that speeds up line fitting. Uses lmfit
    
    Parameters
    ----------
    x, y (float)
        Sample length x, y arrays of data to be fitted

    Returns
    -------
    Returns slope and intercept, with uncertainties (uses the uncertainties package if available)
    Also returns fit object out, can be dropped
    """
    from lmfit.models import LinearModel

    mod = LinearModel()
    par = mod.guess(y, x=x)
    out = mod.fit(y, par, x=x)

    s = out.params["slope"]
    i = out.params["intercept"]

    if errors:
        try:
            from uncertainties import ufloat

            return ufloat(s.value, s.stderr), ufloat(i.value, i.stderr), out
        except Exception:
            return s.value, s.stderr, i.value, i.stderr, out
    else:
        return s.value, i.value, out
Example #6
    def guess(self, data, x=None, **kwargs):
        lm = LinearModel()
        y_slope = lm.eval(x=x, params=lm.guess(np.abs(data), x=x))
        center, hwhm, height = guess_peak(np.abs(np.abs(data) - y_slope), x=x)

        pars = self.make_params(Ms=height, B_res=center, dB=hwhm)
        return pars
Example #7
def find_peaks(filename,
               show_plots=False):  #subtract background and find peaks
    num_samp_left = 200
    num_samp_right = 200

    data = np.genfromtxt(filename)
    x = data[:, 0]
    y = data[:, 1]
    x_bg = np.hstack([x[:num_samp_left], x[-num_samp_right:]])
    y_bg = np.hstack([y[:num_samp_left], y[-num_samp_right:]])
    model = LinearModel()
    params = model.guess(y_bg, x=x_bg)
    out = model.fit(y_bg, x=x_bg, params=params)
    if show_plots:
        plt.plot(x, y)
        plt.plot(x_bg, y_bg, '.')
    y_fit = out.model.func(x, **out.best_values)
    data = y - y_fit
    if show_plots:
        plt.plot(x, data)
        plt.show()
    indexes = peakutils.indexes(-data, thres=0.25, min_dist=65)
    print(indexes)
    pplot(x, data, indexes)
    peaks_x = peakutils.interpolate(x, data, ind=indexes)
    print(peaks_x)
Example #8
def rotational_temperature_analysis(L, E_upper):
    """
    Function that will perform a rotational temperature analysis. This will perform a least-squares fit of log(L),
    which is related to the theoretical line strength and integrated flux, and the upper state energy for the same
    transition.

    Parameters
    ----------
    L - 1D array
        Value L related to the line and theoretical line strength
    E_upper - 1D array
        The upper state energy in wavenumbers.

    Returns
    -------
    ModelResult - object
        Result of the least-squares fit
    """
    # Convert the upper state energy
    E_upper *= units.kbcm
    logL = np.log(L)
    model = LinearModel()
    params = model.make_params()
    result = model.fit(logL, params, x=E_upper)
    return result
Example #9
def get_db_pixel(tths, xs, ys):
    """Get direct beam pixel at TTH = 0 degrees. This is done by taking a 
    total of 40 images around TTH = 0, finding the direct beam pixel for each of them
    and fitting these positions to a line to get the direct beam pixel position at TTH = 0

    Arguments:
        tths {[float]} -- array of 2th values
        xs {[float]} -- array of the x positions of the direct beam pixel
        ys {[float]} -- array of the y positions of the direct beam pixel

    Returns:
        tuple -- x0, y0 (direct beam pixel position at 2th = 0)
    """

    n0 = len(tths)//2
    s_ = np.s_[n0-20:n0+20]

    xs, ys, tths = xs[s_], ys[s_], tths[s_]

    mod_xs = LinearModel()
    params_xs = mod_xs.guess(xs, x=tths)
    fit_xs = mod_xs.fit(xs, params=params_xs, x=tths)
    fit_xs = mod_xs.fit(xs, params=fit_xs.params, x=tths)

    mod_ys = LinearModel()
    params_ys = mod_ys.guess(ys, x=tths)
    fit_ys = mod_ys.fit(ys, params=params_ys, x=tths)
    fit_ys = mod_ys.fit(ys, params=fit_ys.params, x=tths)
    
    x0 = fit_xs.eval(params=fit_xs.params, x=0.0)
    y0 = fit_ys.eval(params=fit_ys.params, x=0.0)

    return x0, y0
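A minimal usage sketch (assumed, with synthetic positions): the function expects at least 40 points centred on TTH = 0:

import numpy as np

tths = np.linspace(-2.0, 2.0, 41)
xs = 100.0 + 1.5 * tths + np.random.normal(scale=0.05, size=tths.size)
ys = 200.0 - 0.8 * tths + np.random.normal(scale=0.05, size=tths.size)

x0, y0 = get_db_pixel(tths, xs, ys)
print(x0, y0)   # should recover roughly (100, 200)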
Example #10
def basic_fit_FanoResonance(freq,
                            trace,
                            filename='untitled',
                            plot=True,
                            save=True):
    start, stop = None, None  #np.argmax(trace)-500,np.argmax(trace)+500# 27900,28200  #Specifies the window within the data to analyse. Set to None,None if you want the whole window
    Lin_mod = LinearModel(
    )  #Linear lmfit model for background offset and slope
    BW_mod = BreitWignerModel()  #Breit-Wigner-Fano model
    mod = BW_mod + Lin_mod
    x = freq[start:stop] / 1E6  #Convert frequencies to MHz
    trace = (10**(trace / 10))  #Convert decibel data to linear
    y = trace[start:stop]
    pars = BW_mod.guess(y, x=x)  #Initialize fit params
    pars += Lin_mod.guess(y, x=x, slope=0, vary=False)
    pars['center'].set(
        value=x[np.argmax(y)], vary=True, expr=''
    )  #Use numpy to find the highest transmission value. Corresponding frequency is used as a guess for the centre frequency
    pars['sigma'].set(value=0.1, vary=True, expr='')  #Linewidth
    pars['q'].set(
        value=1, vary=True,
        expr='')  #Fano factor (asymmetry term). q=infinite gives a Lorentzian
    pars['amplitude'].set(value=-0.03, vary=True, expr='')  #Amplitude
    out = mod.fit(y, pars, x=x)
    sigma = out.params['sigma']
    centre = out.params['center']
    return (x, y, out.best_fit, sigma.value, centre.value,
            centre.value / sigma.value
            )  #Returns linewidth in GHz, centre in GHz and Q factor
Example #11
def MTF(Y, X):
    """
    Fit a polynomial to the MTF curve
    """
    pow_mod = PowerLawModel(prefix='pow_')
    lin_mod = LinearModel(prefix="lin_")
    const_mod = Model(sigmoid)
    poly_mod = PolynomialModel(3)
     
    #X = list(reversed(X))
     
    pars = poly_mod.guess(Y, x=X) + lin_mod.guess(Y, x=X)
    model = poly_mod + lin_mod
     
    result = model.fit(Y, pars, x=X)
    # write error report
    print(result.fit_report())
     
    c0 = result.best_values['c0']
    c1 = result.best_values['c1']
    c2 = result.best_values['c2']
    slop = result.best_values['lin_slope']
    inter = result.best_values['lin_intercept']
#     c3 = result.best_values['c3']
#     c4 = result.best_values['c4']
#     c5 = result.best_values['c5']
#     c6 = result.best_values['c6']
#     A = result.best_values["amplitude"]
#     k = result.best_values["exponent"]
        
    limit = polynomial(c0,c1,c2,inter,slop,10.)
#     limit = A*9**k
    return result.best_fit, limit
Example #12
def fitLorentzian(x,y):
	signalGuess = max(y)-min(y)
	# centerGuess = x[np.argmax(y)]
	centerGuess = (max(x)+min(x))/2.
	span = max(x)-min(x)
	sigmaGuess = span/10.
	x_bg = np.concatenate((x[:10],x[10:]))
	y_bg = np.concatenate((y[:10],y[10:]))
	background  = LinearModel()
	pars = background.guess(y_bg, x=x_bg)
	peak = LorentzianModel()
	pars.update( peak.make_params())
	pars['center'].set(centerGuess)#,min=min(x),max=max(x))
	pars['sigma'].set(sigmaGuess,max=span/2.)
	pars['amplitude'].set(signalGuess*sigmaGuess*np.pi,min=0.00000001)
	pars.add('signal', expr='amplitude/(sigma*pi)')
	pars.add('background', expr='intercept+slope*center')
	pars.add('contrast', expr='amplitude/(sigma*pi*background)')
	pars.add('centerDoubled', expr='2*center')
	pars.add('shift', expr='2*center-9192631770')
	pars.add('fwhmDoubled', expr='4*sigma')
	model = peak + background
	init = model.eval(pars, x=x)
	out = model.fit(y, pars, x=x)
	#print out.fit_report()
	return init,out
Example #13
def calibrate_pitch(mono='111'):
    BMMuser = user_ns['BMMuser']
    # read content from INI file
    datafile = os.path.join(BMMuser.DATA, 'edges%s.ini' % mono)
    print(f'reading {datafile}')
    config.read_file(open(datafile))

    edges = dict()
    for i in config.items('edges'):
        el = i[0]
        vals = [float(j) for j in i[1].split(',')
                ]  # convert CSV string -> list of strings -> list of floats
        edges[el] = vals

    # organize the data from the INI file
    ordered = [y[1] for y in sorted([(edges[x][1], x) for x in edges.keys()])]
    ee = list()
    tt = list()
    for el in ordered:
        ee.append(edges[el][1])
        tt.append(edges[el][3])

    mod = LinearModel()
    pars = mod.guess(tt, x=ee)
    out = mod.fit(tt, pars, x=ee)
    print(whisper(out.fit_report(min_correl=0)))
    out.plot()
Example #14
def lmDDOFit(xdata,
             ydata,
             params,
             ctr_range=1.2,
             amp_range=3,
             sig_range=6,
             weightexponential=0):

    x = xdata
    y = ydata
    #Define a linear model and a Damped Oscillator Model
    line_mod = LinearModel(prefix='line_')
    ddo_mod = DampedOscillatorModel(prefix='ddo_')
    #Initial Pars for Linear Model
    pars = line_mod.make_params(intercept=0, slope=0)
    pars['line_intercept'].set(0, vary=True)
    pars['line_slope'].set(0, vary=True)
    #Extend param list to use multiple peaks. Currently unused.
    peaks = []
    #Add fit parameters, Center, Amplitude, and Sigma
    for i in range(0, len(params) // 3):
        peaks.append(DampedOscillatorModel(prefix='ddo' + str(i) + '_'))
        pars.update(peaks[i].make_params())
        ctr = params[3 * i]
        amp = params[3 * i + 1]
        sig = params[3 * i + 2]
        pars['ddo' + str(i) + '_center'].set(ctr,
                                             min=ctr / ctr_range,
                                             max=ctr * ctr_range)
        pars['ddo' + str(i) + '_amplitude'].set(amp,
                                                min=amp / amp_range,
                                                max=amp * amp_range)
        pars['ddo' + str(i) + '_sigma'].set(sig,
                                            min=sig / sig_range,
                                            max=sig * sig_range)
#Create full model. Add linear model and all peaks
    mod = line_mod
    for i in range(len(peaks)):
        mod = mod + peaks[i]


#Initialize fit
    init = mod.eval(pars, x=x)
    #Do the fit. The weight exponential can weight the points porportional to the
    #amplitude of y point. In this way, points on peak can be given more weight.
    out = mod.fit(y, pars, x=x, weights=y**weightexponential)
    #Get the fit parameters
    fittedsigma = out.params['ddo0_sigma'].value
    fittedAmp = out.params['ddo0_amplitude'].value
    fittedCenter = out.params['ddo0_center'].value
    fittedIntercept = out.params['line_intercept'].value
    fittedSlope = out.params['line_slope'].value
    fittedQ = 1 / (2 * fittedsigma)
    #Returns the output fit as well as an array of the fit parameters
    """Returns output fit as will as list of important fitting parameters"""
    return out, [
        fittedCenter, fittedAmp, fittedsigma, fittedQ, fittedIntercept,
        fittedSlope
    ]
Example #15
def gauss_peak_fit(energy_data, cnts_data, energy_spectrum, channel_width):
    '''
    spectrum_gauss_fit takes an input spectrum and finds the peaks of the
    spectrum and fits a gaussian curve to the photopeaks and returns the
    amplitude and sigma of the gaussian peak.
    Make sure the spectrum is calibrated first.

    sigma_list, amplitude_list = spectrum_gauss_fit(energy_data, cnts_data, energy_spectrum, channel_width)

    energy_data: .energies_kev that has been calibrated from becquerel
    cnts_data: .cps_vals from becquerel spectrum
    energy_spectrum: an array of gamma energies generated from gamma_energies
    channel_width: width of the peak for analysis purposes
    '''
    sigma_list = []
    amplitude_list = []
    for erg in energy_spectrum:
        x_loc = list(
            filter(lambda x: (erg - 3) < energy_data[x] < (erg + 3),
                   range(len(energy_data))))
        x_loc_pk = range(int(x_loc[0] - 5), int(x_loc[0] + 5))
        pk_cnt = np.argmax(cnts_data[x_loc_pk])
        ch_width = range(int(x_loc_pk[pk_cnt] - channel_width),
                         int(x_loc_pk[pk_cnt] + channel_width))

        calibration = energy_data[ch_width]
        real_y_gauss = cnts_data[ch_width]
        x = np.asarray(calibration)
        real_y = np.asarray(real_y_gauss)

        mod_gauss = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line')
        pars = mod_gauss.guess(real_y, x=x)
        pars.update(line_mod.make_params(intercept=real_y.min(), slope=0))
        pars.update(mod_gauss.make_params())
        pars['g1_center'].set(x[np.argmax(real_y)], min=x[np.argmax(real_y)]\
        - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(real_y), min=max(real_y) - 10)
        mod = mod_gauss + line_mod
        out = mod.fit(real_y, pars, x=x)

        #print("The amplitude sum is %0.2f" % sum(real_y))
        gauss_x = []
        gauss_y = []
        parameter_list_1 = []
        real_y_gauss = []
        #print(out.fit_report(min_correl=10))
        sigma = out.params['g1_sigma'].value
        amplitude = out.params['g1_amplitude'].value
        sigma_list.append(sigma)
        amplitude_list.append(amplitude)
        fit_params = {}

        #gauss_fit_parameters = [out.params[key].value for k in out.params]
        #print(key, "=", out.params[key].value, "+/-", out.params[key].stderr)
        gauss_fit_parameters = []

    return sigma_list, amplitude_list
Example #16
    def fitting_math(
        self,
        xfile: List[str],
        yfile: List[str],
        flag: int = 1,
    ) -> Any:
        """PeakLogic.fitting_math() fits the data to a cosh and a
        gaussian, then subtracts the cosh to find peak current.."""

        try:
            center: float = self.app.peak_center_.get()
            x: "np.ndarray[Any, np.dtype[np.float64]]" = np.array(
                xfile, dtype=np.float64)
            y: "np.ndarray[Any, np.dtype[np.float64]]" = np.array(
                yfile, dtype=np.float64)

            # cut out outliers
            passingx: "np.ndarray[Any, np.dtype[np.float64]]"
            passingy: "np.ndarray[Any, np.dtype[np.float64]]"
            passingx, passingy = self.trunc_edges(xfile, yfile)

            rough_peak_positions = [min(passingx), center]

            min_y = float(min(passingy))
            model = LinearModel(prefix="Background")
            params = model.make_params()  # a=0, b=0, c=0
            params.add("slope", 0, min=0)
            # params.add("b", 0, min=0)
            params.add("intercept", 0, min=min_y)

            for i, cen in enumerate(rough_peak_positions):
                peak, pars = self.add_lz_peak(f"Peak_{i+1}", cen)
                model = model + peak
                params.update(pars)

            _ = model.eval(params, x=passingx)
            result = model.fit(passingy, params, x=passingx)
            comps = result.eval_components()

            ip = float(max(comps["Peak_2"]))

            if flag == 1:
                return ip
            if flag == 0:
                return (
                    x,
                    y,
                    result.best_fit,
                    comps["Background"],
                    comps["Peak_1"],
                    comps["Peak_2"],
                    ip,
                    passingx,
                )

        except Exception:  # pragma: no cover
            print("Error Fitting")
            print(sys.exc_info())
            return -1
Example #17
def poly_gaussian():
    gauss1 = Model(gaussian, independent_vars=['x'], prefix='gauss1_')
    gauss2 = Model(gaussian, independent_vars=['x'], prefix='gauss2_')
    gauss3 = Model(gaussian, independent_vars=['x'], prefix='gauss3_')
    gauss4 = Model(gaussian, independent_vars=['x'], prefix='gauss4_')
    linear1 = LinearModel(independent_vars=['x'], prefix='linear1_')
    linear2 = LinearModel(independent_vars=['x'], prefix='linear2_')
    model = gauss1 + gauss2 + linear1 + gauss3 + linear2 + gauss4
    return model
Example #18
def lenar_calc(x,y):
    mod = LinearModel()
    pars = mod.guess(y, x=x)
    out  = mod.fit(y, pars, x=x)
    calc = out.best_values['slope']
    stress = calc * multi()
    stress = round(stress, 3)
    #plt.plot(x, out.best_fit)
    return stress, x, out.best_fit, out
Example #19
    def LorentzianFit(self, freq, trace, plot=True):

        if np.any(np.iscomplex(trace)):
            trace = trace.real

        #print (len(trace))
        start, stop = None, None  #Specifies the window within the data to analyse.
        Lin_mod = LinearModel(
        )  #Linear lmfit model for background offset and slope
        BW_mod = BreitWignerModel()  #Breit-Wigner-Fano model
        mod = BW_mod + Lin_mod

        x = freq[start:stop] / 1E6  #Convert frequencies to MHz
        trace = (10**(trace / 10))  #Convert decibel data to linear
        y = trace[start:stop]

        pars = BW_mod.guess(y, x=x)  #Initialize fit params
        pars += Lin_mod.guess(y, x=x, slope=0, vary=False)
        pars['center'].set(
            value=x[np.argmax(y)], vary=True, expr=''
        )  #Find the highest transmission value. Corresponding frequency is used as a guess for the centre frequency
        pars['sigma'].set(value=0.05, vary=True, expr='')  #Linewidth
        pars['q'].set(
            value=0, vary=True,
            expr='')  #Fano factor (asymmetry term). q=0 gives a Lorentzian
        pars['amplitude'].set(value=-0.03, vary=True, expr='')  #Amplitude

        out = mod.fit(y, pars, x=x)
        #         print (out.fit_report())
        #print (out.params['amplitude'],out.params['q'],out.params['sigma'])
        sigma = out.params['sigma']
        centre = out.params['center']

        dic = {
            'x': x,
            'y': y,
            'fit': out.best_fit,
            'out': out,
            'sigma': sigma.value,
            'centre': centre.value,
            'Q': centre.value / sigma.value
        }

        df = pd.DataFrame(data=dic)

        if plot == True:
            print(out.params['amplitude'], out.params['q'],
                  out.params['sigma'])
            plt.plot(x, y, color='orange', label='Data')
            plt.plot(x,
                     out.best_fit,
                     color='darkslateblue',
                     label='Fano resonance fit')

#         return(sigma.value,centre.value,centre.value/sigma.value)       #Returns linewidth in GHz, centre in GHz and Q factor
        return df
Example #20
File: models.py Project: rhroberts/kfit
def line_mod(N):
    '''
        Returns a model consisting of N lines
    '''
    # initialize model
    model = LinearModel(prefix='lin1_')
    # Add N-1 lines
    for i in range(N - 1):
        model += LinearModel(prefix='lin' + str(i + 2) + '_')
    return model
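A minimal usage sketch (assumed): build a two-line composite and inspect its parameter names:

model = line_mod(2)
print(model.param_names)   # lin1_ and lin2_ slope/intercept parameters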
Example #21
    def lin_and_multi_gaussian(self, numOfComponents, cList, sList, aList, lS, lI, limits):
        """All lists should be the same length"""
        gList = []

        if self.xAxis == 'wave' and self.initVals == 'vel':
            cList = vel_to_wave(self.restWave, vel=np.array(cList), flux=0)[0]
            sList = vel_to_wave(self.restWave, vel=np.array(sList), flux=0, delta=True)[0]
            aList = vel_to_wave(self.restWave, vel=0, flux=np.array(aList))[1]
        elif self.xAxis == 'vel' and self.initVals == 'wave':
            cList = wave_to_vel(self.restWave, wave=np.array(cList), flux=0)[0]
            sList = wave_to_vel(self.restWave, wave=np.array(sList), flux=0, delta=True)[0]
            aList = wave_to_vel(self.restWave, wave=0, flux=np.array(aList))[1]

        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)

        for i in range(numOfComponents):
            if type(limits['c']) is list:
                cLimit = limits['c'][i]
            else:
                cLimit = limits['c']
            if type(limits['s']) is list:
                sLimit = limits['s'][i]
            else:
                sLimit = limits['s']
            if type(limits['a']) is list:
                aLimit = limits['a'][i]
            else:
                aLimit = limits['a']
            lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
            prefix = 'g{0}_'.format(i+1)
            gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
        gList = np.array(gList)
        mod = lin + gList.sum()

        init = mod.eval(self.linGaussParams, x=self.x)
        out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
        f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
        print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        print(out.fit_report())
        f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        f.write(out.fit_report())
        f.close()
        components = out.eval_components()

        if not hasattr(self.rp, 'plotResiduals'):
            self.rp.plotResiduals = True
        self.plot_emission_line(numOfComponents, components, out, self.rp.plotResiduals, init=init, scaleFlux=self.rp.scaleFlux)

        self._get_amplitude(numOfComponents, out)

        return out, components
Example #22
def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
    # create data to be fitted
    np.random.seed(88)
    x = np.linspace(0, 10, 101)
    y = intercept + x*slope
    y = y + np.random.normal(size=len(x), scale=noise)

    model = LinearModel()
    params = model.make_params(intercept=intercept, slope=slope)

    return x, y, model, params
Example #23
def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
    # create data to be fitted
    np.random.seed(88)
    x = np.linspace(0, 10, 101)
    y = intercept + x * slope
    y = y + np.random.normal(size=len(x), scale=noise)

    model = LinearModel()
    params = model.make_params(intercept=intercept, slope=slope)

    return x, y, model, params
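A minimal usage sketch (assumed): generate the synthetic data and fit it with the returned model and starting parameters:

x, y, model, params = get_linearmodel(slope=0.8, intercept=0.5, noise=1.5)
result = model.fit(y, params, x=x)
print(result.params['slope'].value, result.params['intercept'].value)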
Example #24
def fitsample(data, theta_initial, theta_final):        
    
    
    x = data[:,0]
    y = data[:,1]
    m = (x > theta_initial) & (x < theta_final)
    x_fit = x[m]
    y_fit = y[m]

    

    pseudovoigt1 = VoigtModel(prefix = 'pv1_')    
    pars= pseudovoigt1.make_params()
    pars['pv1_center'].set(13.5, min = 13.4, max = 13.6)
    pars['pv1_sigma'].set(0.05, min= 0.01, max = 0.1)
    pars['pv1_amplitude'].set(70, min = 1, max = 100)
    #pars['pv1_fraction'].set(0.5)
    

    lorentz2 = LorentzianModel(prefix = 'lor2_')
    pars.update(lorentz2.make_params())
    pars['lor2_center'].set(13.60, min = 13.4, max = 13.9)
    pars['lor2_sigma'].set(0.1, min= 0.01)
    pars['lor2_amplitude'].set(10, min = 1, max = 50 )
    #pars['lor2_fraction'].set(0.5)
    
    line1 = LinearModel(prefix ='l1_')
    pars.update(line1.make_params())
    pars['l1_slope'].set(0)
    pars['l1_intercept'].set(240, min = 200, max = 280)

    
    
    mod = pseudovoigt1 + lorentz2 + line1
    v = pars.valuesdict()
     
    result = mod.fit(y_fit, pars, x=x_fit)    

    #print(result.fit_report())    
    pv1_pos = result.params['pv1_center'].value
    pv1_height = result.params['pv1_height'].value
    lor2_pos = result.params['lor2_center'].value
    lor2_height = result.params['lor2_height'].value
    #peak_area = pars['gau1_fwhm'].value*peak_amp
    #plt.xlim([theta_initial, theta_final])
    #plt.ylim([100, 500])
    #plt.semilogy(x_fit, y_fit, 'bo')
    
    #plt.semilogy (x_fit, result.init_fit, 'k--')    
    #plt.semilogy(x_fit, result.best_fit, 'r-')
    #plt.show()
    return pv1_pos, pv1_height, lor2_pos, lor2_height
Example #25
def lenar_calc(x, y):
    global dados
    mod = LinearModel()
    pars = mod.guess(y, x=x)
    out = mod.fit(y, pars, x=x)

    ##    print out.best_values
    plt.plot(x, out.best_fit)
    calc = out.best_values['slope']
    ##    print calc,multi()
    stresslocal = calc * multi()
    globalstress.append(stresslocal)
    print('Value:', dados, round(stresslocal, 3))
Example #26
def direct_method(x,
                  y,
                  bin_sizes,
                  fractions,
                  num_samples,
                  weights=None,
                  shuffle_threshold=0.02,
                  return_slope=False,
                  return_for_graph=False):
    if not x.shape[0] == y.shape[0]:
        raise ValueError("x and y must have same length in first dimension.")
    if y.ndim == 1:
        MIs = create_samples_1d(x,
                                y,
                                bin_sizes,
                                fractions,
                                num_samples,
                                weights=weights)
    else:
        MIs = create_samples_1d_dd(x,
                                   y,
                                   bin_sizes,
                                   fractions,
                                   num_samples,
                                   weights=weights)
    MIs = MIs.reshape((-1, len(fractions), 2))

    params = fit_linear_to_array(MIs[..., 0], 1 / fractions, MIs[..., 1])
    params = params.reshape((len(bin_sizes), 2, 4))

    where_less = params[:, 1, 2] < shuffle_threshold
    if where_less.sum() < 3:
        where_less = np.zeros(params.shape[0], dtype=bool)
        where_less[:3] = True

    model = LinearModel()
    result = model.fit(
        data=params[where_less, 0, 2],
        x=1 / bin_sizes[where_less]**2,
        weights=params[where_less, 0, 3]**2,
    )
    mi = result.params["intercept"].value
    mi_err = result.params["intercept"].stderr
    if not return_for_graph:
        if return_slope:
            return mi, mi_err, result.params["slope"].value
        else:
            return mi, mi_err
    else:
        return (mi, mi_err, MIs.reshape(len(bin_sizes), 2, len(fractions), 2),
                params, result.params["slope"].value, where_less)
Example #27
def fF_plot(pressures, volumes, out_params):
    p = pressures[:, 0]
    sigP = pressures[:, 1]
    V = volumes[:, 0]
    sigV = volumes[:, 1]

    results = out_params.params.valuesdict()

    Vo = results['vo']
    ko = results['ko']
    kpo = results['kp']

    sigVo = out_params.params['vo'].stderr

    #ignore the divide by zero error if the first piece of data is at 0 GPa
    np.seterr(divide='ignore', invalid='ignore')

    #f = (1.0/2.0)*(((V/Vo)**(-2.0/3.0))-1.0)
    f = (((Vo / V)**(2. / 3.)) - 1.) / 2.
    F = p / (3. * f * (1. + (2. * f))**(5. / 2.))
    eta = V / Vo
    sigeta = np.abs(eta) * ((((sigV / V)**2.0) +
                             ((sigVo / Vo)**2))**(1.0 / 2.0))
    sigprime = ((7.0 * (eta**(-2.0 / 3.0)) - 5.0) *
                sigeta) / (2.0 * (1.0 - (eta**-2.0 / 3.0)) * eta)
    sigF = F * np.sqrt(((sigP / p)**2.0) + (sigprime**2))

    line_mod = LinearModel()
    pars = line_mod.guess(F, x=f)
    out = line_mod.fit(F, pars, x=f)

    plt.figure(4)
    plt.plot(f, out.best_fit, '-', color='black')

    plt.errorbar(f, F, fmt='ko', xerr=0, yerr=sigF, alpha=1.0, capsize=3.)

    plt.xlabel('Eulerian strain $\mathit{f_E}$', fontweight='bold')
    plt.ylabel('Normalized pressure $\mathbf{F_E}$ (GPa)', fontweight='bold')
    plt.tick_params(direction='in', bottom=1, top=1, left=1, right=1)
    plt.title("$\mathit{f_E}$-F", fontweight='bold')

    #plt.savefig('Ff-plot.png',dpi=600,bbox_inches='tight')

    print(out.fit_report())

    slope = uct.ufloat(out.params['slope'], out.params['slope'].stderr)
    inter = uct.ufloat(out.params['intercept'], out.params['intercept'].stderr)

    k_p = ((2.0 * slope) / (3 * inter)) + 4

    return k_p, f, F, sigF, out.best_fit
Example #28
def plot_gauss(energy_data, cnts_data, energy_spectrum, channel_width):
    '''
    plot_gauss takes an input spectrum and plots the gaussian fit to the photopeaks
    Make sure the spectrum is calibrated first.
    plot_gauss(energy_data, cnts_data, energy_spectrum, channel_width)
    energy_data: .energies_kev that has been calibrated from becquerel
    cnts_data: .cps_vals from becquerel spectrum
    energy_spectrum: an array of gamma energies generated from gamma_energies
    channel_width: width of the peak for analysis purposes
    '''
    for erg in energy_spectrum:
        x_loc = list(
            filter(lambda x: (erg - 3) < energy_data[x] < (erg + 3),
                   range(len(energy_data))))
        x_loc_pk = range(int(x_loc[0] - 5), int(x_loc[0] + 5))
        pk_cnt = np.argmax(cnts_data[x_loc_pk])
        ch_width = range(int(x_loc_pk[pk_cnt] - channel_width),
                         int(x_loc_pk[pk_cnt] + channel_width))

        calibration = energy_data[ch_width]
        real_y_gauss = cnts_data[ch_width]
        x = np.asarray(calibration)
        real_y = np.asarray(real_y_gauss)

        mod_gauss = GaussianModel(prefix='g1_')
        line_mod = LinearModel(prefix='line')
        pars = mod_gauss.guess(real_y, x=x)
        pars.update(line_mod.make_params(intercept=real_y.min(), slope=0))
        pars.update(mod_gauss.make_params())
        pars['g1_center'].set(x[np.argmax(real_y)], min=x[np.argmax(real_y)]\
        - 3)
        pars['g1_sigma'].set(3, min=0.25)
        pars['g1_amplitude'].set(max(real_y), min=max(real_y) - 10)
        mod = mod_gauss + line_mod
        out = mod.fit(real_y, pars, x=x)

        plt.figure()
        plt.plot(x, real_y)
        plt.plot(x, out.best_fit, 'k--')
        energy_title = np.argmax(x)
        max_y = np.argmax(real_y)  # Find the maximum y value
        max_x = x[(
            max_y)]  # Find the x value corresponding to the maximum y value
        plt.title('Gaussian fit around %0.1f keV' % x[max_y])
        plt.xlabel('Energy (keV)')
        plt.ylabel('CPS')
        plt.show()
        gauss_x = []
        gauss_y = []
        parameter_list_1 = []
        real_y_gauss = []
Example #29
def fit_voigt_over_linear(q,
                          I,
                          cen=1,
                          sig=0.002,
                          sigmin=1e-4,
                          sigmax=0.01,
                          amplmin=0,
                          amplmax=500,
                          trim=0.06,
                          plot=False):

    trim = logical_and(q < cen + trim, q > cen - trim)
    q = q[trim]
    I = I[trim]

    mod = LinearModel()
    mod.set_param_hint('slope', value=-20)
    mod.set_param_hint('intercept', value=10)
    lineout = mod.fit(I, x=q)
    pars = lineout.params

    mod += VoigtModel()
    pars.add('center', value=cen)
    pars.add('sigma', value=sig, max=sigmax, min=sigmin)
    pars.add('amplitude',
             value=amplmin / 2 + amplmax / 2,
             min=amplmin,
             max=amplmax)
    out = mod.fit(I, pars, x=q)

    return out
Example #30
def linear_fit(x, y, u_y=None, slope_guess=None, intercept_guess=None):
    """ General purpose linear fit function.

    This function takes your x and y data (as numpy arrays) and returns a
    :py:class:`lmfit.model.ModelResult` object from the `lmfit`_ Python library.
    It attempts to fit your data to a model define by:
        :math:`y=mx+c`
    where :math:`m = slope` and :math:`c = intercept`.
    If guesses for the slope and intercept are not explicitly provided when
    calling this function, they will be inferred from the provided data arrays.

    Arguments:
        x: A 1D numpy array of x data points

        y: A 1D numpy array of y data points

    Keyword Arguments:
        u_y: An optional argument for providing a 1D numpy array of uncertainty 
             values for the y data points

        slope_guess: An optional argument for providing an initial guess for the
                     value of the slope parameter

        intercept_guess: An optional argument for providing an initial guess for the
                         value of the intercept parameter

    Returns:
        A :py:class:`lmfit.model.ModelResult` object from the `lmfit`_ Python library

    .. _`lmfit`: https://lmfit.github.io/lmfit-py/

    """
    # Create Model
    model = LinearModel()
    guess_kwargs = {}
    # Create parameter guesses
    if slope_guess is not None:
        guess_kwargs['slope'] = slope_guess
    if intercept_guess is not None:
        guess_kwargs['intercept'] = intercept_guess

    initial_parameters = model.guess(y, x=x, **guess_kwargs)

    fit_result = model_fit(model, initial_parameters, x, y, u_y)

    if not fit_result.success:
        raise MonashSPAFittingException("The call to 'linear_fit(...)' failed. Perhaps try specifying a good guess for the 'gradient_guess' and/or 'intercept_guess' keyword arguments? The error message returned by the fitting algorithm was: {error}".format(error = fit_result.message))

    return fit_result
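A minimal usage sketch (assumed): linear_fit relies on a module-level model_fit helper and MonashSPAFittingException from the surrounding library, so only the call pattern is shown here:

import numpy as np

x = np.linspace(0.0, 5.0, 40)
y = 3.0 * x + 2.0 + np.random.normal(scale=0.2, size=x.size)
u_y = np.full_like(y, 0.2)

fit_result = linear_fit(x, y, u_y=u_y)
print(fit_result.params['slope'].value, fit_result.params['intercept'].stderr)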
Example #31
def fit_GC_residual(x, y, peak, peak_center):
    mod = LinearModel(prefix='bkg_')

    pars = mod.guess(y, x)  # mod.make_params()
    #pars['bkg_intercept'].value = 1e5#
    #pars['bkg_slope'].value = 500

    out = mod.fit(y, pars, x=x)

    # if peak == 'H2':
    #     #print('Nfev = ', out.nfev)
    #     print(out.fit_report())
    #     #print(out.pars['peak_amplitude'].value)

    return out
Example #32
def fit_adev(tau, adev, err_lo, err_high):

    fit_tau_over = 499

    # If there are at least 2 datapoints to fit at large tau_values, fit them
    if len(tau[np.where(tau > fit_tau_over)]) >= 2:

        # TODO: take into account asymmetric errorbars
        weights = (err_lo + err_high) / 2  # take naive 1-std errorbar average

        x = np.array([t for t in tau
                      if t > fit_tau_over])  # only fit long tau values
        y = np.array([a for i, a in enumerate(adev) if tau[i] > fit_tau_over
                      ])  # take equivalent in adev array
        w = np.array([
            h for i, h in enumerate(weights) if tau[i] > fit_tau_over
        ])  # take equivalent in weights array

        # Fit straight line on a log10 scale
        x = np.log10(x)
        y = np.log10(y)
        w_log = np.log10(np.exp(1)) * (w / y
                                       )  # error propagation for log10(y +- w)

        # Weighted Least Squares fit; ax + b
        model = LinearModel()

        params = model.make_params()
        params['intercept'].max = -10
        params['intercept'].value = -15
        params['intercept'].min = -19
        params['intercept'].brute_step = 0.005
        params[
            'slope'].value = -0.5  # assume white noise dominates on fitting range
        params['slope'].vary = False  # ... so we keep this parameter fixed

        res = model.fit(y, params, weights=1 / w_log**2, x=x)

        a = res.values['slope']
        b = res.values['intercept']

        x_smooth = np.logspace(0, 5, 20)
        return x_smooth, 10**(res.eval(x=np.log10(x_smooth))), a, b

    # Else if there are not enough large tau_values to fit, return empty arrays
    else:

        return [], [], 0.5, -1
Example #33
    def twoPeakLorentzianFit(self):
        try:
            nRow, nCol = self.dockedOpt.fileInfo()

            self.gausFit.binFitData = zeros((nRow, 0))
            self.gausFit.TwoPkGausFitData = zeros((nCol, 12))  # Creates the empty 2D List
            for j in range(nCol):
                yy1 = []
                yy2 = []
                yy = self.dockedOpt.TT[:, j]
                i = 0
                for y in yy:
                    if i < len(yy)/2:
                        yy1.append(y)
                    else:
                        yy2.append(y)
                    i += 1

                xx = arange(0, len(yy))
                xx1 = arange(0, len(yy)/2)
                xx2 = arange(len(yy)/2, len(yy))

                x1 = xx[0]
                x2 = xx[-1]
                y1 = yy[0]
                y2 = yy[-1]
                m = (y2 - y1) / (x2 - x1)
                b = y2 - m * x2

                mod1 = LorentzianModel(prefix='p1_')
                mod2 = LorentzianModel(prefix='p2_')

                pars1 = mod1.guess(yy1, x=xx1)
                pars2 = mod2.guess(yy2, x=xx2)
                mod = mod1 + mod2 + LinearModel()
                pars = pars1 + pars2

                pars.add('intercept', value=b, vary=True)
                pars.add('slope', value=m, vary=True)
                out = mod.fit(yy, pars, x=xx, slope=m)



                self.gausFit.TwoPkGausFitData[j, :] = (out.best_values['p1_amplitude'], 0, out.best_values['p1_center'],
                                                       0, out.best_values['p1_sigma'], 0,
                                                       out.best_values['p2_amplitude'], 0, out.best_values['p2_center'],
                                                       0, out.best_values['p2_sigma'], 0)

                # Saves fitted data of each fit
                fitData = out.best_fit
                binFit = np.reshape(fitData, (len(fitData), 1))
                self.gausFit.binFitData = np.concatenate((self.gausFit.binFitData, binFit), axis=1)

                if self.gausFit.continueGraphingEachFit == True:
                    self.gausFit.graphEachFitRawData(xx, yy, out.best_fit, 'L')

            return False
        except Exception as e:
            QMessageBox.warning(self.myMainWindow, "Error", "There was an error \n\n Exception: " + str(e))
            return True
Example #34
def init_dr_model(baseline=False, guess=None):
    """ Function to serialize a Model object for DR fits.
        Defaults to a Gaussian line shape, but the option to include
        a linear offset is available.

        Additionally, if a guess is provided in the
        lmfit convention then it will be incorporated.

        args:
        -----------------
        baseline - boolean indicating if linear offset is included
        guess - nested dict with keys corresponding to parameter name
                and values corresponding to dicts with key/value
                indicating the parameter value, range, and constraints
    """
    model = GaussianModel()
    if baseline:
        model += LinearModel()
    params = model.make_params()
    if guess:
        params.update(guess)
    # Constrain amplitude to always be negative
    # since we're measuring depletion.
    params["amplitude"].set(max=0.0)
    return model, params
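A minimal usage sketch (assumed): build the Gaussian-plus-baseline model and seed the centre by hand:

model, params = init_dr_model(baseline=True)
params['center'].set(value=12682.0)   # hypothetical line centre
print(model.param_names)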
Example #35
def replaceSpike(x, y, I):
    """
    y = replaceSpike(x, y, I)
    Replace bad points in y by good ones.
    I is the index of bad points.
    Works by doing a linear fit over the data.
    """
    mod = LinearModel()
    params = mod.guess(data=np.delete(y, I), x=np.delete(x, I))
#    print(params)
#    print(np.delete(y, I))
    result = mod.fit(np.delete(y, I), params, x=np.delete(x, I))    
#    print(result.fit_report())
#    print(result.best_values)
    yy = mod.eval(x=x, slope=result.best_values['slope'], intercept=result.best_values['intercept'])
    y[I] = yy[I]
    return y
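A minimal usage sketch (assumed): inject two spikes into a straight line and let the linear fit of the remaining points replace them:

import numpy as np

x = np.arange(20, dtype=float)
y = 2.0 * x + 1.0
bad = [5, 12]
y[bad] = [100.0, -50.0]      # inject spikes
y = replaceSpike(x, y, bad)
print(y[bad])                # restored from the linear fit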
Example #36
File: plot.py Project: RFlehr/AccQ
 def calculateSlope(self, x, y, numPoints = 50):
     nop = self.__regPoints
     if nop > len(x):
         _x=x
         _y=y
     else:
         _x = x[-nop:]
         _y = y[-nop:]
     mod = LinearModel()
     pars = mod.make_params()
     pars['slope'].set(0.0)
     pars['intercept'].set(0.0)
     out = mod.fit(_y, pars, x=_x)
     slope = str("{0:.3f}".format(out.best_values['slope']*1000))
     self.returnSlope.emit(slope)
     _time = self.setTimeLabel(_x)
     self.__Regres.setData(_time, out.best_fit)
Example #37
def lmDDOFit(xdata, ydata, params, ctr_range = 1.2, amp_range = 3 , sig_range= 6, weightexponential = 0):    
    
    
    x = xdata
    y = ydata
#Define a linear model and a Damped Oscillator Model    
    line_mod = LinearModel(prefix='line_')
    ddo_mod = DampedOscillatorModel(prefix='ddo_')
#Initial Pars for Linear Model
    pars =  line_mod.make_params(intercept=0, slope=0)
    pars['line_intercept'].set(0, vary=True)
    pars['line_slope'].set(0, vary=True)
#Extend param list to use multiple peaks. Currently unused.
    peaks=[]
#Add fit parameters, Center, Amplitude, and Sigma
    for i in range(0, len(params) // 3):
        peaks.append(DampedOscillatorModel(prefix='ddo'+str(i)+'_'))
        pars.update(peaks[i].make_params())
        ctr=params[3*i]
        amp=params[3*i+1]
        sig=params[3*i+2]
        pars['ddo'+str(i)+'_center'].set(ctr, min=ctr/ctr_range, max=ctr*ctr_range)
        pars['ddo'+str(i)+'_amplitude'].set(amp,min=amp/amp_range, max=amp*amp_range)
        pars['ddo'+str(i)+'_sigma'].set(sig, min=sig/sig_range, max=sig*sig_range)
#Create full model. Add linear model and all peaks
    mod=line_mod
    for i in range(len(peaks)):
        mod=mod+peaks[i]
#Initialize fit
    init = mod.eval(pars, x=x)
#Do the fit. The weight exponential can weight the points porportional to the
#amplitude of y point. In this way, points on peak can be given more weight.     
    out=mod.fit(y, pars,x=x, weights=y**weightexponential)
#Get the fit parameters
    fittedsigma = out.params['ddo0_sigma'].value
    fittedAmp = out.params['ddo0_amplitude'].value
    fittedCenter = out.params['ddo0_center'].value
    fittedIntercept = out.params['line_intercept'].value
    fittedSlope = out.params['line_slope'].value
    fittedQ=1/(2*fittedsigma)
#Returns the output fit as well as an array of the fit parameters
    """Returns output fit as will as list of important fitting parameters"""
    return out, [fittedCenter, fittedAmp, fittedsigma, fittedQ, fittedIntercept, fittedSlope]
Example #38
def fitDoubleLorentzian(x,y) :
	signalGuess = 0.5*(max(y)-min(y))
	centerGuess = (max(x)+min(x))/2.
	span = max(x)-min(x)
	sigmaGuess = span/10.
	x_bg = np.concatenate((x[:10],x[10:]))
	y_bg = np.concatenate((y[:10],y[10:]))
	background  = LinearModel()
	pars = background.guess(y_bg, x=x_bg)
	
	peak1 = LorentzianModel(prefix="p1_")
	pars.update(peak1.make_params())
	pars['p1_center'].set(centerGuess,min=min(x),max=max(x))
	pars['p1_sigma'].set(sigmaGuess,max=span/2.)
	pars['p1_amplitude'].set(signalGuess*sigmaGuess*np.pi,min=0.00000001)
	pars.add('p1_signal', expr='p1_amplitude/(p1_sigma*pi)')
	pars.add('background', expr='intercept+slope*p1_center')
	pars.add('p1_contrast', expr='p1_amplitude/(p1_sigma*pi*background)')
	pars.add('p1_centerDoubled', expr='2*p1_center')
	pars.add('p1_shift', expr='2*p1_center-9192631770')
	pars.add('p1_fwhmDoubled', expr='4*p1_sigma')
	
	peak2 = LorentzianModel(prefix="p2_")
	pars.update(peak2.make_params())
	pars['p2_center'].set(centerGuess,min=min(x),max=max(x))
	pars.add('broadScale',value=2.0, min=1.9, max=100.0)
	pars['p2_sigma'].set(sigmaGuess*2,max=span/2.,expr='p1_sigma*broadScale')
	pars['p2_amplitude'].set(signalGuess*sigmaGuess*np.pi,min=0.00000001)
	pars.add('p2_signal', expr='p2_amplitude/(p2_sigma*pi)')
	pars.add('p2_contrast', expr='p2_amplitude/(p2_sigma*pi*background)')
	pars.add('p2_centerDoubled', expr='2*p2_center')
	pars.add('p2_shift', expr='2*p2_center-9192631770')
	pars.add('p2_fwhmDoubled', expr='4*p2_sigma')
	
	model = peak1 + peak2 + background
	init = model.eval(pars, x=x)
	out = model.fit(y, pars, x=x)
	print(out.fit_report())
	return init,out
Example #39
def measure_line_index_recover_spectrum(wave, params, norm=False):
    """ recover the fitted line profile from params

    Parameters
    ----------
    wave: array-like
        the wavelength to which the recovered flux correspond

    params: 5-element tuple
        the 1 to 5 elements are:
        mod_linear_slope
        mod_linear_intercept
        mod_gauss_amplitude
        mod_gauss_center
        mod_gauss_sigma

    norm: bool
        if True, linear model (continuum) is deprecated
        else linear + Gaussian model is used

    """
    from lmfit.models import LinearModel, GaussianModel
    mod_linear = LinearModel(prefix='mod_linear_')
    mod_gauss = GaussianModel(prefix='mod_gauss_')
    par_linear = mod_linear.make_params()
    par_gauss = mod_gauss.make_params()
    par_linear['mod_linear_slope'].value = params[0]
    par_linear['mod_linear_intercept'].value = params[1]
    par_gauss['mod_gauss_amplitude'].value = params[2]
    par_gauss['mod_gauss_center'].value = params[3]
    par_gauss['mod_gauss_sigma'].value = params[4]
    if not norm:
        flux = 1 - mod_gauss.eval(params=par_gauss, x=wave)
    else:
        flux = \
            (1 - mod_gauss.eval(params=par_gauss, x=wave)) * \
            mod_linear.eval(params=par_linear, x=wave)
    return flux
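A minimal usage sketch (assumed): rebuild a profile from a stored 5-element parameter tuple (slope, intercept, amplitude, center, sigma):

import numpy as np

wave = np.linspace(5770.0, 5790.0, 200)
params = (0.0, 1.0, 0.5, 5780.0, 1.2)   # slope, intercept, amplitude, center, sigma
flux = measure_line_index_recover_spectrum(wave, params, norm=False)
print(flux.min())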
Example #40
    def __init__(self):
        
        self.Calibration        = None
        
        self.c_AperS            = 2.99792458e18 # A / s
        self.planckCGS          = 6.626068e-27  #erg s-1 cm-2
        
        self.DataRoot   = '/home/vital/git/Dazer/Dazer/dazer/libraries/Astro_Libraries/'
        self.lineal_mod = LinearModel(prefix='lineal_')

        gammafile = self.DataRoot + "HI_t3_elec.ascii"
        f = open(gammafile,'r')
        self.a_HI = f.readlines()
        f.close()
        
        gammafile = self.DataRoot + "HeI_t5_elec.ascii"
        f = open(gammafile,'r')
        self.a_HeI = f.readlines()
        f.close()
        
        gammafile = self.DataRoot + "HeII_t4_elec.ascii"
        f = open(gammafile,'r')
        self.a_HeII = f.readlines()
        f.close()
Example #41
def measure_line_index(wave,
                       flux,
                       flux_err=None,
                       mask=None,
                       z=None,
                       line_info=None,
                       num_refit=(100, None),
                       filepath=None,
                       return_type='dict',
                       verbose=False):
    """ Measure line index / EW and have it plotted

    Parameters
    ----------
    wave: array-like
        wavelength vector

    flux: array-like
        flux vector

    flux_err: array-like
        flux error vector (optional)
        If un-specified, auto-generate an np.ones array

    mask: array-like
        andmask or ormask (optional)
        If un-specified, auto-generate an np.ones array (evenly weighted)

    line_info: dict
        information about spectral line, eg:
        line_info_dib5780 = {'line_center':         5780,
                             'line_range':          (5775, 5785),
                             'line_shoulder_left':  (5755, 5775),
                             'line_shoulder_right': (5805, 5825)}

    num_refit: non-negative integer
        number of refitting.
        If 0, no refit will be performed
        If positive, refits will be performed after adding normal random noise

    z: float
        redshift (only specify when z is large)

    filepath: string
        path of the diagnostic figure
        if None, do nothing, else print diagnostic figure

    return_type: string
        'dict' or 'array'
        if 'array', np.array(return dict.values())

    verbose: bool
        if True, print details

    Returns
    -------
    line_indx: dict
        A dictionary type result of line index.
        If any problem encountered, return the default result (filled with nan).

    """
    try:
        # 0. do some input check
        # 0.1> check line_info
        line_info_keys = line_info.keys()
        assert 'line_range' in line_info_keys
        assert 'line_shoulder_left' in line_info_keys
        assert 'line_shoulder_right' in line_info_keys
        # 0.2> check line range/shoulder in spectral range
        assert np.min(wave) <= line_info['line_shoulder_left'][0]
        assert np.max(wave) >= line_info['line_shoulder_right'][0]

        # 1. get line information
        # line_center = line_info['line_center']  # not used
        line_range = line_info['line_range']
        line_shoulder_left = line_info['line_shoulder_left']
        line_shoulder_right = line_info['line_shoulder_right']

        # 2. shift spectra to rest-frame
        wave = np.array(wave)
        flux = np.array(flux)
        if z is not None:
            wave /= 1. + z

        # 3. estimate the local continuum
        # 3.1> shoulder wavelength range
        ind_shoulder = np.any([
            np.all([wave > line_shoulder_left[0],
                    wave < line_shoulder_left[1]], axis=0),
            np.all([wave > line_shoulder_right[0],
                    wave < line_shoulder_right[1]], axis=0)], axis=0)
        wave_shoulder = wave[ind_shoulder]
        flux_shoulder = flux[ind_shoulder]

        # 3.2> integrated/fitted wavelength range
        ind_range = np.logical_and(wave > line_range[0], wave < line_range[1])
        wave_range = wave[ind_range]
        flux_range = flux[ind_range]
        # flux_err_range = flux_err[ind_range]  # not used
        mask_range = mask[ind_range]
        flux_err_shoulder = flux_err[ind_shoulder]
        # mask_shoulder = mask[ind_shoulder]    # not used

        # 4. linear model
        mod_linear = LinearModel(prefix='mod_linear_')
        par_linear = mod_linear.guess(flux_shoulder, x=wave_shoulder)
        # ############################################# #
        # to see the parameter names:                   #
        # model_linear.param_names                      #
        # {'linear_fun_intercept', 'linear_fun_slope'}  #
        # ############################################# #
        out_linear = mod_linear.fit(flux_shoulder,
                                    par_linear,
                                    x=wave_shoulder,
                                    method='leastsq')

        # 5. estimate continuum
        cont_shoulder = out_linear.best_fit
        noise_std = np.std(flux_shoulder / cont_shoulder)
        cont_range = mod_linear.eval(out_linear.params, x=wave_range)
        resi_range = 1 - flux_range / cont_range

        # 6.1 Integrated EW
        # estimate EW_int
        wave_diff = np.diff(wave_range)
        wave_step = np.mean(np.vstack([np.hstack([wave_diff[0], wave_diff]),
                                       np.hstack([wave_diff, wave_diff[-1]])]),
                            axis=0)
        EW_int = np.dot(resi_range, wave_step)

        # estimate EW_int_err
        EW_int_err = np.nan
        num_refit_ = num_refit[0]
        if num_refit_ is not None and num_refit_ > 0:
            EW_int_err = np.std(np.dot(
                (resi_range.reshape(1, -1).repeat(num_refit_, axis=0) +
                 np.random.randn(num_refit_, resi_range.size) * noise_std),
                wave_step))

        # 6.2 Gaussian model
        # estimate EW_fit
        mod_gauss = GaussianModel(prefix='mod_gauss_')
        par_gauss = mod_gauss.guess(resi_range, x=wave_range)
        out_gauss = mod_gauss.fit(resi_range, par_gauss, x=wave_range)
        line_indx = collections.OrderedDict([
            ('SN_local_flux_err',        np.median(flux_shoulder / flux_err_shoulder)),
            ('SN_local_flux_std',        1. / noise_std),
            ('num_bad_pixel',            np.sum(mask_range != 0)),
            ('EW_int',                   EW_int),
            ('EW_int_err',               EW_int_err),
            ('mod_linear_slope',         out_linear.params[mod_linear.prefix + 'slope'].value),
            ('mod_linear_slope_err',     out_linear.params[mod_linear.prefix + 'slope'].stderr),
            ('mod_linear_intercept',     out_linear.params[mod_linear.prefix + 'intercept'].value),
            ('mod_linear_intercept_err', out_linear.params[mod_linear.prefix + 'intercept'].stderr),
            ('mod_gauss_amplitude',      out_gauss.params[mod_gauss.prefix + 'amplitude'].value),
            ('mod_gauss_amplitude_err',  out_gauss.params[mod_gauss.prefix + 'amplitude'].stderr),
            ('mod_gauss_center',         out_gauss.params[mod_gauss.prefix + 'center'].value),
            ('mod_gauss_center_err',     out_gauss.params[mod_gauss.prefix + 'center'].stderr),
            ('mod_gauss_sigma',          out_gauss.params[mod_gauss.prefix + 'sigma'].value),
            ('mod_gauss_sigma_err',      out_gauss.params[mod_gauss.prefix + 'sigma'].stderr),
            ('mod_gauss_amplitude_std',  np.nan),
            ('mod_gauss_center_std',     np.nan),
            ('mod_gauss_sigma_std',      np.nan)])

        # estimate EW_fit_err
        num_refit_ = num_refit[1]
        if num_refit_ is not None and num_refit_ > 2:
            # {'mod_gauss_amplitude',
            #  'mod_gauss_center',
            #  'mod_gauss_fwhm',
            #  'mod_gauss_sigma'}
            out_gauss_refit_amplitude = np.zeros(num_refit_)
            out_gauss_refit_center = np.zeros(num_refit_)
            out_gauss_refit_sigma = np.zeros(num_refit_)
            # noise_fit = np.random.randn(num_refit,resi_range.size)*noise_std
            for i in range(int(num_refit_)):
                # resi_range_with_noise = resi_range + noise_fit[i,:]
                resi_range_with_noise = resi_range + \
                                        np.random.randn(resi_range.size) * noise_std
                out_gauss_refit = mod_gauss.fit(resi_range_with_noise,
                                                par_gauss,
                                                x=wave_range)
                out_gauss_refit_amplitude[i] = out_gauss_refit.params[mod_gauss.prefix + 'amplitude'].value
                out_gauss_refit_center[i] = out_gauss_refit.params[mod_gauss.prefix + 'center'].value
                out_gauss_refit_sigma[i] = out_gauss_refit.params[mod_gauss.prefix + 'sigma'].value
                print(out_gauss_refit_amplitude[i], out_gauss_refit_center[i], out_gauss_refit_sigma[i])
            line_indx.update({'mod_gauss_amplitude_std': np.nanstd(out_gauss_refit_amplitude),
                              'mod_gauss_center_std':    np.nanstd(out_gauss_refit_center),
                              'mod_gauss_sigma_std':     np.nanstd(out_gauss_refit_sigma)})

        # 7. plot and save image
        if filepath is not None and os.path.exists(os.path.dirname(filepath)):
            save_image_line_indice(filepath, wave, flux, ind_range, cont_range,
                                   ind_shoulder, line_info)

        # if necessary, convert to array
        # NOTE: for a non-ordered dict the order of keys and values may change!
        if return_type == 'array':
            return np.array(list(line_indx.values()))
        return line_indx
    except Exception:
        return measure_line_index_null_result(return_type)
예제 #42
0
# <examples/doc_builtinmodels_stepmodel.py>
import matplotlib.pyplot as plt
import numpy as np

from lmfit.models import LinearModel, StepModel

x = np.linspace(0, 10, 201)
y = np.ones_like(x)
y[:48] = 0.0
y[48:77] = np.arange(77-48)/(77.0-48)
np.random.seed(0)
y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0 + 2.22*x

step_mod = StepModel(form='erf', prefix='step_')
line_mod = LinearModel(prefix='line_')

pars = line_mod.make_params(intercept=y.min(), slope=0)
pars += step_mod.guess(y, x=x, center=2.5)

mod = step_mod + line_mod
out = mod.fit(y, pars, x=x)

print(out.fit_report())

plt.plot(x, y, 'b')
plt.plot(x, out.init_fit, 'k--')
plt.plot(x, out.best_fit, 'r-')
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
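# A short follow-up sketch (not part of the original example): the fitted step and line
# parameters can be read back from the ModelResult via the 'step_'/'line_' prefixes.
print(out.params['step_center'].value, out.params['step_center'].stderr)
print(out.params['step_amplitude'].value, out.params['step_sigma'].value)
print(out.best_values['line_slope'], out.best_values['line_intercept'])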
예제 #43
0
params.add("fwhm2", expr="2.354820045 * sigma2")
params.add("FWHM2", expr="(fwhm2/mu2) * 2.99792458e5")
params.add("Flux2", expr="(A2*fwhm2)/(2.35*0.3989)")
params.add("z2", expr="mu2/6562.7 - 1.0")
params.add("Vshift2", expr="((mu2-6562.7*3.92)/(6562.7*3.92))*2.99792458e5")

# Declaring the wide component
params.add("A4", value=-7.54e-17)
params.add("mu4", value=6562, min=6561, max=6563)
params.add("sigma4", value=5.0, min=0, vary=True)

# Declaring the continuum
params.add("zerolev", value=8.7e-17)
# Declaring a linear continuum upon which the line is located

lineal_mod = LinearModel(prefix="lineal_")
Lineal_parameters = lineal_mod.guess(np.hstack([blue_flux, red_flux]), x=np.hstack([blue_wave, red_wave]))
lineal_zerolev = Lineal_parameters["lineal_slope"].value * line_wave + Lineal_parameters["lineal_intercept"].value


# Make the fitting
out = minimize(CompResid, params, args=(Wavelength, Flux))
report_fit(out.params)

# Make the plots
plt.plot(Wavelength, Flux, "-", color="black", label="Complete spectrum")

# Resample range:

x_resample = np.linspace(line_wave[0], line_wave[-1], 100)
lineal_resample = Lineal_parameters["lineal_slope"].value * x_resample + Lineal_parameters["lineal_intercept"].value
예제 #44
0
def call_cont(x, y):
	cont = LinearModel(prefix='cont_')
	pars = cont.guess(y, x=x)
	return cont, pars
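# A minimal usage sketch for call_cont (hedged; synthetic data only, and it assumes NumPy is
# imported as np and that `from lmfit.models import LinearModel` is available to call_cont):
x = np.linspace(4000.0, 5000.0, 50)
y = 1.0 + 2.0e-4 * x + np.random.normal(0.0, 0.01, x.size)
cont, pars = call_cont(x, y)
out = cont.fit(y, pars, x=x)
continuum = out.best_fit  # the fitted linear continuum evaluated at x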
예제 #45
0
min_index = argrelextrema(line_flux, np.less)[0]
Minima_fluxes = line_flux[min_index]
Minima_waves  = line_wave[min_index]

#Lmfit parameters
Ncomps = 3
Initial_guesses_dic                     = OrderedDict()
Initial_guesses_dic['A']                = np.array([peak_fluxes[0], peak_fluxes[1], peak_fluxes[2], peak_fluxes[1]/20])
Initial_guesses_dic['mu']               = np.array([peak_waves[0], peak_waves[1], peak_waves[2], peak_waves[1]])
Initial_guesses_dic['sigma']            = np.array([1.0, 1.0, 1.0, 5.0])
Initial_guesses_dic['min_sigma']        = np.zeros(Ncomps + 1)

params = Load_lmfit_parameters(Ncomps, Initial_guesses_dic, wide_component = True, mu_precission = mu_precission)

#Declaring a linear continuum upon which the line is located
lineal_mod                      = LinearModel(prefix='lineal_')
Continuum_wave, Continuum_flux  = np.hstack([blue_wave, red_wave]), np.hstack([blue_flux, red_flux])
Lineal_parameters               = lineal_mod.guess(Continuum_flux, x=Continuum_wave)
lineal_zerolev                  = Lineal_parameters['lineal_slope'].value * line_wave + Lineal_parameters['lineal_intercept'].value
err_continuum                   = np.std(Lineal_parameters['lineal_slope'].value * Continuum_wave + Lineal_parameters['lineal_intercept'].value - Continuum_flux)

Lineal_parameters_scale         = lineal_mod.guess(Continuum_flux * Max_Flux, x=Continuum_wave + Max_wavelength)
lineal_zerolev_scale            = Lineal_parameters_scale['lineal_slope'].value * line_wave + Lineal_parameters_scale['lineal_intercept'].value

#Make the fitting
out = minimize(CompResid_zerolev, params, args=(line_wave, line_flux, lineal_zerolev, Ncomps + 1, err_continuum))
report_fit(out.params)

#Rescale the parameters
scale_params = rescale_parameters(out.params, Max_wavelength, Max_Flux, Ncomps, wide_component = True)
예제 #46
0
def linFitFunc(fitOut,minimum,maximum,ptNumber=100):
	x = np.linspace(minimum,maximum,ptNumber)
	background  = LinearModel()
	bg = background.func(x=x,intercept=fitOut.best_values["intercept"],slope=fitOut.best_values["slope"])
	return x,bg
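# A possible usage sketch (hedged; assumes fitOut is an lmfit ModelResult whose best_values
# include 'intercept' and 'slope', e.g. from a peak + LinearModel composite fit, that x is
# the data abscissa array, and that matplotlib.pyplot is imported as plt):
x_bg, bg = linFitFunc(fitOut, x.min(), x.max(), ptNumber=200)
plt.plot(x_bg, bg, 'k--', label='linear background')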
예제 #47
0
def produce_taufits(filepath,meth='iso',pulseperiod=None,snr_cut=None,
        verbose=True, plotparams=False, plotflux=False, savefigure=False):
        pulsar, nch, nbins,nsub, lm_rms, tsub = read_headerfull(filepath)

        verboseTag = bool(verbose)
        
        print0 = "Pulsar name: %s" %pulsar
        print1 = "Number of freq. channels: %d \nFreq channels will be labeled 0 - %d" %(nch, nch-1)
        print2 = "Number of bins: %d" %nbins
        print3 = "RMS: %.2f" %lm_rms
        print4 = "Tsub: %.2f sec" %tsub 
        for msg in (print0, print1, print2, print3, print4):
            print msg
        print "--------------------------------------------------------"
        
        if pulseperiod is None:
            ## Define time axis, and time/bins conversions
            print ("Using Tsub in header to convert bins to time. Assumption is that tsub corresponds to 1.0 phase, corresponding to nbins. This should be adapted for search data.")
            pulseperiod = tsub
        # otherwise keep the pulseperiod provided by the caller (in seconds)
        
        profilexaxis = np.linspace(0,pulseperiod,nbins)
        pbs = pulseperiod/nbins
        tbs = tsub/nbins
        """Initialise vector outputs"""
        obtainedtaus, lmfittausstds = [], []
        """freqmsMHz will correctly associate scattering time values 
        (tau) with frequency, taking into account frequency integration 
        across a sub-band. Whereas freqcsMHz is the centre freq. to the subband"""
        
        freqmsMHz, freqcsMHz = [], []    
        noiselessmodels =[]
        results, datas, comp_SNRs, comp_rmss = [], [], [], []
        redchis, paramset, paramset_std, correls = [], [], [], []
        
        halfway = nbins/2.

        for i in range(nch):
            print"--------------------------------------------------------"
            print "Channel %d" %i
            """Read in (pdv) data""" 
            data, freqc, freqm = read_data(filepath,i,nbins)
            freqmsMHz.append(freqm)
            freqcsMHz.append(freqc)
            # roll the data so that the peak of the lowest freq channel sits at the
            # middle of the bins; higher channels are shifted by the same peak bin
            if i == 0:
                peakbin = np.argmax(data)
                if verboseTag:
                    print 'peak bin at lowest freq channel: %d' % peakbin
            shift = int(halfway - int(peakbin))
            data = np.roll(data,shift)
            if verboseTag:
                print "Rolling data by -%d bins" %shift
            comp_rms = find_rms(data,nbins)

            if meth is None:
                print "No fitting method was chosen. Will default to an isotropic fitting model. \n Use option -m with 'onedim' to change."
                result, noiselessmodel, besttau, taustd, bestparams, bestparams_std, redchi, corsig = tau_fitter(data,nbins,verbose=verboseTag)
            elif meth == 'iso':
                result, noiselessmodel, besttau, taustd, bestparams, bestparams_std, redchi, corsig = tau_fitter(data,nbins,verbose=verboseTag)
            elif meth == 'onedim':
                result, noiselessmodel, besttau, taustd, bestparams, bestparams_std, redchi, corsig = tau_1D_fitter(data,nbins)

            comp_SNR_model = find_peaksnr(noiselessmodel,comp_rms)

            if verboseTag:
                print 'Estimated SNR (from model peak and data rms): %.2f' % comp_SNR_model
            comp_SNR =  find_peaksnr_smooth(data,comp_rms)
            print 'Estimated SNR (from data peak and rms): %.2f' % comp_SNR
            print 'Channel Tau (ms): %.2f \pm %.2f ms' %(besttau,taustd)
            
           
            obtainedtaus.append(besttau)
            lmfittausstds.append(taustd)
            noiselessmodels.append(noiselessmodel)
            results.append(result)
            datas.append(data)
            comp_SNRs.append(comp_SNR)
            #new:
            comp_rmss.append(comp_rms)
            redchis.append(redchi)
            paramset.append(bestparams)
            paramset_std.append(bestparams_std)
        #    if plotflux == True:
        #        correls.append(corsig)
        
        
        #if plotflux == True:
        #    cor_sigA = np.zeros(len(correls))
        #    for i in range(len(correls)):
        #        cor_sigA[i] = correls[i]['A']

   
        paramset = np.transpose(paramset)
        paramset_std = np.transpose(paramset_std)
         
        """Next check if any of the subbands contain only zeros. This happens with high RFI excision in LOFAR bands"""
        zero_ch = []
        for i in range(nch):
            all_zeros = not np.any(datas[i])
            if all_zeros:
                zero_ch.append(i)
        
        print"--------------------------------------------------------"

        if zero_ch:
            print "\n"
            print "%d channels have all zeroes (channels(s):" %len(zero_ch), zero_ch,  ") and will be removed."
            if verboseTag:
                print "All zero channels are assigned SNR of 0"

          
        if snr_cut: 
            print "Using SNR cutoff of %.2f" %snr_cut
            comp_SNRs = np.nan_to_num(comp_SNRs)
            (ind_lowSNR,) = np.where(np.array(comp_SNRs) < snr_cut)
            print "SNR cutoff will exclude %d channels (channel(s): %s)" %(len(ind_lowSNR), ind_lowSNR)

            
            data_highsnr = np.delete(np.array(datas),ind_lowSNR,0)
            model_highsnr = np.delete(np.array(noiselessmodels),ind_lowSNR,0)
            taus_highsnr = np.delete(np.array(obtainedtaus),ind_lowSNR)
            lmfitstds_highsnr = np.delete(np.array(lmfittausstds),ind_lowSNR)
            freqMHz_highsnr = np.delete(np.array(freqmsMHz),ind_lowSNR)
            #New:
            comp_rmss_highsnr = np.delete(np.array(comp_rmss),ind_lowSNR)
            redchis_highsnr = np.delete(np.array(redchis),ind_lowSNR)
            #corsigA_highsnr = np.delete(np.array(cor_sigA),ind_lowSNR)
            
            paramset_highsnr = np.zeros([len(paramset),len(data_highsnr)])
            paramsetstd_highsnr = np.zeros([len(paramset),len(data_highsnr)])                
            for i in range(len(paramset)):
                paramset_highsnr[i]= np.delete(paramset[i],ind_lowSNR)
                paramsetstd_highsnr[i]= np.delete(paramset_std[i],ind_lowSNR)
                
            
        elif (snr_cut is None) and (zero_ch != []):
            print "Used no SNR cutoff"          
            """Rename array to be same as when cut-off is used"""
            """If no SNR cutoff is used, remove channels with all zeroes 
            -- these will automatically be removed by any snr_cut > 0"""
            data_highsnr = np.delete(np.array(datas),zero_ch,0)
            model_highsnr = np.delete(np.array(noiselessmodels),zero_ch,0)
            taus_highsnr = np.delete(np.array(obtainedtaus),zero_ch)
            lmfitstds_highsnr = np.delete(np.array(lmfittausstds),zero_ch)
            freqMHz_highsnr = np.delete(np.array(freqmsMHz),zero_ch)
            # New:
            comp_rmss_highsnr = np.delete(np.array(comp_rmss),zero_ch)
            redchis_highsnr = np.delete(np.array(redchis),zero_ch)
            #corsigA_highsnr = np.delete(np.array(cor_sigA),zero_ch)
              
            paramset_highsnr = np.zeros([len(paramset),len(data_highsnr)])
            paramsetstd_highsnr = np.zeros([len(paramset),len(data_highsnr)])     
            for i in range(len(paramset)):
                paramset_highsnr[i]= np.delete(paramset[i],zero_ch)
                paramsetstd_highsnr[i]= np.delete(paramset_std[i],zero_ch)
                
                
        else:
            print "Used no SNR cutoff and there are no empty channels"          
            data_highsnr = np.array(datas)
            model_highsnr = np.array(noiselessmodels)
            taus_highsnr = np.array(obtainedtaus)
            lmfitstds_highsnr = np.array(lmfittausstds)
            freqMHz_highsnr = np.array(freqmsMHz)
            # New:
            comp_rmss_highsnr = np.array(comp_rmss)
            redchis_highsnr = np.array(redchis)
            paramset_highsnr = np.array(paramset)
            paramsetstd_highsnr = np.array(paramset_std)
            
            
            
        
        taussec_highsnr = taus_highsnr*pbs
        lmfitstdssec_highsnr = lmfitstds_highsnr*pbs
        number_of_plotted_channels = len(data_highsnr)
        npch = number_of_plotted_channels
        print "Will plot remaining %d/%d channels" %(npch, nch)
        

        """Plotting starts"""

        #plot onedim in blue dashed
        #else plot in red
        if meth == 'onedim':
            prof = 'b--'
            lcol='b'
        else:
            prof = 'r-'
            lcol ='r'

        """1. PLOT PROFILES"""
        dimx, dimy = 3., 3.
        numsubplots = dimx*dimy
        numplots = int(np.ceil(npch/numsubplots))
        print "Num profile plots:", numplots
        """Compute residuals"""


       #"""Plot 1: Pulse profiles and fits"""
    
        if npch > 0:
            resdata = data_highsnr - model_highsnr
            resnormed = (resdata-resdata.mean())/resdata.std()
            
            if taussec_highsnr[0] > 1:
                taulabel =  taussec_highsnr
                taulabelerr = lmfitstdssec_highsnr
                taustring = 'sec'
            else:
                taulabel = taussec_highsnr*1000
                taulabelerr = lmfitstdssec_highsnr*1000
                taustring = 'ms'

            for k in range(numplots):
                j = int(numsubplots*k)
                figg = plt.figure(k+1,figsize=(int(4*dimx),int(3*dimy)))
                plots_remaining = int(npch - numsubplots*k)
                #print "Plots remaining", plots_remaining 
                for i in range(np.min([int(numsubplots),int(plots_remaining)])):
                    figg.subplots_adjust(left = 0.08, right = 0.98, wspace=0.35,hspace=0.35,bottom=0.15)
                    #plt.rc('text', usetex=True)
                    plt.rc('font', family='serif')              
                    plt.subplot(dimx,dimy,i+1)
                    plt.plot(profilexaxis,data_highsnr[j+i],alpha = 0.20)    
                    plt.plot(profilexaxis,model_highsnr[j+i],lw = 2.0, alpha = 0.85, label=r'$\tau: %.2f \pm%.2f$ %s' %(taulabel[j+i], taulabelerr[j+i], taustring))
                    plt.title('%s at %.1f MHz' %(pulsar, freqMHz_highsnr[j+i]))
                    plt.ylim(ymax=1.3*np.max(data_highsnr[j+i]))
                    plt.xlim(xmax=pulseperiod)
                    plt.xticks(fontsize=11)
                    plt.yticks(fontsize=11)
                    plt.xlabel('time (s)',fontsize=11)
                    plt.legend(fontsize=11,numpoints=1)
                    plt.ylabel('normalized intensity',fontsize=11)
                    plt.tight_layout()
                
                if savefigure == True:
                    figname = '%s_%s_%s_%d.png' %(os.path.basename(filepath),'fitted_profiles', meth, k)
                    plt.savefig(figname, dpi=200)
                    print "Saved figure %s in ./" %figname
                    if noshow == False:
                        plt.show()

            if verboseTag:
                for i in range(npch):
                    print "Channel %d" %i
                    print'Tau (ms): %.2f' %(1000*taussec_highsnr[i])
                    tau1GHz = tauatfreq(freqMHz_highsnr[i]/1000.,taussec_highsnr[i],1.0,4)
                    print 'tau1GHz_alpha_4 (ms) ~ %.4f' %(tau1GHz*1000)

            # drop channels with zero (i.e. failed) tau uncertainties, keeping arrays aligned
            nonzero_ind = np.nonzero(lmfitstdssec_highsnr)
            taussec_highsnr = taussec_highsnr[nonzero_ind]
            freqMHz_highsnr = freqMHz_highsnr[nonzero_ind]
            lmfitstdssec_highsnr = lmfitstdssec_highsnr[nonzero_ind]
            
            """Plot 2: Plot Gaussian fitting parameters and DM if selected"""
        
            if plotparams==True:
                print "\nPlotting Gaussian fit parameters w.r.t frequency\n"
                """Set plotting parameters"""
                alfval = 0.6
                markr= '*'
                msize=12
                plt.figure(numplots+1, figsize=(12,8))
                plt.subplots_adjust(left = 0.055, right=0.98,wspace=0.35,hspace=0.4,bottom=0.08)               
                """Fit models to sigma"""
                powmod = PowerLawModel()
                powpars = powmod.guess(paramset_highsnr[0], x=freqMHz_highsnr)
                powout = powmod.fit(paramset_highsnr[0], powpars, x=freqMHz_highsnr, weights=1/((paramsetstd_highsnr[0])**2))

                linmod = LinearModel()
                
                if len(freqMHz_highsnr) < 3:
                    raise RuntimeError("plotparams == True: Less than three frequency channels. Cannot compute quadratic or exponential fit for width evolution. Consider lowering snr_cut.")
                    
                else:
                    quadmod = QuadraticModel()          
                    quadpars = quadmod.guess(paramset_highsnr[0], x=freqMHz_highsnr)
                    quadout  = quadmod.fit(paramset_highsnr[0], quadpars, x=freqMHz_highsnr, weights=1/((paramsetstd_highsnr[0])**2))

                    expmod = ExponentialModel()
                    exppars = expmod.guess(paramset_highsnr[0], x=freqMHz_highsnr)
                    expout = expmod.fit(paramset_highsnr[0], exppars, x=freqMHz_highsnr, weights=1/((paramsetstd_highsnr[0])**2))


                """Fit a DM model to delta mu"""
                delnuarray = [-(1/freqMHz_highsnr[-1]**2-1/freqMHz_highsnr[i]**2) for i in range(npch)] ## in MHz^-2
                delmuarray = [(paramset_highsnr[1][-1] - paramset_highsnr[1][i])*pbs for i in range(npch)] ##in seconds
                delmu_stdarray = [(paramsetstd_highsnr[1][-1] - paramsetstd_highsnr[1][i])*pbs for i in range(npch)]

                DM_linpars = linmod.guess(delmuarray, x=delnuarray)
                DM_linout  = linmod.fit(delmuarray, DM_linpars, x=delnuarray)

                DM_CCval = DM_linout.best_values['slope']
                DM_CCvalstd = DM_linout.params['slope'].stderr

                DMmodelfit = DM_linout.best_fit ##model gives deltime in seconds (used to shift data)

                DMconstant = 4148.808
                #uncertainty in the constant is 0.003 - only affects the Delta DM value in the 9th decimal
                DMval = (DM_CCval/DMconstant)
                DMvalstd = (DM_CCvalstd/DMconstant)
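                # (The fit above uses the cold-plasma dispersion relation: the delay between
                #  frequencies is dt = 4148.808 * DM * (nu_1**-2 - nu_2**-2), with dt in s,
                #  nu in MHz and DM in pc cm^-3, so the fitted slope of delta-mu versus
                #  delta(1/nu^2) equals 4148.808 * DM, and DMval = slope / 4148.808.)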
                #DMcheck = psr.DM_checker(freqmsMHz,bestpT_highSNR[1]*pbs)
                
                
                ## Plot reduced chi square:
                
                plt.subplot(2,3,1)
                plt.plot(freqMHz_highsnr, redchis_highsnr/np.power(comp_rmss_highsnr,2), markr,alpha=alfval,markersize = msize)
                plt.title(r'Reduced $\chi^2$ values', fontsize=12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.xlabel(r'$\nu$ MHz',fontsize=12)
                plt.ylabel(r'$\chi^2$',fontsize=12)
                
                ## Plot sigma:
                
                plt.subplot(2,3,2)
                #plt.errorbar(freqMHz_highsnr,paramset_highsnr[0]*pbs)
                plt.errorbar(freqMHz_highsnr,paramset_highsnr[0]*pbs,yerr =paramsetstd_highsnr[0]*pbs, fmt = markr,markersize=msize,capthick=2,linewidth=1.5,alpha=alfval)
                plt.plot(freqMHz_highsnr,powout.best_fit*pbs,'-', alpha=alfval,label='pow = %.2f' %powout.best_values['exponent'])
                plt.plot(freqMHz_highsnr,quadout.best_fit*pbs,'-',alpha=alfval, label='quad: a,b = %.3f,%.3f' %(quadout.best_values['a'],quadout.best_values['b']))
                plt.ylabel(r'$\sigma$ (sec)')
                plt.title(r'Width evolution', fontsize=12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.xlabel(r'$\nu$ MHz',fontsize=14)
                plt.legend(fontsize = 10, loc='best')
                
                 ## Plot mean:
                
                plt.subplot(2,3,3)
                #plt.errorbar(freqMHz_highsnr,paramset_highsnr[1]*pbs)
                plt.errorbar(freqMHz_highsnr,paramset_highsnr[1]*pbs,yerr =paramsetstd_highsnr[1]*pbs, fmt = markr,markersize=msize,capthick=2,linewidth=1.5,alpha=alfval)
                plt.ylabel(r'$\mu$ (sec)')
                plt.title(r'Centroid evolution', fontsize=12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.xlabel(r'$\nu$ MHz',fontsize=14)
                #plt.legend(fontsize = 9, loc='best')
                
                ## Plot amplitude:
                
                plt.subplot(2,3,4)
                #plt.errorbar(freqMHz_highsnr,paramset_highsnr[2]*pbs)
                plt.errorbar(freqMHz_highsnr,paramset_highsnr[2]*pbs,yerr =paramsetstd_highsnr[2]*pbs, fmt = markr,markersize=msize,capthick=2,linewidth=1.5,alpha=alfval)
                plt.ylabel(r'amplitude')
                plt.title(r'Amplitude evolution', fontsize=12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.xlabel(r'$\nu$ MHz',fontsize=14)
                #plt.legend(fontsize = 9, loc='best')
                
                ## Plot DC:
                
                plt.subplot(2,3,5)
                #plt.errorbar(freqMHz_highsnr,paramset_highsnr[2]*pbs)
                plt.errorbar(freqMHz_highsnr,paramset_highsnr[3]*pbs,yerr =paramsetstd_highsnr[3]*pbs, fmt = markr,markersize=msize,capthick=2,linewidth=1.5,alpha=alfval)
                plt.ylabel(r'DC offset')
                plt.title(r'DC offset', fontsize=12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.xlabel(r'$\nu$ MHz',fontsize=14)
                #plt.legend(fontsize = 9, loc='best')
                
                 ## Plot DM:
                plt.subplot(2,3,6)
                plt.errorbar(delmuarray,freqMHz_highsnr, fmt = markr, xerr=delmu_stdarray, alpha = alfval, markersize=msize)
                plt.plot(DMmodelfit,freqMHz_highsnr, '-', label=r'DM: $%.3f \pm %.3f$ $\rm{pc.cm}^{-3}$' %(DMval,DMvalstd), alpha = alfval)
                plt.xlabel(r'$\Delta \mu$ (sec)', fontsize =12)
                plt.yticks(fontsize=12)
                plt.xticks(fontsize=12)
                plt.title('Delta DM', fontsize=12)
                plt.ylabel(r'$\nu$ (MHz)',fontsize=14)
                plt.ticklabel_format(style='sci', axis='x',scilimits=(0,0))
                plt.legend(fontsize = 10, loc='best')
                plt.tight_layout()

                if savefigure == True:
                    figname2 = '%s_%s.png' %(os.path.basename(filepath),'fitting_parameters')
                    plt.savefig(figname2, dpi=200)
                    print "Saved figure %s in ./" %figname2
                if noshow == False: 
                    plt.show()

            if plotflux == True:  ##Flux section needs debugging
                    ls = 'solid'
                    """Plot flux, and corrected flux spectrum"""
                    """Create unscattered profiles, i.e. Guassians"""   
                    
                    bins, profiles = [],[makeprofile(nbins = nbins, ncomps = 1, amps = paramset_highsnr[2][j], means = paramset_highsnr[1][j], sigmas = paramset_highsnr[0][j]) for j in range(npch)]
                    
                    unscatflux = []
                    for i in range(npch):
                        unscatfl = np.sum(profiles[i])/nbins
                        unscatflux.append(unscatfl)
                        
                     #smootheddata = smooth(data_highsnr[j],int(0.05*nbins))     
                    scatflux = [find_modelflux(model_highsnr[i],nbins) for i in range(npch)]
                    
                    climbvals = []
                    for i in range(npch):
                        climb = returnclimb(np.linspace(1,nbins,nbins),paramset_highsnr[1][i],paramset_highsnr[0][i],paramset_highsnr[2][i],taussec_highsnr[i],paramset_highsnr[3][i],nbins)
                        climbvals.append(climb)
                    
                    correctedflux = np.array([scatflux[i] + climbvals[i] for i in range(npch)])
                    print scatflux
                    print climbvals
                    
                    #per bin
                    meancorflux = np.mean(correctedflux)
                    meancorfluxerr = np.sqrt(np.sum(correctedflux**2))/len(correctedflux)

                    """Calculate error in Flux"""

                    sigmaWIDTH = paramsetstd_highsnr[0]*pbs #in seconds
                    sigmaAMP = paramsetstd_highsnr[2]  #in mJy
                    WIDTHS =paramset_highsnr[0]*pbs #in seconds
                    AMPS =paramset_highsnr[2] #in mJy

                    Expr1 = np.sqrt(2*np.pi)*AMPS
                    Expr2 = np.sqrt(WIDTHS)
                    AreaExpression = Expr1*Expr2

                    sigmaEx1 = np.sqrt(2*np.pi)*sigmaAMP
                    sigmaEx2 = Expr2*0.5*sigmaWIDTH/WIDTHS
                    
                    sigmaFlux =AreaExpression*np.sqrt(np.power(sigmaEx1/Expr1,2)+ np.power(sigmaEx2/Expr2,2))#+ 2*corsigA_highsnr*sigmaEx1*sigmaEx2/(Expr1*Expr2)

                    plt.figure(figsize=(10,6))
                    plt.plot(freqMHz_highsnr, correctedflux,'k-', linewidth=2.0)
                    plt.plot(freqMHz_highsnr, scatflux,'r--', linewidth=2.0)                
                    plt.fill_between(freqMHz_highsnr,scatflux,correctedflux, alpha=alfval,facecolor='r')
                    eb = plt.errorbar(freqMHz_highsnr,correctedflux,yerr=sigmaFlux, fmt=markr,markersize=10.0, alpha=1.0,capthick=2,linewidth=1.5)
                    eb[-1][0].set_linestyle(ls)
                    #plt.errorbar(freqMHz_highsnr, unscatflux,yerr=sigmaFlux, fmt=markr,markersize=10.0, alpha=alfval)
                    plt.title('Flux Spectrum', fontsize=12)
                    plt.xticks(fontsize=12)
                    plt.yticks(fontsize=12)
                    plt.xlabel(r'$\nu$ (MHz)',fontsize=12)
                    plt.ylabel(r'Calibrated flux (mJy)',fontsize=12)
                
        
        return freqMHz_highsnr, taussec_highsnr, lmfitstdssec_highsnr
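# A hedged usage sketch of the function above (the data file name is hypothetical;
# meth may be 'iso' or 'onedim' as handled in the body):
# freqs_MHz, taus_sec, tau_stds_sec = produce_taufits('pulsar_profile.pdv', meth='iso',
#                                                     snr_cut=3.0, plotparams=True)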
예제 #48
0
"""Array with all the other fitting parameters: sigma, A, etc."""
bestpT = np.transpose(bestparamsall)
bestpT_std = np.transpose(bestparams_stdall)

print "Number of plotted channels: %d/%d" %(npch, nch)

bestpT_highSNR = bestpT
bestpT_std_highSNR = bestpT_std

"""Calculate fits for parameters sigma and mu"""

"""Fit a DM model to delta mu"""
delnuarray = [-(1/freqMHz_highsnr[-1]**2-1/freqMHz_highsnr[i]**2) for i in range(npch)] ## in MHz^-2
delmuarray = [(bestpT_highSNR[1][-1] - bestpT_highSNR[1][i])*pbs for i in range(npch)] ##in seconds
delmu_stdarray = [(bestpT_std_highSNR[1][-1] - bestpT_std_highSNR[1][i])*pbs for i in range(npch)]
linmod = LinearModel()
DM_linpars = linmod.guess(delmuarray, x=delnuarray)
#	DM_linout  = linmod.fit(delmuarray, DM_linpars, x=delnuarray, weights=1/(np.power(delmu_stdarray,2)))
DM_linout  = linmod.fit(delmuarray, DM_linpars, x=delnuarray)

DM_CCval = DM_linout.best_values['slope']
DM_CCvalstd = DM_linout.params['slope'].stderr

DMmodelfit = DM_linout.best_fit ##model gives deltime in seconds (used to shift data)

DMconstant = 4148.808
#uncertainty in the constant is 0.003 - only affects the Delta DM value in the 9th decimal
DMval = (DM_CCval/DMconstant)
DMvalstd = (DM_CCvalstd/DMconstant)
DMcheck = psr.DM_checker(freqmsMHz,bestpT_highSNR[1]*pbs)
예제 #49
0
class Nebular_Bayesian():

    def __init__(self):
        
        self.Calibration        = None
        
        self.c_AperS            = 2.99792458e18 # A / s
        self.planckCGS          = 6.626068e-27  # erg s
        
        self.DataRoot   = '/home/vital/git/Dazer/Dazer/dazer/libraries/Astro_Libraries/'
        self.lineal_mod = LinearModel(prefix='lineal_')

        gammafile = self.DataRoot + "HI_t3_elec.ascii"
        f = open(gammafile,'r')
        self.a_HI = f.readlines()
        f.close()
        
        gammafile = self.DataRoot + "HeI_t5_elec.ascii"
        f = open(gammafile,'r')
        self.a_HeI = f.readlines()
        f.close()
        
        gammafile = self.DataRoot + "HeII_t4_elec.ascii"
        f = open(gammafile,'r')
        self.a_HeII = f.readlines()
        f.close()
    
    def calculate_wavelength_intervals(self, total_wavelength, total_flux, lineslog_frame):

        #Another value
        empty_array = zeros(len(total_wavelength), dtype = bool)
        for j in range(len(lineslog_frame.index)):
    
            blue_limit  = lineslog_frame.iloc[j]['Wave3']  
            red_limit   = lineslog_frame.iloc[j]['Wave4']
            
            if lineslog_frame.index.values[j] in ['O2_3726A', 'O3_4959A', 'O3_5007A', 'H1_6563A']:
                tolerance = 15
            else:
                tolerance = 4
                
            indeces_line    = (total_wavelength >= blue_limit - tolerance) & (total_wavelength <= red_limit + tolerance)   
            empty_array     = empty_array + indeces_line
                            
        #Extra for weird lines
        for interval in [[3630,3640],[3655, 3677], [3816, 3824], [3537, 3542], [6990,7010], [5238, 5330]]:
    
            blue_limit  = interval[0] 
            red_limit   = interval[1]
            tolerance   = 0

            indeces_line = (total_wavelength >= blue_limit - tolerance) & (total_wavelength <= red_limit + tolerance)   
            empty_array = empty_array + indeces_line
            
        self.clean_indeces = invert(empty_array)
        
        #Calculating the error on the continuum
        self.linear_indeces = argwhere(diff(r_[False, self.clean_indeces, False])).reshape(-1, 2)
        self.linear_indeces[:, 1] -= 1

        std_arrays = []
        for interval in self.linear_indeces:
            #dz.data_plot(wave_obs[interval[0]:interval[1]], flux_obs[interval[0]:interval[1]], 'initial', 'blue', markerstyle='o')
            #dz.data_plot(wave_obs[interval[0]], flux_obs[interval[0]], 'initial', 'blue', markerstyle='o')
            #dz.data_plot(wave_obs[interval[1]], flux_obs[interval[1]], 'final', 'red', markerstyle='o')
            try:
                if (interval[1] - interval[0]) > 1:
                    x_region    = total_wavelength[interval[0]:interval[1]]
                    y_region    = total_flux[interval[0]:interval[1]]
                    lin_param   = self.lineal_mod.guess(y_region, x=x_region)
                    y_lineal    = lin_param['lineal_slope'].value * x_region + lin_param['lineal_intercept'].value
                    std_arrays.append(std(y_lineal - y_region))
                    #dz.data_plot(x_region, y_lineal, 'lineal fit', 'black')

            except:
                print 'failed interval of length', interval[1] - interval[0]
            
        self.continuum_error = mean(std_arrays)   
                    
        return total_wavelength[self.clean_indeces], total_flux[self.clean_indeces]

    def model_parameters(self, obs_wave, obs_flux, err_obs):
    
        y_plus      = pymc.Uniform('He_abud', 0.050, 0.15)
#         Te          = pymc.Uniform('Te', self.ObjectData_dict['TOIII'] -2000, self.ObjectData_dict['TOIII']+2000)
        Te          = pymc.TruncatedNormal('Te', self.ObjectData_dict['TOIII'], self.ObjectData_dict['TOIII_error']**-2, a=self.ObjectData_dict['TOIII']- 6 * self.ObjectData_dict['TOIII_error'], b=self.ObjectData_dict['TOIII'] + 6 * self.ObjectData_dict['TOIII_error'])
#         Flux_Recomb = pymc.Uniform('Flux_Recomb', self.ObjectData_dict['Flux_Hbeta_Normalize'] - 4*self.ObjectData_dict['Error_Hbeta_Normalize'], self.ObjectData_dict['Flux_Hbeta_Normalize']+4 * self.ObjectData_dict['Error_Hbeta_Normalize'])
        Flux_Recomb = pymc.Normal('Flux_Recomb', self.ObjectData_dict['Flux_Hbeta_Normalize'], self.ObjectData_dict['Error_Hbeta_Normalize']**-2)
        
        #Calculate nebular continuum
        @pymc.deterministic
        def Calculate_Continuum(y_plus=y_plus, Te=Te, Flux_Recomb=Flux_Recomb, Wavelength_Range = obs_wave):

            return self.Calculate_Nebular_gamma(Te, Flux_Recomb, y_plus, HeIII_HII = 0.0, Wavelength_Range = Wavelength_Range)
         
        #Likelihood
        @pymc.stochastic(observed=True)
        def Likelihood_model(value=obs_flux, nebInt_cont = Calculate_Continuum, sigmaLines = err_obs):
            chi_F = np_sum(square(nebInt_cont - value) / square(sigmaLines))
            
            return - chi_F / 2
    
        return locals()
          
    def Calculate_Nebular_gamma(self, Te, Flux_Recomb, HeII_HII, HeIII_HII, Wavelength_Range):
        
        HII_HI          = 1.0
        
        Frac            = 1 + HeII_HII*4 + HeIII_HII*4
        
        Gamma_2q        = self.TwoPhotonContinuum(Te, Wavelength_Range)
             
        Gamma_FF_HI     = self.FreeFreeContinuum('HI', Te, Wavelength_Range)
        Gamma_FF        = Frac * Gamma_FF_HI
            
        Gamma_FB_HI     = self.FreeBoundContinuum_EP("HI", Te, Wavelength_Range)
        Gamma_FB_HeI    = self.FreeBoundContinuum_EP("HeI", Te, Wavelength_Range)
        Gamma_FB_HeII   = self.FreeBoundContinuum_EP("HeII", Te, Wavelength_Range)
            
        Gamma_FB        = HII_HI * Gamma_FB_HI + HeII_HII * Gamma_FB_HeI + HeIII_HII * Gamma_FB_HeII
    
        Gamma_Total     = Gamma_FB + Gamma_FF + Gamma_2q
        
        Gamma_lambda    = Gamma_Total * (self.c_AperS / power(Wavelength_Range,2))
        
#         NebularFlux_lambda    = self.Zanstra_Calibration_Hbeta(Te, Flux_Recomb, Gamma_Total, Wavelength_Range)
        NebularFlux_lambda    = self.Zanstra_Calibration_Halpha(Te, Flux_Recomb, Gamma_Total, Wavelength_Range)
        
        return NebularFlux_lambda
    
    def TwoPhotonContinuum(self, Te, Wavelength_Range):
        
        Gamma_2q = []
        
        #Coefficients for calculating A_2q The total radiative probability 2s -> 1s (s^-1)
        alpha_A = 0.88                                                                          
        beta_A  = 1.53
        gamma_A = 0.8
        C_A = 202.0                                                     # (s^-1)
        c_CGS = 2.99792458e10                                           #cm / s  
        h = 6.626068e-27                                                #erg s
        H0_Ionization_Energy = 13.6                                     #eV
        eV2erg = 1.602177e-12
        nu_0 = H0_Ionization_Energy * eV2erg / h                        #Hz
        
        A2q = 8.2249                                                    #(s^-1) Transition probability at lambda = 1215.7                                                              
        alpha_eff_2q = 6.5346e-11 * Te**-0.72315                   #(cm^3 s^-1) Effective Recombination coefficient (this is my fit; Molla's is 0.647e-10 * Te**-0.722)
        q2 = 5.92e-4 - 6.1e-9 * Te                                 #(cm^3 s^-1) Collisional transition rate coefficient for protons and electrons
        Lambda_0 =  1215.7
        
        for i in range(len(Wavelength_Range)):
            Lambda = float(Wavelength_Range[i])
            if Lambda > 1215.7:
                nu = c_CGS / (Lambda * 1e-8)
                nu_0 = 3.e18/Lambda_0 
                y = nu / nu_0
                
                A_y = C_A * (
                             y*(1-y) * (1-(4*y*(1-y))**gamma_A)
                             + alpha_A*(y*(1-y))**beta_A * (4*y*(1-y))**gamma_A
                             )
                
                g_nu = h * nu / nu_0 / A2q * A_y                                                    # (erg Hz^-1)
                Gamma_2q.append(alpha_eff_2q * g_nu / (1+q2/A2q))                                # (erg cm^3 s^-1) We take the density of protons and electrons = n_e
            else:
                Gamma_2q.append(0.0)
    
        Np_Gamma_2q = array(Gamma_2q)    
        Np_Gamma_2q[isnan(Np_Gamma_2q)] = 0.0    
        
        return Np_Gamma_2q   

    def FreeFreeContinuum(self, Ion, Te, Wavelength_Range):
        
        #Browns and Seaton methodology
        h = 6.626068e-27                                                #erg s
        c_CGS = 2.99792458e10                                           #cm / s   
        eV2erg = 1.602177e-12
        pi = 3.141592
        masseCGS = 9.1096e-28
        e_proton = 4.80320425e-10                                       #statCoulomb = 1 erg^1/2 cm^1/2     # Eperez definition    electronCGS = 1.60217646e-19 * 3.e9 # Coulomb  # 1eV = 1.60217646e-19 Jul
        k = 1.3806503e-16                                               #erg / K
        H0_Ionization_Energy = 13.6057                                  #eV
        nu_0 = H0_Ionization_Energy * eV2erg / h                        #Hz
        H_RydbergEnergy = h * nu_0
    
        Flux_List = []
        
        if Ion == "HI":
            Z = 1
         
        CteTotal = (32 * (Z**2) * (e_proton**4)*h) / (3*(masseCGS**2)*(c_CGS**3)) * ((pi * H_RydbergEnergy / (3 * k * Te))**0.5) 
               
        for i in range(len(Wavelength_Range)):
            Wave = float(Wavelength_Range[i])
            nu = 2.99792458e18 / Wave  
            
            Flux_Comp1 = exp(((-1*h*nu)/(k*Te)))                                                                    #No unit    
            Flux_Comp2 = h*nu/(Z**2*e_proton*13.6057)
            Flux_Comp3 = k*Te/(h*nu)
    
            gff_mio = 1 + 0.1728*(Flux_Comp2)**0.33333*(1+2*Flux_Comp3) - 0.0496*(Flux_Comp2)**0.66667*(1+0.66667*Flux_Comp3+1.33333*(Flux_Comp3)**2)
            
            FluxValue = CteTotal * Flux_Comp1 * gff_mio                 # This is gamma_nu
            Flux_List.append(FluxValue)
             
        return array(Flux_List)

    def FreeBoundContinuum_EP(self, Ion, Te, Wavelength_Range):
        
        planckCGS = 6.626068e-27 # cm2 g / s  ==  erg s
        cspeedA = 3.e18 # A / s
        rydbergerg = 2.1798741e-11 # Rydberg to erg
        t4 = Te/1.e4
    
        PixelNumber = len(Wavelength_Range)
        
        nu          = zeros(PixelNumber)
        eryd        = zeros(PixelNumber)
        eryd_low    = zeros(PixelNumber)
        
        for i in range(len(Wavelength_Range)): 
            wave = Wavelength_Range[i]                             # wavelength in Angstroms
            nu[i] = cspeedA/wave                                        # frequency in Hz
            eryd[i] = planckCGS*cspeedA/(rydbergerg*wave)               # energy in Rydbergs
        
        if Ion == "HI":
            a = self.a_HI
        if Ion == "HeI":
            a = self.a_HeI
        if Ion == "HeII":
            a = self.a_HeII
    

        nTe = int(split(a[0])[0])                                       # number of Te columns
        nener = int(split(a[0])[1])                                     # number of energy points rows
        skip = int(1+ceil(nTe/8.))                                      # 8 is the number of Te values per row
        
        temp = zeros(nTe)
        for i in range(1,skip) :
            tt = split(a[i])
            for j in range(0,len(tt)) :
                temp[8*(i-1)+j] = tt[j]
        
        rte2 = 0

        if bisect_right(temp,log10(Te)) - bisect_left(temp,log10(Te)) == 1 : 
            rte1 = bisect_left(temp,log10(Te))  # Te exists in the grid
        elif bisect_right(temp,log10(Te)) - bisect_left(temp,log10(Te)) == 0 :
            rte1 = bisect_left(temp,log10(Te))-1 # interpolate Te
            rte2 = bisect_right(temp,log10(Te))  # 
        else :
            print 'ERROR IN Te COLUMN DETECTION FOR Gamma_fb', Te
    
        Gamma_fb_f = zeros(nener)
        ener = zeros(nener)
        gamma = zeros(nener)
        thresh = zeros(nener) 
        ener_low = zeros(nener) 
           
        for i in range(skip,skip+nener) :
            thresh[i-skip] = int(split(a[i])[0])         # threshold indicator (1) or nodal point (0)
            ener[i-skip] = float(split(a[i])[1])         # energy in Ryd
            gamma[i-skip] = float(split(a[i])[2+rte1])   # [22] is for Te=1e4
            if rte2 > 0 : 
                gamma[i-skip] = float(split(a[i])[2+rte1]) + (float(split(a[i])[2+rte2])-float(split(a[i])[2+rte1]))/(10**temp[rte2]-10**temp[rte1]) * (Te-10**temp[rte1])
            
            ener_low = thresh*ener
            atmp=thresh.nonzero()[0]
            for i in range(1,len(atmp),2) : ener_low[atmp[i]] = ener_low[atmp[i-1]]
            for i in range(0,len(ener_low)) : 
                if ener_low[i] == 0 : ener_low[i] = ener_low[i-1]
            
            eryd_low = interp(eryd, ener, ener_low)
            Gamma_fb_f = interp(eryd, ener, gamma)  # linear interpolation at grid of interest
            Gamma_fb_f = Gamma_fb_f * 1e-40*t4**-1.5*exp(-15.7887*(eryd-eryd_low)/t4)
    
        return Gamma_fb_f      

    def Zanstra_Calibration_Hbeta(self, Te, Flux_EmLine, Gamma_nu, Wavelength_Range):
        
        # Pequignot et al. 1991
        t4 = Te / 10000
        
        alfa_eff = 0.668e-13 * t4**-0.507 / (1 + 1.221*t4**0.653)
        lambda_EmLine = 4862.683

        NebularFlux_lambda = Gamma_nu * lambda_EmLine * Flux_EmLine / (alfa_eff * self.planckCGS * power(Wavelength_Range,2)) # erg s-1 [cm-2] A-1 
        
        return NebularFlux_lambda

    def Zanstra_Calibration_Halpha(self, Te, Flux_EmLine, Gamma_nu, Wavelength_Range):
        
        # Pequignot et al. 1991
        t4              = Te / 10000
        
        alfa_eff        = 2.708e-13 * t4**-0.648 / (1 + 1.315*t4**0.523)
        lambda_EmLine   = 6562.819

        NebularFlux_lambda = Gamma_nu * lambda_EmLine * Flux_EmLine / (alfa_eff * self.planckCGS * power(Wavelength_Range,2)) # erg s-1 [cm-2] A-1 
        
        return NebularFlux_lambda
예제 #50
0
                color = Grid_Values['zGas'].index(parameter_divider)
                label = r'$Z = {logage}$'.format(logage = Model_dict['zGas'])
                  
                #Calculate the grid point abundances
                #x_values, y_values         = Ar_S_model(Line_dict, threshold = 4, z = float(Model_dict['zGas']))
                x_values, y_values          = Ar_S_abundances_model(Line_dict, diags, Ar3, Ar4, S3, S4, 3)
                  
                if (x_values is not None) and (y_values is not None):
                  
                    dz.data_plot(x_values, y_values, color=dz.ColorVector[2][color], label=label, markerstyle='o')
     
                    x_linealFitting = hstack([x_linealFitting, x_values])
                    y_linealFitting = hstack([y_linealFitting, y_values])
  
#Lineal model
lineal_mod          = LinearModel(prefix='lineal_')
Lineal_parameters   = lineal_mod.guess(y_linealFitting, x=x_linealFitting)
x_lineal            = linspace(0, np_max(x_linealFitting), 100)
y_lineal            = Lineal_parameters['lineal_slope'].value * x_lineal + Lineal_parameters['lineal_intercept'].value
dz.data_plot(x_lineal, y_lineal, label='Lineal fitting', color = 'black', linestyle='-')
  
# #Plot fitting formula
formula = r"$log\left(Ar^{{+2}}/Ar^{{+3}}\right) = {m} \cdot log\left(S^{{+2}}/S^{{+3}}\right) + {n}$".format(m=round(Lineal_parameters['lineal_slope'].value,3),
                                                                                                            n=round(Lineal_parameters['lineal_intercept'].value, 3))
dz.Axis.text(0.50, 0.15, formula, transform=dz.Axis.transAxes, fontsize=20) 
  
#Plot wording
xtitle  =   r'$log\left(S^{{+2}}/S^{{+3}}\right)$'
ytitle  =   r'$log\left(Ar^{{+2}}/Ar^{{+3}}\right)$'
title   =   'Argon - Sulfur ionic abundances for several cluster ages and masses'
dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='upper left')
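# A brief, hedged note on the linear fit above: lmfit's LinearModel.guess already performs
# an ordinary least-squares polyfit, which is why the guessed parameters are used directly
# for the plotted line; a full fit with slope/intercept uncertainties would look like:
out_lineal = lineal_mod.fit(y_linealFitting, Lineal_parameters, x=x_linealFitting)
print(out_lineal.params['lineal_slope'].value, out_lineal.params['lineal_slope'].stderr)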
예제 #51
0
plt.plot(line_wave[max_index], line_flux[max_index], 'o', color = 'purple', label = 'Maxima')
plt.plot(line_wave[min_index], line_flux[min_index], 'o', color = 'orange', label = 'Minima')

#Lmfit parameters
Ncomps = 3
Initial_guesses_dic                     = OrderedDict()
Initial_guesses_dic['A']                = np.array([peak_fluxes[0], peak_fluxes[1], peak_fluxes[2], peak_fluxes[1]/50])
Initial_guesses_dic['mu']               = np.array([peak_waves[0], peak_waves[1], peak_waves[2], peak_waves[1]])
Initial_guesses_dic['sigma']            = np.array([1.0, 1.0, 1.0, 5.0])

Initial_guesses_dic['min_sigma']        = np.zeros(Ncomps + 1)
Initial_guesses_dic['min_sigma'][-1]    = 5
params = Load_lmfit_parameters(Ncomps, Initial_guesses_dic, wide_component = True, mu_precission = mu_precission)

#Declaring a linear continuum upon which the line is located
lineal_mod                      = LinearModel(prefix='lineal_')
Continuum_wave, Continuum_flux  = np.hstack([blue_wave, red_wave]), np.hstack([blue_flux, red_flux])
Lineal_parameters               = lineal_mod.guess(Continuum_flux, x=Continuum_wave)
lineal_zerolev                  = Lineal_parameters['lineal_slope'].value * line_wave + Lineal_parameters['lineal_intercept'].value
err_continuum                   = np.std(Lineal_parameters['lineal_slope'].value * Continuum_wave + Lineal_parameters['lineal_intercept'].value - Continuum_flux)
print 'error', err_continuum

#Make the fitting
out = minimize(CompResid_zerolev, params, args=(line_wave, line_flux, lineal_zerolev, Ncomps + 1, err_continuum))
report_fit(out.params)

#Make the plots
x_resample      = np.linspace(line_wave[0], line_wave[-1], 100)
lineal_resample = Lineal_parameters['lineal_slope'].value * x_resample + Lineal_parameters['lineal_intercept'].value
plt.plot(Wavelength, Flux,  '-', color= 'black', label = 'Complete spectrum')
# plt.plot(x_resample, CompositeModel_zerolev(out.params, x_resample, lineal_resample, Ncomps + 1), 'r-', label = 'Fitted line')