Code Example #1
File: analysis.py Project: lowks/OpOpGadget
    def softening_scale(self,
                        mq=70,
                        auto=True,
                        r=None,
                        dens=None,
                        mass=None,
                        kernel='Gadget'):

        opt_dict = {'Gadget': 0.698352, 'spline': 0.977693}
        rq = self.qmass(mq)

        if auto == True:
            prof = Profile(self.p,
                           Ngrid=512,
                           xmin=0.001 * rq,
                           xmax=10 * rq,
                           kind='lin')
            r = prof.grid.gx
            dens = prof.dens

        dens_spline = UnivariateSpline(r, dens, k=1, s=0, ext=1)
        der_dens = dens_spline.derivative()

        derdens = der_dens(r)
        ap = UnivariateSpline(r,
                              r * r * dens * derdens * derdens,
                              k=1,
                              s=0,
                              ext=1)
        bp = UnivariateSpline(r, r * r * dens * dens, k=1, s=0, ext=1)

        B = bp.integral(0, rq)
        A = ap.integral(0, rq) / (mass * mq / 100.)
        C = (B / A)**(1. / 5.)  # 1./5. avoids integer division under Python 2

        cost = opt_dict[kernel]
        N = len(self.p.Id)**(1. / 5.)

        return C * cost / N
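
The snippets collected here share one core move: fit a scipy.interpolate.UnivariateSpline through sampled values (usually with s=0, i.e. pure interpolation) and call its .integral(a, b) method, which integrates the fitted piecewise polynomial exactly. A minimal self-contained sketch of that pattern on toy data (not taken from any of the projects above):

import numpy as np
from scipy.interpolate import UnivariateSpline

# Sample a known function so the result can be checked analytically.
x = np.linspace(0.0, np.pi, 200)
y = np.sin(x)

# s=0 forces interpolation (no smoothing); k=3 is a cubic spline.
spl = UnivariateSpline(x, y, k=3, s=0)

# The spline is piecewise polynomial, so the definite integral is computed
# exactly rather than by quadrature; the integral of sin over [0, pi] is 2.
print(spl.integral(0.0, np.pi))  # ~2.0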
Code Example #2
File: helper.py Project: SamWitte/SubhaloDetection
def integrated_rate_test(mx=100., annih_prod='BB'):
    # This currently doesn't work
    file_path = MAIN_PATH + "/Spectrum/"
    file_path += '{}'.format(int(mx)) + 'GeV_' + annih_prod + '_DMspectrum.dat'

    spectrum = np.loadtxt(file_path)
    imax = 0
    for i in range(len(spectrum)):
        if spectrum[i, 1] < 10 or i == (len(spectrum) - 1):
            imax = i
            break
    spectrum = spectrum[0:imax, :]
    Nevents = 10. ** 5.
    spectrum[:, 1] /= Nevents
    test = interp1d(np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1]), kind='cubic', bounds_error=False, fill_value=0.)
    test2 = interp1d(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1], kind='cubic', bounds_error=False, fill_value=0.)
    e_gamma_tab = np.logspace(0., np.log10(spectrum[-1, 0]), 200)
    print np.column_stack((np.log10(spectrum[:, 0] / mx), np.log10(mx * np.log(10.) * spectrum[:, 1])))
    xtab = np.linspace(np.log10(1. / mx), 0., 200)
    ng2 = np.trapz(10.**test(xtab) / 10. ** xtab, xtab) / np.log(10.)
    mean_e2 = np.trapz(test2(e_gamma_tab), e_gamma_tab)
    rate_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 1])
    avg_e_interp = UnivariateSpline(spectrum[:, 0], spectrum[:, 0] * spectrum[:, 1])
    num_gamma = rate_interp.integral(1., spectrum[-1, 0])
    mean_e = avg_e_interp.integral(1., spectrum[-1, 0])


    print 'DM Mass: ', mx
    print 'Annihilation Products: ', annih_prod
    print 'Number of Gammas > 1 GeV: ', num_gamma, ng2
    print '<E> Gamma: ', mean_e, mean_e2

    return
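
For a self-contained illustration of the cross-check at the end (num_gamma versus ng2), here is a toy version using an assumed power-law spectrum dN/dE = E**-2 between 1 and 100 GeV, where the exact photon count is 0.99:

import numpy as np
from scipy.interpolate import UnivariateSpline

# Toy spectrum dN/dE = E**-2 on [1, 100] GeV; exact integral is 1 - 1/100 = 0.99.
e = np.logspace(0.0, 2.0, 500)
dnde = e ** -2.0

# Route 1: interpolating spline in E, integrated directly (like num_gamma above).
spl = UnivariateSpline(e, dnde, k=3, s=0)
n1 = spl.integral(e[0], e[-1])

# Route 2: trapezoid rule in x = log10(E), using dN/dx = ln(10) * E * dN/dE
# (like ng2 above, up to the change of variables).
x = np.log10(e)
n2 = np.trapz(np.log(10.0) * e * dnde, x)

print(n1)  # ~0.99
print(n2)  # ~0.99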
Code Example #3
File: analysis.py Project: lowks/OpOpGadget
    def softening_scale(self,
                        mq=70,
                        auto=True,
                        r=None,
                        dens=None,
                        mass=None,
                        kernel='Gadget',
                        type=None):
        """
        Calculate the optimal softening scale following Dehnen, 2012 eps=cost*a(dens)*N^-0.2. The output will be in unit
        of r. If Auto==True, r and dens will be not considered.
        :param mq: Mass fraction where calcualte the softening_scale.
        :param auto: If True calculate the r-dens gride using the grid and Profile class
                wit 512 points from 0.001*rq to 10*rq.
        :param r: Array with the sampling radii.
        :param dens: Array with the density at the sampling radii. Its unity need to be the same of mass/r^3
        :param mass: Total mass of the system, the method will calculate in automatic the fraction mq/100*mass
        :param kernel: Kernel to use. Different kernel have different constant C. The implemented kernels are:
                        -spline: generic cubic spline (as in Dehnen, 2012)
                        -Gadget: to calculate the softening_scale using the spline kernel of Gadget2
        :return: the softening scale.
        """
        opt_dict = {'Gadget': 0.698352, 'spline': 0.977693}
        rq = self.qmass(mq, type=type)

        if auto == True:
            prof = Profile(self.p,
                           Ngrid=512,
                           xmin=0.01 * rq,
                           xmax=10 * rq,
                           kind='lin',
                           type=type)
            r = prof.grid.gx
            dens = prof.dens

        dens_spline = UnivariateSpline(r, dens, k=1, s=0, ext=1)
        der_dens = dens_spline.derivative()

        derdens = der_dens(r)
        ap = UnivariateSpline(r,
                              r * r * dens * derdens * derdens,
                              k=1,
                              s=0,
                              ext=1)
        bp = UnivariateSpline(r, r * r * dens * dens, k=1, s=0, ext=1)

        B = bp.integral(0, rq)
        A = ap.integral(0, rq) / (mass * mq / 100.)
        C = (B / A)**(1. / 5.)

        cost = opt_dict[kernel]
        N = len(self.p.Id)**(1. / 5.)

        return C * cost / N
Code Example #4
def omega_phi_kerr_converge(time, data, N=1, minN=0):
    # k=4 so that the derivative is a cubic spline, whose roots (the extrema
    # of the data) fitpack can locate analytically.
    splrep = UnivariateSpline(time, data, s=0, k=4)
    tpks = splrep.derivative().roots()[1:]
    # Average value of the data between extrema minN and N.
    deltat = tpks[N] - tpks[minN]
    defint = splrep.integral(tpks[minN], tpks[N])
    omega_phi = defint / deltat
    return omega_phi
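
The choice k=4 above is deliberate: derivative() of a quartic spline is a cubic spline, and scipy's roots() is only implemented for cubic splines, so the extrema of the data can be located analytically. A sketch of the same trick on synthetic data (independent of the function above):

import numpy as np
from scipy.interpolate import UnivariateSpline

# Synthetic oscillatory signal; its extrema sit at t = pi/2, 3*pi/2, 5*pi/2, ...
t = np.linspace(0.0, 6.0 * np.pi, 600)
y = np.sin(t)

# k=4 so that the derivative is cubic, for which roots() is available.
spl = UnivariateSpline(t, y, s=0, k=4)
extrema = spl.derivative().roots()
print(extrema[:3] / np.pi)  # ~[0.5, 1.5, 2.5]

# Average of the signal between the first two extrema, as in the example above.
avg = spl.integral(extrema[0], extrema[1]) / (extrema[1] - extrema[0])
print(avg)  # ~0 for a sine between adjacent extrema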
Code Example #5
def normalize_eigenvectors(vectors, x_list):
    ## integrate over spline interpolation of each vector, return the normalization factor, divide each vector by the normalization constant

    print(a)  # a: the well width, defined in the enclosing scope
    normalized_vectors = []
    ## note, for infinite well normalization coefficient = sqrt(2/a)
    for eig_function in vectors:
        spl = UnivariateSpline(x_list, np.absolute(eig_function)**2, s=0)
        integral = spl.integral(x_min, x_max)  # x_min, x_max come from the enclosing scope
        coefficient = 1 / np.sqrt(integral)
        ## since integral over psi*psi should be 1
        print("spline.integral : " + str(coefficient))

        #plot_eigenvectors([eig_function], x_list);
        #plot_spline(spl, x_list);

        if (False):
            integral = simps(np.absolute(eig_function)**2, x_list)
            coefficient = 1 / np.sqrt(integral)
            ## since integral over psi*psi should be 1
            print("simpso.integral : " + str(coefficient))

        normalized_vectors.append(eig_function * coefficient)

    return normalized_vectors
Code Example #6
def Ninteg(x, func, x0, xf, dx):
    spl = UnivariateSpline(x, func, k=5, s=0)
    intfunc = np.zeros(np.size(xf))
    for i in range(np.size(xf)):
        intfunc[i] = spl.integral(x0, xf[i])

    return intfunc
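
When many cumulative integrals share the same lower limit, an alternative to looping over spl.integral is antiderivative() (available in SciPy 0.13+), which returns a new spline that can be evaluated at all endpoints at once. A sketch under those assumptions:

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0.0, 2.0, 200)
spl = UnivariateSpline(x, x ** 2, k=5, s=0)

# Loop of definite integrals, as in Ninteg above ...
xf = np.array([0.5, 1.0, 2.0])
loop = np.array([spl.integral(0.0, b) for b in xf])

# ... or one antiderivative spline, evaluated and differenced in a single shot.
F = spl.antiderivative()
vectorized = F(xf) - F(0.0)

print(loop)        # ~[0.0417, 0.3333, 2.6667]  (exactly x**3 / 3)
print(vectorized)  # same values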
Code Example #7
File: STApar.py Project: MendezV/QSZBOA
def NintegXarr(func):
    spl = UnivariateSpline(x, func, k=3, s=0)
    intfunc = np.zeros(np.size(xf))
    for i in range(np.size(xf)):
        intfunc[i] = spl.integral(0, x[i])

    return intfunc
Code Example #8
def cns_total_rate_integrated(Tmin, Z, N, nu_spec, Tmax=roi_max):
    if (Tmin >= Tmax):
        return 0.
    x = np.linspace(Tmin, Tmax, 1000)
    y = dsigmadT_cns_rate(x, Z, N, nu_spec)
    spl = UnivariateSpline(x, y)
    res = spl.integral(Tmin, Tmax)
    return res
    # res = spint.quad(lambda T_: dsigmadT_cns_rate(T_, Z, N, nu_spec),
Code Example #9
def model(T):
    stellar_flux = np.zeros(len(ws))
    f = spec_get(wavs, fluxes, temperatures, T)
    s = UnivariateSpline(wavs, f)
    for i in range(len(ws)):
        delta_wav = (ws[i] + ws_err[i]) - (ws[i] - ws_err[i])
        stellar_flux[i] = s.integral(ws[i] - ws_err[i],
                                     ws[i] + ws_err[i]) / delta_wav
    return stellar_flux
Code Example #10
File: analysis.py Project: iogiul/OpOpGadget
    def softening_scale(self,mq=70,auto=True,r=None,dens=None,mass=None,kernel='Gadget',type=None):
        """
        Calculate the optimal softening scale following Dehnen, 2012 eps=cost*a(dens)*N^-0.2. The output will be in unit
        of r. If Auto==True, r and dens will be not considered.
        :param mq: Mass fraction where calcualte the softening_scale.
        :param auto: If True calculate the r-dens gride using the grid and Profile class
                wit 512 points from 0.001*rq to 10*rq.
        :param r: Array with the sampling radii.
        :param dens: Array with the density at the sampling radii. Its unity need to be the same of mass/r^3
        :param mass: Total mass of the system, the method will calculate in automatic the fraction mq/100*mass
        :param kernel: Kernel to use. Different kernel have different constant C. The implemented kernels are:
                        -spline: generic cubic spline (as in Dehnen, 2012)
                        -Gadget: to calculate the softening_scale using the spline kernel of Gadget2
        :return: the softening scale.
        """
        opt_dict={'Gadget':0.698352, 'spline':0.977693}
        rq=self.qmass(mq,type=type)

        if auto==True:
            prof=Profile(self.p,Ngrid=512,xmin=0.01*rq,xmax=10*rq,kind='lin',type=type)
            r=prof.grid.gx
            dens=prof.dens



        dens_spline=UnivariateSpline(r, dens, k=1,s=0,ext=1)
        der_dens=dens_spline.derivative()




        derdens=der_dens(r)
        ap=UnivariateSpline(r, r*r*dens*derdens*derdens, k=1,s=0,ext=1)
        bp=UnivariateSpline(r, r*r*dens*dens, k=1,s=0,ext=1)

        B=bp.integral(0,rq)
        A=ap.integral(0,rq)/(mass*mq/100.)
        C=(B/(A))**(1/5)

        cost=opt_dict[kernel]
        N=len(self.p.Id)**(1/5)

        return C*cost/N
Code Example #11
File: galaxy_model.py Project: vvasill/astrosoma19
 def M_H2(self, t):
     ZZsun = self.MZ / self.Mg / self.Zsun
     rd = self.R_d(t)
     rg = np.linspace(0., 20. * rd, 50)
     Sigma0 = self.Mg / (2. * np.pi * rd**2)
     sgd = Sigma0 * np.exp(-rg / rd)
     sgd = rg * sgd * self.f_H2(sgd, ZZsun)
     sgsp = UnivariateSpline(rg, sgd, s=0.0)
     dummy = 2.0 * np.pi * sgsp.integral(0., rg[-1])
     return dummy
Code Example #12
def Cl_aa_spline1(l_index,nb,B_lamba):
    integrand = []
    for tt in range(len(k_array1)):
       integrand.append(Cl_aa_integrand1(tt,l_index,nb,B_lamba))
       #print(jj)
    integrand = np.array(integrand)
    abs_integrand = np.abs(integrand)
    #embed()
    sp = UnivariateSpline(k_array1[np.argmax(abs_integrand):-1], abs_integrand[np.argmax(abs_integrand):-1], k=3, s=0)
    return sp.integral(k_array1[np.argmax(abs_integrand)],k_array1[-1])
Code Example #13
File: recipe_extract_base.py Project: henryroe/plp
    def get_profile_func(self, profile_x, profile_y):

        from scipy.interpolate import UnivariateSpline
        profile_ = UnivariateSpline(profile_x, profile_y, k=3, s=0,
                                    bbox=[0, 1])
        integ = profile_.integral(0, 1)

        def profile(o, x, slitpos):
            return profile_(slitpos) / integ

        return profile
Code Example #14
def read_cfd_data(cfd_data_file):
    """read cfd results force coefficients data"""
    cf_array = []
    with open(cfd_data_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter='\t')
        line_count = 0

        for row in csv_reader:
            if line_count <= 14:
                line_count += 1
            else:
                cf_array.append([
                    float(row[0]),
                    float(row[1]),
                    float(row[2]),
                    float(row[3]),
                    float(row[4]),
                    float(row[5]),
                    float(row[6])
                ])
                line_count += 1

        print(f'Processed {line_count} lines in {cfd_data_file}')

    cf_array = np.array(cf_array)
    cl_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 3], s=0)
    cd_spl = UnivariateSpline(cf_array[:, 0], cf_array[:, 1], s=0)

    mcl_s = cl_spl.integral(0.0, 1.0)
    mcl_w = cl_spl.integral(1.0, 1.2) / 0.2
    # mcl_w = cl_spl(1.2)
    mcd_s = cd_spl.integral(0.0, 1.0)
    mcd_w = cd_spl.integral(1.0, 1.2) / 0.2

    ratio_l = mcl_w / mcl_s
    ratio_d = mcd_w / mcd_s
    ratio_ld = mcl_w / mcd_w

    mcf_array = [mcl_s, mcl_w, ratio_l, mcd_s, mcd_w, ratio_d, ratio_ld]

    return mcf_array
Code Example #15
 def work(z):
     Lband, M_AB, S_nu, Phi = _qlf.qlf(band, z, Lbolbins)
     Phi /= (U.MPC ** 3)
     Dc = cosmology.Dc(z)
     DL = Dc * (1 + z)
     distance_modulus = 5 * numpy.log10(DL / (0.01 * U.KPC))
     m_AB = M_AB + distance_modulus
     spl = UnivariateSpline(-m_AB, Phi, k=5)
     print z, DL, m_AB[0], m_AB[-1], Phi.sum(), m_AB.max(), m_AB.min()
     integrated = spl.integral(-faint, -bright)
     if integrated < 0: integrated = 0
     return integrated
Code Example #16
File: atom.py Project: gmatteo/pseudo_dojo
    def integral3d(self, a=None, b=None):
        """
        Return definite integral of the spline of (r**2 values**2) between two given points a and b
        Args:
            a: First point. rmesh[0] if a is None
            b: Last point. rmesh[-1] if b is None
        """
        a = self.rmesh[0] if a is None else a
        b = self.rmesh[-1] if b is None else b
        r2v2_spline = UnivariateSpline(self.rmesh, (self.rmesh * self.values) ** 2, s=0)

        return r2v2_spline.integral(a, b)
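
For a normalized radial function the 3D integral should come out to one. A quick standalone check using the hydrogen-like 1s radial function R(r) = 2*exp(-r) in atomic units, with rmesh and values standing in for the attributes used above:

import numpy as np
from scipy.interpolate import UnivariateSpline

# Hydrogen-like 1s radial function: the integral of (r*R)**2 dr equals 1.
rmesh = np.linspace(0.0, 30.0, 2000)
values = 2.0 * np.exp(-rmesh)

r2v2_spline = UnivariateSpline(rmesh, (rmesh * values) ** 2, s=0)
print(r2v2_spline.integral(rmesh[0], rmesh[-1]))  # ~1.0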
Code Example #17
File: apecore.py Project: srirampr/pseudo_dojo
    def check_logders(self):
        """Check the quality of the log derivatives."""
        merits = {}
        for (state, pp_ld) in self.pp_logders.items():
            ae_ld = self.ae_logders[state]
            rmesh = ae_ld.rmesh
            f = np.abs(np.tan(ae_ld.values) - np.tan(pp_ld.values))
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(rmesh, f, s=0)
            merits[state] = spline.integral(rmesh[0], rmesh[-1])  / (rmesh[-1] - rmesh[0])

        return merits
Code Example #18
    def _build_energy_curve(self):
        x, y = bezier(self.control_polygon.points).T
        t = np.linspace(0, 1, num=200)
        bezier_spline_x = UnivariateSpline(t, x, k=5)
        bezier_spline_y = UnivariateSpline(t, y, k=5)
        dxdt = bezier_spline_x.derivative(n=self.control_polygon.degree)(t)
        dydt = bezier_spline_y.derivative(n=self.control_polygon.degree)(t)
        norm = dxdt * dxdt + dydt * dydt

        norm_spline = UnivariateSpline(t, norm, k=5)
        energy = norm_spline.integral(0, 1)
        return x, y, norm, energy
Code Example #19
File: opacity.py Project: brycem1/CMB
def Cl_aa_spline(l):
    k_test = np.logspace(-3., 1., num=2000)
    integrand = []
    for jj in range(len(k_test)):
        integrand.append(Cl_aa_integrand(k_test[jj], l))
        #print(jj)
    integrand = np.array(integrand)
    abs_integrand = np.abs(integrand)
    sp = UnivariateSpline(k_test[np.argmax(abs_integrand):-1],
                          abs_integrand[np.argmax(abs_integrand):-1],
                          k=3,
                          s=0)
    return sp.integral(k_test[np.argmax(abs_integrand)], k_test[-1])
Code Example #20
def calculate_integrated_square_errors(estimation, methods):

    integrated_square_errors = {}

    for method in methods:
        x = estimation.index.to_numpy()
        square_error = (estimation.loc[:, method] -
                        estimation.loc[:, 'actual'])**2
        spline = UnivariateSpline(x, square_error)
        integrated_square_errors[method] = spline.integral(
            np.min(x), np.max(x))

    return integrated_square_errors
Code Example #21
File: final_tau.py Project: brycem1/CMB
def Cl_aa_spline(l_index,nb,B_lamba):
    integrand = []
    for jj in range(len(k_array)):
       integrand.append(Cl_aa_integrand(jj,l_index,nb,B_lamba))
       #print(jj)
    integrand = np.array(integrand)
    abs_integrand = np.abs(integrand)
    #print(abs_integrand[np.argmax(abs_integrand):-1])
    #embed()
    #sp = UnivariateSpline(k_array[np.argmax(abs_integrand):-1], abs_integrand[np.argmax(abs_integrand):-1], k=1, s=0)
    #return sp.integral(k_array[np.argmax(abs_integrand)],k_array[-1])
    sp = UnivariateSpline(k_array, abs_integrand, k=1, s=0)
    return sp.integral(k_array[np.argmax(abs_integrand)],k_array[-1])
Code Example #22
File: atom.py Project: rahul1126/pseudo_dojo
    def integral3d(self, a=None, b=None):
        """
        Return definite integral of the spline of (r**2 values**2) between two given points a and b
        Args:
            a: First point. rmesh[0] if a is None
            b: Last point. rmesh[-1] if b is None
        """
        a = self.rmesh[0] if a is None else a
        b = self.rmesh[-1] if b is None else b
        r2v2_spline = UnivariateSpline(self.rmesh,
                                       (self.rmesh * self.values)**2,
                                       s=0)

        return r2v2_spline.integral(a, b)
Code Example #23
File: recipe_extract_base.py Project: zyajay/plp
    def get_profile_func(self, profile_x, profile_y):

        from scipy.interpolate import UnivariateSpline
        profile_ = UnivariateSpline(profile_x,
                                    profile_y,
                                    k=3,
                                    s=0,
                                    bbox=[0, 1])
        integ = profile_.integral(0, 1)

        def profile(o, x, slitpos):
            return profile_(slitpos) / integ

        return profile
Code Example #24
 def _set_efield(self, inp):
     try:
         try:
             E_r_fit = UnivariateSpline(inp.er_data[:, 0],
                                        inp.er_data[:, 1] * inp.Er_scale,
                                        k=3,
                                        s=0)
         except (AttributeError, TypeError):
             E_r_fit = UnivariateSpline(inp.er_data[:, 0],
                                        inp.er_data[:, 1],
                                        k=3,
                                        s=0)
         self.E_r = TwoDProfile(self.psi,
                                E_r_fit(self.rho),
                                self.R,
                                self.Z,
                                wall=self.wall_line,
                                units=r"$V/m")
         E_pot = np.zeros(self.rho.shape)
         try:
             for i, rhoval in enumerate(self.rho[:, 0]):
                 E_pot[i] = E_r_fit.integral(rhoval, self.sep_val)
             self.E_pot = TwoDProfile(self.psi,
                                      E_pot,
                                      self.R,
                                      self.Z,
                                      wall=self.wall_line,
                                      units=r"$V/m")
         except:
             print("Error in E_pot integration. Setting to zeros")
             self.E_pot = TwoDProfile(self.psi,
                                      np.zeros(self.rho.shape),
                                      self.R,
                                      self.Z,
                                      wall=self.wall_line,
                                      units=r"$V/m")
     except:
         print('Er data not supplied. Setting E_r and E_pot to zero.')
         self.E_r = TwoDProfile(self.psi,
                                np.zeros(self.rho.shape),
                                self.R,
                                self.Z,
                                wall=self.wall_line,
                                units=r"$V/m")
         self.E_pot = TwoDProfile(self.psi,
                                  np.zeros(self.rho.shape),
                                  self.R,
                                  self.Z,
                                  wall=self.wall_line,
                                  units=r"$V")
Code Example #25
File: bounded_splines.py Project: refnx/refnx
    def integral(self, a, b):
        # capturing contributions outside domain of interpolation
        below_dx = np.max([0.0, self.bnds[0] - a])
        above_dx = np.max([0.0, b - self.bnds[1]])

        outside_contribution = (below_dx + above_dx) * self.fill_value

        # adjusting interval to spline domain
        a_f = np.max([a, self.bnds[0]])
        b_f = np.min([b, self.bnds[1]])

        if a_f >= b_f:
            return outside_contribution
        else:
            return outside_contribution + UnivariateSpline.integral(self, a_f, b_f)
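
This method assumes bnds and fill_value are set elsewhere in the class (bounded_splines.py): integration limits that fall outside the fitted domain contribute fill_value per unit length instead of the spline's extrapolation. A rough standalone sketch of that bookkeeping, with a hypothetical BoundedSpline wrapper:

import numpy as np
from scipy.interpolate import UnivariateSpline

class BoundedSpline(UnivariateSpline):  # hypothetical stand-in for the refnx class
    def __init__(self, x, y, fill_value=0.0, **kw):
        super().__init__(x, y, **kw)
        self.bnds = (x[0], x[-1])
        self.fill_value = fill_value

    def integral(self, a, b):
        # out-of-domain stretches contribute fill_value per unit length
        below_dx = max(0.0, self.bnds[0] - a)
        above_dx = max(0.0, b - self.bnds[1])
        outside = (below_dx + above_dx) * self.fill_value
        a_f, b_f = max(a, self.bnds[0]), min(b, self.bnds[1])
        if a_f >= b_f:
            return outside
        return outside + UnivariateSpline.integral(self, a_f, b_f)

x = np.linspace(0.0, 1.0, 50)
spl = BoundedSpline(x, np.ones_like(x), fill_value=2.0, k=1, s=0)
print(spl.integral(-1.0, 2.0))  # 5.0: 1.0 inside [0, 1] plus 2.0 * 2 outside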
Code Example #26
    def get_retention(self, fb, vesc):

        if fb == 1.0:
            return 1.0

        v_space = np.linspace(0, 1000, 1000)
        kick_spl_loop = UnivariateSpline(
            x=v_space,
            y=self.maxwellian(v_space, 265 * (1 - fb)),
            s=0,
            k=3,
        )
        retention = kick_spl_loop.integral(0, vesc)

        return retention
Code Example #27
def histogram_to_spline(data, bins=None):
    """ Create an approximation of the pdf from the observed data
    """
    if bins is None:
        bins = np.linspace(0, 10, num=501, endpoint=True)

    counts, boundaries = np.histogram(data, bins=bins)

    bins_mid = (bins[1:] + bins[:-1]) / 2
    spl = UnivariateSpline(bins_mid, counts, k=3, s=0)

    norm = spl.integral(bins[0], bins[-1])
    counts = counts / norm
    spl = UnivariateSpline(bins_mid, counts, k=3, s=0)

    return spl
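
A usage sketch, assuming histogram_to_spline above (with its numpy/scipy imports) is in scope: fit a pdf to normal samples and check that it integrates to about one:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=5.0, scale=1.0, size=100000)

pdf = histogram_to_spline(data)
print(pdf.integral(0.0, 10.0))  # ~1.0 after the renormalization pass
print(pdf(5.0))                 # ~0.4, the peak of a unit-sigma Gaussian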
Code Example #28
def integrate_spline_approx(f, right_endpts, npts=100):
    x_min = min(0, *right_endpts)
    x_max = max(right_endpts)
    pad = (x_max - x_min) / 10
    x_min -= pad
    x_max += pad

    x_space = np.linspace(x_min, x_max, npts)

    # Get a spline interpolation of f over x_space
    # with no smoothing (s=0). This is probably the
    # speed bottleneck of this function.
    spl = UnivariateSpline(x_space, f(x_space), s=0)

    # the spline can be integrated analytically (fast)
    return np.array(
        [spl.integral(0, right_endpt) for right_endpt in right_endpts])
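
For instance, assuming integrate_spline_approx above is in scope, integrating the standard normal pdf from 0 to each endpoint should reproduce Phi(x) - 0.5:

import numpy as np

def f(x):
    return np.exp(-x ** 2 / 2.0) / np.sqrt(2.0 * np.pi)  # standard normal pdf

print(integrate_spline_approx(f, [0.5, 1.0, 2.0]))
# ~[0.1915, 0.3413, 0.4772], i.e. Phi(x) - 0.5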
Code Example #29
class Interpolated(ABCRate):
    """Rate defined by linear interpolation of the given points

    Args:
        x(1D array-like): time values
        y(1D array-like): rate values
        **kwargs(dict): additional parameters to pass to :class:`scipy.interpolate.UnivariateSpline`;
            the defaults `dict(ext=1, k=1, s=0)` give linear interpolation that is zero outside the range.
    """
    def __init__(self, x, y, **kwargs):
        self.range = (x[0], x[-1])
        kwargs.setdefault('ext', 1)
        kwargs.setdefault('k', 1)
        kwargs.setdefault('s', 0)
        self.f = UnivariateSpline(x, y, **kwargs)

    def __call__(self, t):
        return self.f(t)

    def integral(self, t0, t1):
        return self.f.integral(t0, t1)
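
A short usage sketch, assuming the Interpolated class above (and its ABCRate base) is importable:

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 0.0])
rate = Interpolated(x, y)
print(rate(0.5))              # ~0.5: linear interpolation between the points
print(rate.integral(0., 2.))  # ~1.0: area of the triangle
print(rate(5.0))              # 0.0: ext=1 zeros the rate outside the range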
Code Example #30
def modulus_squared_Psi_at(x, t, N=False, orders_of_magnitude=False):
    ## for k going from negative infinity to infinity, integrate dPsi_over_dk
    ## that is: integral from -infinity to infinity dPsi/dk * dk = Psi
    ## see page 1.6 and 1.8 for where this is explicitly defined.

    ## "infinity" should be defined relative to the values k is working on:
    ## |k = inf| >>> h_bar/(a*m), h_bar*t, k_o
    ## orders_of_magnitude sets how much greater "infinity" is than the largest
    ## comparable value: a trade-off between the "infiniteness" of infinity and
    ## the number of computations required for a given granularity.
    max_of_comparables = np.abs(
        np.max([h_bar / float(a * m), h_bar * t, k_o, x_o]))
    inf_mag = max_of_comparables * 10**orders_of_magnitude
    step_size = 2 * inf_mag / float(N)
    #print(inf_mag);
    #print(N);
    #print("step size = " + str(step_size));
    #print("max of comparables = " + str(max_of_comparables));

    if (step_size > max_of_comparables):
        comparable_N = 2 * inf_mag / float(max_of_comparables)
        print(
            "(!) -- warning - stepsize is greater than max val of comparables. N would need to be "
            + str(comparable_N) + " to be comparable")

    k_values = np.arange(-1 * inf_mag, inf_mag + 1, step_size)
    ## + 1 to max so that max val is generated and not cutoff
    integrand = [dPsi_over_dk(x, t, k) for k in k_values]

    #print(k_values[0:25]);
    #print(integrand[0:25]);
    #print(len(k_values))

    ## note: since we cannot spline complex values, we numerically compute the
    ## modulus squared instead, i.e. the integrand conj(Psi)*Psi at every dk
    mod_squared_integrand = (np.conj(integrand) * integrand).real
    ## the imaginary part is zero, but take .real explicitly to avoid surprises later

    s = UnivariateSpline(k_values, mod_squared_integrand)
    result = s.integral(0, np.inf)
    print(result)
    return result
Code Example #31
def splitsum(t1, t2, shift):
    tmpf = UnivariateSpline(np.array(range(len(t2))) + shift, t2, ext=2, k=1, s=0)
    ### Note: UnivariateSpline.integral assumes ext=0 (zero outside the data range)
    ### even when ext is set otherwise; tmpf(t) is evaluated first so that ext=2
    ### raises ValueError when the interval falls outside the domain.

    #~ ttmp = [tmpf.integral(t,t+1) + t1_val for t,t1_val in enumerate(t1)]
    ttmp = []
    pp = 0
    pp0 = 0
    for t, t1_val in enumerate(t1):
        #print(tmpf.integral(t,t+1))
        try:
            pp = tmpf(t + 1)
            pp0 = tmpf(t)
            tttmp = tmpf.integral(t, t + 1) + t1_val
        except ValueError:
            tttmp = t1_val + pp

        ttmp.append(tttmp)
    #plt.plot(tmpf(np.arange(0,60*4+50,1)))
    #plt.show()
    return ttmp
Code Example #32
File: recipe_extract_base.py Project: henryroe/plp
    def get_profile_func_ab(self, profile_x, profile_y):
        from scipy.interpolate import UnivariateSpline
        profile_ = UnivariateSpline(profile_x, profile_y, k=3, s=0,
                                    bbox=[0, 1])

        roots = list(profile_.roots())
        #assert(len(roots) == 1)
        integ_list = []
        from itertools import izip, cycle
        for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                       [0] + roots,
                                       roots + [1]):
            #print ss, int_r1, int_r2
            integ_list.append(profile_.integral(int_r1, int_r2))

        integ = np.abs(np.sum(integ_list))

        def profile(o, x, slitpos):
            return profile_(slitpos) / integ

        return profile
Code Example #33
def find_widths(profile):
    """
    Attempts to find the W_10, W_50 and equivalent width of a profile by using a spline approach

    Parameters:
    -----------
    profile: list
        The profile to find the widths of

    Return:
    W10: float
        The W10 width of the profile measured in number of bins
    W50: float
        The W50 width of the profile measured in number of bins
    Weq: float
        The equivalent width of the profile measured in number of bins
    """
    #perform spline operations
    x = np.array(list(range(len(profile))))
    spline0 = UnivariateSpline(x, profile, s=0)
    spline10 = UnivariateSpline(x, profile - np.full(len(x), 0.1), s=0)
    spline50 = UnivariateSpline(x, profile - np.full(len(x), 0.5), s=0)

    #find Weq
    integral = spline0.integral(0, len(profile)-1)
    Weq = integral/max(profile)

    #find W10 and W50
    W10_roots = spline10.roots()
    W50_roots = spline50.roots()
    W10=0
    W50=0
    for i in range(0, len(W10_roots), 2):
        W10 += W10_roots[i+1] - W10_roots[i]
    for i in range(0, len(W50_roots), 2):
        W50 += W50_roots[i+1] - W50_roots[i]

    return W10, W50, Weq
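
A quick check, assuming find_widths above is in scope: for a unit-amplitude Gaussian profile, W50 should be the FWHM, about 2.355 sigma:

import numpy as np

# Unit-amplitude Gaussian centred in a 200-bin profile, sigma = 10 bins.
bins = np.arange(200)
profile = np.exp(-0.5 * ((bins - 100.0) / 10.0) ** 2)

W10, W50, Weq = find_widths(profile)
print(W50)  # ~23.5 bins: 2 * sqrt(2 ln 2) * sigma
print(W10)  # ~42.9 bins: 2 * sqrt(2 ln 10) * sigma
print(Weq)  # ~25.1 bins: sqrt(2 pi) * sigma for a Gaussian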
Code Example #34
File: recipe_extract_base.py Project: zyajay/plp
    def get_profile_func_ab(self, profile_x, profile_y):
        from scipy.interpolate import UnivariateSpline
        profile_ = UnivariateSpline(profile_x,
                                    profile_y,
                                    k=3,
                                    s=0,
                                    bbox=[0, 1])

        roots = list(profile_.roots())
        #assert(len(roots) == 1)
        integ_list = []
        from itertools import izip, cycle
        for ss, int_r1, int_r2 in izip(cycle([1, -1]), [0] + roots,
                                       roots + [1]):
            #print ss, int_r1, int_r2
            integ_list.append(profile_.integral(int_r1, int_r2))

        integ = np.abs(np.sum(integ_list))

        def profile(o, x, slitpos):
            return profile_(slitpos) / integ

        return profile
Code Example #35
File: postborn.py Project: emmacossette/CosmoMC_DDM
def get_field_rotation_power_from_PK(params,
                                     PK,
                                     chi_source,
                                     lmax=20000,
                                     acc=1,
                                     lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)),
                    np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(l,l') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source)**2 / chis**2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, l in enumerate(ls):
        k = (l + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k**4, cchi)
        M[i, :] = cl * l**4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc),
                           np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = UnivariateSpline(ls, diagm, s=0)

    def high_curl_integrand(ll, lp):
        lp = lp.astype(int)  # np.int was removed from NumPy; the builtin int is equivalent
        r2 = (np.float64(ll) / lp)**2
        return lp * r2 * diagmsp(lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):

        l = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, l * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in l', with denser around l~l'
            lcalc = np.hstack(
                (lsall[0:20:4], lsall[29:ll - 200:35],
                 lsall[ll - 190:ll + 210:6], lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi,
                              (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / l
            lfact = np.sqrt(1 + lrat**2 - 2 * cosphi * lrat)
            lnorm = l * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0

            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact**2 *
                                        (cosphi - lrat))**2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = UnivariateSpline(lcalc, tmps, s=0)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi)**2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
Code Example #36
File: recipe_extract.py Project: naraehwang/plp
    def process(self, recipe, band, obsids, frametypes):

        igr_path = self.igr_path
        igr_storage = self.igr_storage

        if recipe == "A0V_AB":

            DO_STD = True
            #FIX_TELLURIC=False

        elif recipe == "STELLAR_AB":

            DO_STD = False
            #FIX_TELLURIC=True

        elif recipe == "EXTENDED_AB":

            DO_STD = False
            #FIX_TELLURIC=True

        elif recipe == "EXTENDED_ONOFF":

            DO_STD = False
            #FIX_TELLURIC=True


        if 1:

            obj_filenames = igr_path.get_filenames(band, obsids)

            master_obsid = obsids[0]

            tgt_basename = os.path.splitext(os.path.basename(obj_filenames[0]))[0]

            db = {}
            basenames = {}

            db_types = ["flat_off", "flat_on", "thar", "sky"]

            for db_type in db_types:

                db_name = igr_path.get_section_filename_base("PRIMARY_CALIB_PATH",
                                                            "%s.db" % db_type,
                                                            )
                db[db_type] = ProductDB(db_name)


            # db on output path
            db_types = ["a0v"]

            for db_type in db_types:

                db_name = igr_path.get_section_filename_base("OUTDATA_PATH",
                                                            "%s.db" % db_type,
                                                            )
                db[db_type] = ProductDB(db_name)

            # to get basenames
            db_types = ["flat_off", "flat_on", "thar", "sky"]
            # if FIX_TELLURIC:
            #     db_types.append("a0v")

            for db_type in db_types:
                basenames[db_type] = db[db_type].query(band, master_obsid)



        if 1: # make aperture
            from libs.storage_descriptions import SKY_WVLSOL_JSON_DESC

            sky_basename = db["sky"].query(band, master_obsid)
            wvlsol_products = igr_storage.load([SKY_WVLSOL_JSON_DESC],
                                               sky_basename)[SKY_WVLSOL_JSON_DESC]

            orders_w_solutions = wvlsol_products["orders"]
            wvl_solutions = map(np.array, wvlsol_products["wvl_sol"])

            from libs.storage_descriptions import ONED_SPEC_JSON_DESC

            raw_spec_products = igr_storage.load([ONED_SPEC_JSON_DESC],
                                                 sky_basename)

            from recipe_wvlsol_sky import load_aperture2

            ap = load_aperture2(igr_storage, band, master_obsid,
                                db["flat_on"],
                                raw_spec_products[ONED_SPEC_JSON_DESC]["orders"],
                                orders_w_solutions)


            # This should be saved somewhere and loaded, instead of making it every time.
            order_map = ap.make_order_map()
            slitpos_map = ap.make_slitpos_map()
            order_map2 = ap.make_order_map(mask_top_bottom=True)


        if 1:

            from libs.storage_descriptions import (HOTPIX_MASK_DESC,
                                                   DEADPIX_MASK_DESC,
                                                   ORDER_FLAT_IM_DESC,
                                                   ORDER_FLAT_JSON_DESC,
                                                   FLAT_MASK_DESC)

            hotpix_mask = igr_storage.load([HOTPIX_MASK_DESC],
                                           basenames["flat_off"])[HOTPIX_MASK_DESC]

            deadpix_mask = igr_storage.load([DEADPIX_MASK_DESC],
                                            basenames["flat_on"])[DEADPIX_MASK_DESC]

            pix_mask  = hotpix_mask.data | deadpix_mask.data



            # aperture_solution_products = PipelineProducts.load(aperture_solutions_name)


            orderflat_ = igr_storage.load([ORDER_FLAT_IM_DESC],
                                         basenames["flat_on"])[ORDER_FLAT_IM_DESC]


            orderflat = orderflat_.data
            orderflat[pix_mask] = np.nan

            orderflat_json = igr_storage.load([ORDER_FLAT_JSON_DESC],
                                              basenames["flat_on"])[ORDER_FLAT_JSON_DESC]
            order_flat_meanspec = np.array(orderflat_json["mean_order_specs"])

            # flat_normed = igr_storage.load([FLAT_NORMED_DESC],
            #                                basenames["flat_on"])[FLAT_NORMED_DESC]

            flat_mask = igr_storage.load([FLAT_MASK_DESC],
                                         basenames["flat_on"])[FLAT_MASK_DESC]
            bias_mask = flat_mask.data & (order_map2 > 0)

            SLITOFFSET_FITS_DESC = ("PRIMARY_CALIB_PATH", "SKY_", ".slitoffset_map.fits")
            prod_ = igr_storage.load([SLITOFFSET_FITS_DESC],
                                     basenames["sky"])[SLITOFFSET_FITS_DESC]
            #fn = sky_path.get_secondary_path("slitoffset_map.fits")
            slitoffset_map = prod_.data

        if 1:

            abba_names = obj_filenames

            def filter_abba_names(abba_names, frametypes, frametype):
                return [an for an, ft in zip(abba_names, frametypes) if ft == frametype]


            a_name_list = filter_abba_names(abba_names, frametypes, "A")
            b_name_list = filter_abba_names(abba_names, frametypes, "B")

            if recipe in ["A0V_AB", "STELLAR_AB"]:
                IF_POINT_SOURCE = True
            elif recipe in ["EXTENDED_AB", "EXTENDED_ONOFF"]:
                IF_POINT_SOURCE = False
            else:
                print "Unknown recipe : %s" % recipe

            if 1:
                #ab_names = ab_names_list[0]

                # master_hdu = pyfits.open(a_name_list[0])[0]

                a_list = [pyfits.open(name)[0].data \
                          for name in a_name_list]
                b_list = [pyfits.open(name)[0].data \
                          for name in b_name_list]


                # we may need to destripe

                # first define extract profile (gaussian).


                # dx = 100

                if IF_POINT_SOURCE: # if point source
                    # for point sources, variance estimation becomes wrong
                    # if the lengths of the two lists are different,
                    assert len(a_list) == len(b_list)

                # a_b != 1 for the cases when len(a) != len(b)
                a_b = float(len(a_list)) / len(b_list)

                a_data = np.sum(a_list, axis=0)
                b_data = np.sum(b_list, axis=0)

                data_minus = a_data - a_b*b_data
                #data_minus0 = data_minus

                from libs.destriper import destriper
                if 1:

                    data_minus = destriper.get_destriped(data_minus,
                                                         ~np.isfinite(data_minus),
                                                         pattern=64)

                data_minus_flattened = data_minus / orderflat
                data_minus_flattened[~flat_mask.data] = np.nan
                #data_minus_flattened[order_flat_meanspec<0.1*order_flat_meanspec.max()] = np.nan


                # for variance, we need a square of a_b
                data_plus = (a_data + (a_b**2)*b_data)

                import scipy.ndimage as ni
                bias_mask2 = ni.binary_dilation(bias_mask)

                from libs import instrument_parameters
                gain =  instrument_parameters.gain[band]

                # random noise
                variance0 = data_minus

                variance_ = variance0.copy()
                variance_[bias_mask2] = np.nan
                variance_[pix_mask] = np.nan

                mm = np.ma.array(variance0, mask=~np.isfinite(variance0))
                ss = np.ma.median(mm, axis=0)
                variance_ = variance_ - ss

                # iterate over fixed number of times.
                # need to be improved.
                for i in range(5):
                    st = np.nanstd(variance_, axis=0)
                    variance_[np.abs(variance_) > 3*st] = np.nan
                    #st = np.nanstd(variance_, axis=0)

                variance = destriper.get_destriped(variance0,
                                                    ~np.isfinite(variance_),
                                                   pattern=64)

                variance_ = variance.copy()
                variance_[bias_mask2] = np.nan
                variance_[pix_mask] = np.nan

                st = np.nanstd(variance_)
                st = np.nanstd(variance_[np.abs(variance_) < 3*st])

                variance_[np.abs(variance_-ss) > 3*st] = np.nan

                x_std = ni.median_filter(np.nanstd(variance_, axis=0), 11)

                variance_map0 = np.zeros_like(variance) + x_std**2



                variance_map = variance_map0 + np.abs(data_plus)/gain # add Poisson noise in ADU
                # we ignore effect of flattening

                # now estimate lsf


                # estimate lsf
                ordermap_bpixed = order_map.copy()
                ordermap_bpixed[pix_mask] = 0
                ordermap_bpixed[~np.isfinite(orderflat)] = 0
            #


            if IF_POINT_SOURCE: # if point source

                x1, x2 = 800, 1200
                bins, lsf_list = ap.extract_lsf(ordermap_bpixed, slitpos_map,
                                                data_minus_flattened,
                                                x1, x2, bins=None)


                hh0 = np.sum(lsf_list, axis=0)
                peak1, peak2 = max(hh0), -min(hh0)
                lsf_x = 0.5*(bins[1:]+bins[:-1])
                lsf_y = hh0/(peak1+peak2)

                from scipy.interpolate import UnivariateSpline
                lsf_ = UnivariateSpline(lsf_x, lsf_y, k=3, s=0,
                                        bbox=[0, 1])
                roots = list(lsf_.roots())
                #assert(len(roots) == 1)
                integ_list = []
                from itertools import izip, cycle
                for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                                      [0] + roots,
                                                      roots + [1]):
                    #print ss, int_r1, int_r2
                    integ_list.append(lsf_.integral(int_r1, int_r2))
                integ = np.abs(np.sum(integ_list))

                def lsf(o, x, slitpos):
                    return lsf_(slitpos) / integ

                # make weight map
                profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

                # extract spec

                s_list, v_list = ap.extract_stellar(ordermap_bpixed,
                                                    profile_map,
                                                    variance_map,
                                                    data_minus_flattened,
                                                    slitoffset_map=slitoffset_map)

                # make synth_spec : profile * spectra
                synth_map = ap.make_synth_map(order_map, slitpos_map,
                                              profile_map, s_list,
                                              slitoffset_map=slitoffset_map)

                sig_map = (data_minus_flattened - synth_map)**2/variance_map
                ## mark sig_map > 100 as cosmic rays. The threshold needs to be fixed.


                # reextract with new variance map and CR is rejected
                variance_map_r = variance_map0 + np.abs(synth_map)/gain
                variance_map2 = np.max([variance_map, variance_map_r], axis=0)
                variance_map2[np.abs(sig_map) > 100] = np.nan

                # extract spec

                s_list, v_list = ap.extract_stellar(ordermap_bpixed, profile_map,
                                                    variance_map2,
                                                    data_minus_flattened,
                                                    slitoffset_map=slitoffset_map)


            else: # if extended source
                from scipy.interpolate import UnivariateSpline
                if recipe in ["EXTENDED_AB", "EXTENDED_ABBA"]:
                    delta = 0.01
                    lsf_ = UnivariateSpline([0, 0.5-delta, 0.5+delta, 1],
                                            [1., 1., -1., -1.],
                                            k=1, s=0,
                                            bbox=[0, 1])
                else:
                    lsf_ = UnivariateSpline([0, 1], [1., 1.],
                                            k=1, s=0,
                                            bbox=[0, 1])

                def lsf(o, x, slitpos):
                    return lsf_(slitpos)

                profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

                # we need to update the variance map by rejecting
                # cosmic-ray sources, but it is not clear how to do this
                # for extended sources.
                variance_map2 = variance_map
                s_list, v_list = ap.extract_stellar(ordermap_bpixed,
                                                    profile_map,
                                                    variance_map2,
                                                    data_minus_flattened,
                                                    slitoffset_map=slitoffset_map
                                                    )



            if 1:
                # calculate S/N per resolution
                sn_list = []
                for wvl, s, v in zip(wvl_solutions,
                                     s_list, v_list):

                    dw = np.gradient(wvl)
                    pixel_per_res_element = (wvl/40000.)/dw
                    #print pixel_per_res_element[1024]
                    # len(pixel_per_res_element) = 2047. But we ignore it.
                    sn = (s/v**.5)*(pixel_per_res_element**.5)

                    sn_list.append(sn)



        if 1: # save the product
            from libs.storage_descriptions import (COMBINED_IMAGE_DESC,
                                                   VARIANCE_MAP_DESC)
            from libs.products import PipelineImage

            r = PipelineProducts("1d specs")

            r.add(COMBINED_IMAGE_DESC, PipelineImage([],
                                                     data_minus_flattened))
            r.add(VARIANCE_MAP_DESC, PipelineImage([],
                                                   variance_map2))

            # r.add(VARIANCE_MAP_DESC, PipelineImage([],
            #                                        variance_map.data))

            igr_storage.store(r,
                              mastername=obj_filenames[0],
                              masterhdu=None)



        if 1: # save spectra, variance, sn
            from libs.storage_descriptions import SKY_WVLSOL_FITS_DESC
            fn = igr_storage.get_path(SKY_WVLSOL_FITS_DESC,
                                      basenames["sky"])

            # fn = sky_path.get_secondary_path("wvlsol_v1.fits")
            f = pyfits.open(fn)

            d = np.array(s_list)
            f[0].data = d.astype("f32")

            from libs.storage_descriptions import (SPEC_FITS_DESC,
                                                   VARIANCE_FITS_DESC,
                                                   SN_FITS_DESC)

            fout = igr_storage.get_path(SPEC_FITS_DESC,
                                        tgt_basename)

            f.writeto(fout, clobber=True)


            d = np.array(v_list)
            f[0].data = d.astype("f32")
            fout = igr_storage.get_path(VARIANCE_FITS_DESC,
                                        tgt_basename)

            f.writeto(fout, clobber=True)

            d = np.array(sn_list)
            f[0].data = d.astype("f32")
            fout = igr_storage.get_path(SN_FITS_DESC,
                                        tgt_basename)

            f.writeto(fout, clobber=True)




        if 1: #
            from libs.storage_descriptions import ORDER_FLAT_JSON_DESC
            prod = igr_storage.load([ORDER_FLAT_JSON_DESC],
                                    basenames["flat_on"])[ORDER_FLAT_JSON_DESC]

            new_orders = prod["orders"]
            # fitted_response = orderflat_products["fitted_responses"]
            i1i2_list = prod["i1i2_list"]



            order_indices = []

            for o in ap.orders:
                o_new_ind = np.searchsorted(new_orders, o)
                order_indices.append(o_new_ind)


            if DO_STD:
                # a quick and dirty flattening for A0V stars

                from libs.master_calib import get_master_calib_abspath
                fn = get_master_calib_abspath("A0V/vegallpr25.50000resam5")
                d = np.genfromtxt(fn)

                wvl_a0v, flux_a0v, cont_a0v = (d[:,i] for i in [0, 1, 2])
                wvl_a0v = wvl_a0v/1000.

                wvl_limits = []
                for wvl_ in wvl_solutions:
                    wvl_limits.extend([wvl_[0], wvl_[-1]])

                dwvl = abs(wvl_[0] - wvl_[-1])*0.1 # padding

                mask_wvl1 = min(wvl_limits) - dwvl
                mask_wvl2 = max(wvl_limits) + dwvl

                #print mask_wvl1, mask_wvl2

                # if band == "H":
                #     mask_wvl1, mask_wvl2 = 1.450, 1.850
                # else:
                #     mask_wvl1, mask_wvl2 = 1.850, 2.550

                mask_igr = (mask_wvl1 < wvl_a0v) & (wvl_a0v < mask_wvl2)

                fn = get_master_calib_abspath("telluric/LBL_A15_s0_w050_R0060000_T.fits")
                telluric = pyfits.open(fn)[1].data
                telluric_lam = telluric["lam"]
                tel_mask_igr = (mask_wvl1 < telluric_lam) & (telluric_lam < mask_wvl2)
                #plot(telluric_lam[tel_mask_H], telluric["trans"][tel_mask_H])
                from scipy.interpolate import interp1d, UnivariateSpline
                # spl = UnivariateSpline(telluric_lam[tel_mask_igr],
                #                        telluric["trans"][tel_mask_igr],
                #                        k=1,s=0)

                spl = interp1d(telluric_lam[tel_mask_igr],
                               telluric["trans"][tel_mask_igr],
                               bounds_error=False
                               )

                trans = spl(wvl_a0v[mask_igr])
                # ax1.plot(wvl_a0v[mask_igr], flux[mask_igr]/cont[mask_igr]*trans,
                #          color="0.5", zorder=0.5)


                trans_m = ni.maximum_filter(trans, 128)
                trans_mg = ni.gaussian_filter(trans_m, 32)

                zzz0 = (flux_a0v/cont_a0v)[mask_igr]
                zzz = zzz0*trans
                mmm = trans/trans_mg > 0.95
                zzz[~mmm] = np.nan

                wvl_zzz = wvl_a0v[mask_igr]
                #ax2.plot(, zzz)

                # #ax2 = subplot(212)
                # if DO_STD:
                #     telluric_cor = []


                a0v_flattened = []

                for o_index, wvl, s in zip(order_indices, wvl_solutions, s_list):

                    i1, i2 = i1i2_list[o_index]
                    #sl = slice(i1, i2)
                    wvl1, wvl2 = wvl[i1], wvl[i2]
                    #wvl1, wvl2 = wvl[0], wvl[-1]
                    z_m = (wvl1 < wvl_zzz) & (wvl_zzz < wvl2)

                    wvl1, wvl2 = min(wvl), max(wvl)
                    z_m2 = (wvl1 < wvl_zzz) & (wvl_zzz < wvl2)

                    #z_m = z_m2

                    ss = interp1d(wvl, s)

                    s_interped = ss(wvl_zzz[z_m])

                    xxx, yyy = wvl_zzz[z_m], s_interped/zzz[z_m]

                    from astropy.modeling import models, fitting
                    p_init = models.Chebyshev1D(domain=[xxx[0], xxx[-1]],
                                                degree=6)
                    fit_p = fitting.LinearLSQFitter()
                    x_m = np.isfinite(yyy)
                    p = fit_p(p_init, xxx[x_m], yyy[x_m])
                    #ax2.plot(xxx, yyy)
                    #ax2.plot(xxx, p(xxx))

                    res_ = p(wvl)


                    z_interp = interp1d(wvl_zzz[z_m], zzz0[z_m],
                                        bounds_error=False)
                    A0V = z_interp(wvl)
                    #res_[res_<0.3*res_.max()] = np.nan

                    s_f = (s/res_)/A0V
                    s_f[:i1] = np.nan
                    s_f[i2:] = np.nan
                    a0v_flattened.append(s_f)


                d = np.array(a0v_flattened)
                #d[~np.isfinite(d)] = 0.
                f[0].data = d.astype("f32")

                from libs.storage_descriptions import SPEC_FITS_FLATTENED_DESC
                fout = igr_storage.get_path(SPEC_FITS_FLATTENED_DESC,
                                            tgt_basename)

                f.writeto(fout, clobber=True)

                db["a0v"].update(band, tgt_basename)
Code Example #37
File: libbsmbh.py Project: kobyafrank/kali
	def estimate(self, observedLC):
		"""!
		Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini 
		"""
		## intrinsicFluxEst
		maxPeriodFactor = 10.0
		model = LombScargleFast().fit(observedLC.t, observedLC.y, observedLC.yerr)
		periods, power = model.periodogram_auto(nyquist_factor = observedLC.numCadences)
		model.optimizer.period_range = (2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), maxPeriodFactor*observedLC.T)
		periodEst = model.best_period
		numIntrinsicFlux = 100
		lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
		highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
		intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(observedLC.y[np.where(observedLC.mask == 1.0)]), num = numIntrinsicFlux)
		intrinsicFluxList = list()
		totalIntegralList = list()
		for f in xrange(1, numIntrinsicFlux - 1):
			beamedLC = observedLC.copy()
			beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(beamedLC.numCadences):
				beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
				beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
			dopplerLC = beamedLC.copy()
			dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(observedLC.numCadences):
				dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
				dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
			dzdtLC = dopplerLC.copy()
			dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			for i in xrange(observedLC.numCadences):
				dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
				dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
			foldedLC = dzdtLC.fold(periodEst)
			foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
			integralSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = None, check_finite = True)
			totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
			intrinsicFluxList.append(intrinsicFlux[f])
			totalIntegralList.append(totalIntegral)
		intrinsicFluxEst = intrinsicFluxList[np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

		## periodEst
		for i in xrange(beamedLC.numCadences):
			beamedLC.y[i] = observedLC.y[i]/intrinsicFluxEst
			beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFluxEst
			dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
			dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
			dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
			dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
		model = LombScargleFast().fit(dzdtLC.t, dzdtLC.y, dzdtLC.yerr)
		periods, power = model.periodogram_auto(nyquist_factor = dzdtLC.numCadences)
		model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), maxPeriodFactor*dzdtLC.T)
		periodEst = model.best_period

		## eccentricityEst & omega2Est
		# First find a full period going from rising to falling. 
		risingSpline = UnivariateSpline(dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)], 1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k = 3, s = None, check_finite = True)
		risingSplineRoots = risingSpline.roots()
		firstRoot = risingSplineRoots[0]
		if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
			tRising = risingSplineRoots[0]
		else:
			tRising = risingSplineRoots[1]
		# Now fold the LC starting at tRising and going for a full period.
		foldedLC = dzdtLC.fold(periodEst, tStart = tRising)
		foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
		# Fit the folded LC with a spline to figure out alpha and beta
		fitLC = foldedLC.copy()
		foldedSpline = UnivariateSpline(foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)], 1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k = 3, s = 2*foldedLC.numCadences, check_finite = True)
		for i in xrange(fitLC.numCadences):
			fitLC.x[i] = foldedSpline(fitLC.t[i])
		# Now get the roots and find the falling root
		tZeros = foldedSpline.roots()
		if tZeros.shape[0] == 1: # We have found just tFalling
			tFalling = tZeros[0]
			tRising = fitLC.t[0]
			startIndex = 0
			tFull = fitLC.t[-1]
			stopIndex = fitLC.numCadences
		elif tZeros.shape[0] == 2: # We have found tFalling and one of tRising or tFull
			if foldedSpline.derivatives(tZeros[0])[1] < 0.0:
				tFalling = tZeros[0]
				tFull = tZeros[1]
				stopIndex = np.where(fitLC.t < tFull)[0][-1]
				tRising = fitLC.t[0]
				startIndex = 0
			elif foldedSpline.derivatives(tZeros[0])[1] > 0.0:
				if foldedSpline.derivatives(tZeros[1])[1] < 0.0:
					tRising = tZeros[0]
					startIndex = np.where(fitLC.t > tRising)[0][0]
					tFalling = tZeros[1]
					tFull = fitLC.t[-1]
					stopIndex = fitLC.numCadences
				else:
					raise RuntimeError('Could not determine alpha & omega correctly because the first root is rising but the second root is not falling!')
		elif tZeros.shape[0] == 3:
			tRising = tZeros[0]
			startIndex = np.where(fitLC.t > tRising)[0][0]
			tFalling = tZeros[1]
			tFull = tZeros[2]
			stopIndex = np.where(fitLC.t < tFull)[0][-1]
		else:
			raise RuntimeError('Could not determine alpha & omega correctly because tZeros has %d roots!'%(tZeros.shape[0]))
		# One full period now goes from tRising to periodEst. The maxima occurs between tRising and tFalling while the minima occurs between tFalling and tRising + periodEst  
		# Find the minima and maxima
		alpha = math.fabs(fitLC.x[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
		beta = math.fabs(fitLC.x[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
		peakLoc = fitLC.t[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
		troughLoc = fitLC.t[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
		KEst = 0.5*(alpha + beta)
		delta2 = (math.fabs(foldedSpline.integral(tRising, peakLoc)) + math.fabs(foldedSpline.integral(troughLoc, tFull)))/2.0
		delta1 = (math.fabs(foldedSpline.integral(peakLoc, tFalling)) + math.fabs(foldedSpline.integral(tFalling, troughLoc)))/2.0
		eCosOmega2 = (alpha - beta)/(alpha + beta)
		eSinOmega2 = ((2.0*math.sqrt(alpha*beta))/(alpha + beta))*((delta2 - delta1)/(delta2 + delta1))
		eccentricityEst = math.sqrt(math.pow(eCosOmega2, 2.0) + math.pow(eSinOmega2, 2.0))
		tanOmega2 = math.fabs(eSinOmega2/eCosOmega2)
		if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
			omega2Est = math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
			omega2Est = 180.0 - math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
			omega2Est = 180.0 + math.atan(tanOmega2)*(180.0/math.pi)
		if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
			omega2Est = 360.0 - math.atan(tanOmega2)*(180.0/math.pi)
		omega1Est = omega2Est - 180.0

		## tauEst
		zDot = KEst*(1.0 + eccentricityEst)*(eCosOmega2/eccentricityEst)
		zDotLC = dzdtLC.copy()
		for i in xrange(zDotLC.numCadences):
			zDotLC.y[i] = zDotLC.y[i] - zDot
		zDotSpline = UnivariateSpline(zDotLC.t[np.where(zDotLC.mask == 1.0)], zDotLC.y[np.where(zDotLC.mask == 1.0)], 1.0/zDotLC.yerr[np.where(zDotLC.mask == 1.0)], k = 3, s = 2*zDotLC.numCadences, check_finite = True)
		for i in xrange(zDotLC.numCadences):
			zDotLC.x[i] = zDotSpline(zDotLC.t[i])
		zDotZeros = zDotSpline.roots()
		zDotFoldedLC = dzdtLC.fold(periodEst)
		zDotFoldedSpline = UnivariateSpline(zDotFoldedLC.t[np.where(zDotFoldedLC.mask == 1.0)], zDotFoldedLC.y[np.where(zDotFoldedLC.mask == 1.0)], 1.0/zDotFoldedLC.yerr[np.where(zDotFoldedLC.mask == 1.0)], k = 3, s = 2*zDotFoldedLC.numCadences, check_finite = True)
		for i in xrange(zDotFoldedLC.numCadences):
			zDotFoldedLC.x[i] = zDotFoldedSpline(zDotFoldedLC.t[i])
		tC = zDotFoldedLC.t[np.where(np.max(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
		nuC = (360.0 - omega2Est)%360.0
		tE = zDotFoldedLC.t[np.where(np.min(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
		nuE = (180.0 - omega2Est)%360.0
		if math.fabs(360.0 - nuC) < math.fabs(360 - nuE):
			tauEst = zDotZeros[np.where(zDotZeros > tC)[0][0]]
		else:
			tauEst = zDotZeros[np.where(zDotZeros > tE)[0][0]]

		## a2sinInclinationEst
		a2sinInclinationEst = ((KEst*periodEst*self.Day*self.c*math.sqrt(1.0 - math.pow(eccentricityEst, 2.0)))/self.twoPi)/self.Parsec

		return intrinsicFluxEst, periodEst, eccentricityEst, omega1Est, tauEst, a2sinInclinationEst
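The eccentricity and periapsis-angle estimates above come from the peak/trough amplitudes (alpha, beta) and the half-period areas (delta1, delta2) of the folded velocity curve. A minimal standalone sketch of just those relations, with illustrative numbers and a hypothetical helper name (atan2 stands in for the four explicit sign checks in the code above):

import math

def estimate_e_omega(alpha, beta, delta1, delta2):
    # e*cos(omega2): asymmetry of the peak and trough amplitudes
    eCosOmega2 = (alpha - beta)/(alpha + beta)
    # e*sin(omega2): asymmetry of the areas on either side of the falling zero
    eSinOmega2 = ((2.0*math.sqrt(alpha*beta))/(alpha + beta))*((delta2 - delta1)/(delta2 + delta1))
    e = math.hypot(eCosOmega2, eSinOmega2)
    omega2 = math.degrees(math.atan2(eSinOmega2, eCosOmega2)) % 360.0  # quadrant-safe
    return e, omega2

print(estimate_e_omega(alpha=0.012, beta=0.008, delta1=0.9, delta2=1.1))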
コード例 #40
0
def preprocess(filename, num_resamplings = 25):

	# read data
	#filename = "../data/MarieTherese_jul31_and_Aug07_all.pkl"

	pkl_file = open(filename, 'rb')
        data1 = cPickle.load(pkl_file)
        num_strokes = len(data1)

        # get the unique stroke labels, map to class labels (ints) for later using dictionary
        stroke_dict = dict()
        value_index = 0
        for i in range(0,num_strokes):
                current_key = data1[i][0]
                if current_key not in stroke_dict:
                        stroke_dict[current_key] = value_index
                        value_index = value_index + 1

        # save the dictionary to file, for later use
        dict_filename = "../data/stroke_label_mapping.pkl"
        dict_file = open(dict_filename, 'wb')
        pickle.dump(stroke_dict, dict_file)

	# - smooth data
	# 	for each stroke, get the vector of data, smooth/interpolate it over time, store sampling from smoothed signal in vector
	# - sample at regular intervals (1/30 of total time, etc.) -> input vector X


	num_params = len(data1[0][1][0]) #accelx, accely, etc.
	#num_params = 16 #accelx, accely, etc.

        # re-sample the interpolated spline this many times (25 or so seems ok, since most letters have this many points)


        # build an output array large enough to hold the vectors for each stroke and the (unicode -> int) stroke value (1 elts)
#        output_array = np.zeros((num_strokes, (num_resamplings_2 + num_resamplings) * num_params + 1))
        output_array = np.zeros((num_strokes, (5 * num_resamplings) * num_params + 1))
        print output_array.size

        print filename
        print num_params
        print num_resamplings
        print

	for i in range(0, num_strokes):

                # how far?
                if (i % 100 == 0):
                        print float(i)/num_strokes
	
		X_matrix = np.zeros((num_params, num_resamplings * 5)) # the array to store in (using original data and 2 derivs, 2 integrals)

                # the array to store the reshaped resampled vector in - disabled along
                # with the outer-product features below, which are its only users
#		X_2_vector_scaled = np.zeros((num_params, num_resamplings_2))

                # the array to store the above 2 concatenated
#		concatenated_X_X_2 = np.zeros((num_params, num_resamplings_2 + num_resamplings)) 
		concatenated_X_X_2 = np.zeros((num_params, num_resamplings * 5)) # the array to store in (using original data and 2 derivs, 2 integrals)

		# for each parameter (accelX, accelY, ...)

                # map the unicode character to int
                curr_stroke_val = stroke_dict[data1[i][0]]
                                        
                #print(len(curr_stroke))
                #print(curr_stroke[0])
                #print(curr_stroke[1])

		curr_data = data1[i][1]

                # fix if too short for interpolation - pad current data with 3 zeros
                if(len(curr_data) <= 3):
                        curr_data = np.concatenate([curr_data, np.zeros((3,num_params))])

		time = np.arange(0, len(curr_data), 1) # the sample 'times' (0 to number of samples)
		time_new = np.linspace(0, len(curr_data) - 1, num_resamplings) # the resampled time points; linspace gives exactly num_resamplings points and never extrapolates past the data

		for j in range(0, num_params): # iterate through parameters

			signal = curr_data[:,j] # one signal (accelx, etc.) to interpolate
			# interpolate the signal using a spline or so, so that arbitrary points can be used 
			# (~30 seems reasonable based on data, for example)
                        
			#tck = interpolate.splrep(time, signal, s=0)  # the interpolation represenation
                        tck = UnivariateSpline(time, signal, s=0)

			# sample the interpolation num_resamplings times to get values
                        # resampled_data = interpolate.splev(time_new, tck, der=0) # the resampled data
                        resampled_data = tck(time_new)

                        # scale data (center, norm)
                        resampled_data = preprocessing.scale(resampled_data)
                        
                        # first integral
                        tck.integral = tck.antiderivative()
                        resampled_data_integral = tck.integral(time_new)

                        # scale data (center, norm)
                        resampled_data_integral = preprocessing.scale(resampled_data_integral)

                        # 2nd integral
                        tck.integral_2 = tck.antiderivative(2)
                        resampled_data_integral_2 = tck.integral_2(time_new)

                        # scale data (center, norm)
                        resampled_data_integral_2 = preprocessing.scale(resampled_data_integral_2)

                        # first deriv
                        tck.deriv = tck.derivative()
                        resampled_data_deriv = tck.deriv(time_new)

                        # scale
                        resampled_data_deriv = preprocessing.scale(resampled_data_deriv)

                        # second deriv
                        tck.deriv_2 = tck.derivative(2)
                        resampled_data_deriv_2 = tck.deriv_2(time_new)

                        #scale
                        resampled_data_deriv_2 = preprocessing.scale(resampled_data_deriv_2)


                        # concatenate into one vector
                        concatenated_resampled_data = np.concatenate((resampled_data, 
                                                                      resampled_data_integral, 
                                                                      resampled_data_integral_2, 
                                                                      resampled_data_deriv, 
                                                                      resampled_data_deriv_2))
                        
                        # store for the correct parameter, to be used later as part of inputs to SVM
                        X_matrix[j] = concatenated_resampled_data

			# while we're at it, square vector of resampled data to get a matrix, vectorize the matrix, and store
			#  for each X in list, multiply X by itself -> X_2
			#- vectorize X^2 (e.g. 10 x 10 -> 100 dimensions)
#			X_2_matrix = np.outer(concatenated_resampled_data, concatenated_resampled_data) # temp matrix for outer product
#			X_2_vector = np.reshape(X_2_matrix, -1) # reshape into a vector

			#- center and normalize X^2 by mean and standard deviation
#			X_2_vector_scaled[j] = preprocessing.scale(X_2_vector) 

			#- concatenate with input X -> 110 dimensions
#			concatenated_X_X_2[j] = np.concatenate([X_matrix[j], X_2_vector_scaled[j]])

# FOR NOW, ONLY USE X, NOT OUTER PRODUCT
			concatenated_X_X_2[j] = X_matrix[j]

                # NOTE, THIS SHOULD REALLY JUST BE A BIG VECTOR FOR EACH STROKE, SO RESHAPE BEFORE ADDING TO OUTPUT LIST
                # ALSO, THE STROKE VALUE SHOULD BE ADDED
                this_sample = np.concatenate((np.reshape(concatenated_X_X_2, -1), np.array([curr_stroke_val])))
                concatenated_samples = np.reshape(this_sample, -1)

                # ADD TO OUTPUT ARRAY
                output_array[i] = concatenated_samples
        
        print(output_array.size)
        
	return(output_array)
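For reference, a compact, self-contained sketch of the per-signal feature construction performed in the loop above (synthetic signal; sklearn's preprocessing.scale, as in the code):

import numpy as np
from scipy.interpolate import UnivariateSpline
from sklearn import preprocessing

num_resamplings = 25
signal = np.sin(np.linspace(0, 3*np.pi, 40))        # synthetic stand-in for accelx etc.
time = np.arange(len(signal))
time_new = np.linspace(0, len(signal) - 1, num_resamplings)

spline = UnivariateSpline(time, signal, s=0)        # interpolating spline (s=0)
features = [spline(time_new),                       # resampled signal
            spline.antiderivative()(time_new),      # first integral
            spline.antiderivative(2)(time_new),     # second integral
            spline.derivative()(time_new),          # first derivative
            spline.derivative(2)(time_new)]         # second derivative
vec = np.concatenate([preprocessing.scale(f) for f in features])
print(vec.shape)                                    # (125,) == 5 * num_resamplings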
コード例 #41
0
ファイル: postborn.py プロジェクト: alexander-mead/CAMB
def get_field_rotation_power_from_PK(params, PK, chi_source, lmax=20000, acc=1, lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)), np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(l,l') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source) ** 2 / chis ** 2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, l in enumerate(ls):
        k = (l + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k ** 4, cchi)
        M[i, :] = cl * l ** 4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc), np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = UnivariateSpline(ls, diagm, s=0)

    def high_curl_integrand(ll, lp):
        lp = lp.astype(int)
        r2 = (np.float64(ll) / lp) ** 2
        return lp * r2 * diagmsp(lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):

        l = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, l * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in l', with denser around l~l'
            lcalc = np.hstack((lsall[0:20:4],
                               lsall[29:ll - 200:35],
                               lsall[ll - 190:ll + 210:6],
                               lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / l
            lfact = np.sqrt(1 + lrat ** 2 - 2 * cosphi * lrat)
            lnorm = l * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0

            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact ** 2 * (cosphi - lrat)) ** 2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = UnivariateSpline(lcalc, tmps, s=0)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi) ** 2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
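A hedged usage sketch for the function above; the cosmology, the kmax/zmax choices, and the source redshift are illustrative assumptions, not values from the original project, and it assumes the helper cl_kappa_limber used inside is in scope (it is defined elsewhere in postborn.py):

import camb

pars = camb.set_params(H0=67.5, ombh2=0.022, omch2=0.122)
results = camb.get_background(pars)
chi_source = results.comoving_radial_distance(1090.)   # comoving distance to z ~ 1090
PK = camb.get_matter_power_interpolator(pars, nonlinear=True, hubble_units=False,
                                        k_hunit=False, kmax=100., zmax=1100.)
lsamp, clcurl = get_field_rotation_power_from_PK(pars, PK, chi_source, lmax=5000)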
コード例 #42
0
ファイル: GeneralModel.py プロジェクト: lowks/OpOpGadget
class GeneralModel(Model.Model):

    def __init__(self,R,dens,rc=1,Mmax=1, G='kpc km2 / (M_sun s2)', denorm=True, use_c=False):
        """
        The purpose of the general model is to start from a density law R-dens to build a galaxy model.
        Attenzione per come è creato il modello assume sempre che
        per R>rmax la densita sia 0, la massa resti costante al suo valore massimo e il potenziale vada
        come M/r. Per modelli che raggiungono la massa massima all infinito questo potrebbe essere un problema,
        quindi si dovrebbero usare modelli con massa finita o troncarli e campionarli fino a quanto la massa non raggiunge
        il suo valore max. Per modelli non troncati è meglio utilizzare modelli analitici se possibile.
        Anche nel calcolo del potenziale Rinf è settato uguale all ultimo punto di R, poichè cmq per R>Rmax
        dens=0 e l integrale int_Rmax^inf dens r dr=0 sempre.
        :param R: list of radii, it needs to  be in the form  r/rc
        :param dens: list of dens at radii R. It can be also a function or a lambda function that depends
                     only on the variable R=r/rc
        :param rc: Scale length of the model, the R in input will be multiplyed by rc before start all the calculation
        :param Mmax: Physical Value of the Mass at Rmax (the last point of the R grid). The physical unity of dens and pot and mass
               will depends on the unity of Mmax
        :param G: Value of the gravitational constant G, it can be a number of a string.
                    If G=1, the physical value of the potential will be Phi/G.
                    If string it must follow the rule of the unity of the module.astropy constants.
                    E.g. to have G in unit of kpc3/Msun s2, the input string is 'kpc3 / (M_sun s2)'
                    See http://astrofrog-debug.readthedocs.org/en/latest/constants/
        :param denorm: If True, the output value of mass, dens and pot will be de normalized using Mmax and G.
        :param use_c: To calculate pot and mass with a C-cyle, WARNING it creates more noisy results
        """

        self.rc=rc
        self.Mmax=Mmax
        if isinstance(G,float) or isinstance(G,int): self.G=G
        else:
            GG=conG.to(G)
            self.G=GG.value


        if isinstance(dens,list) or isinstance(dens,tuple) or isinstance(dens,np.ndarray):  self.dens_arr=np.array(dens,dtype=float,order='C')
        else:
            self.dens_arr=dens(R)

        self.R=np.array(R,dtype=float,order='C')*self.rc
        self.mass_arr=np.empty_like(self.dens_arr,dtype=float,order='C')
        self.pot_arr=np.empty_like(self.dens_arr,dtype=float,order='C')
        self.use_c=use_c
        self._use_nparray=False

        self._dens=UnivariateSpline(self.R,self.dens_arr, k=1, s=0, ext=1) #for R>rmax, dens=0

        if self.use_c==True:
            #add to path to use relative path
            dll_name='model_c_ext/GeneralModel.so'
            dllabspath = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + dll_name
            lib = ct.CDLL(dllabspath)
            #add to path to use relativ path
            mass_func=lib.evalmass
            mass_func.restype=None
            mass_func.argtypes=[ndpointer(ct.c_double, flags="C_CONTIGUOUS"), ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ct.c_int,ndpointer(ct.c_double, flags="C_CONTIGUOUS")]
            mass_func(self.R,self.dens_arr,len(self.dens_arr),self.mass_arr)
            self._mass_int=UnivariateSpline(self.R,self.mass_arr, k=1, s=0, ext=3) #ext=3, constant for R>Rmax: there are no particles beyond Rmax and the mass stays the same



            pot_func=lib.evalpot
            pot_func.restype=None
            pot_func.argtypes=[ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ndpointer(ct.c_double, flags="C_CONTIGUOUS"),ct.c_int,ndpointer(ct.c_double, flags="C_CONTIGUOUS")]
            pot_func(self.R,self.dens_arr,self.mass_arr,len(self.dens_arr),self.pot_arr)
            self._pot_int=UnivariateSpline(self.R,self.pot_arr, k=1, s=0, ext=1)



        else:
            self._dm2=UnivariateSpline(self.R,self.R*self.R*self.dens_arr, k=2, s=0,ext=1)
            self._dm=UnivariateSpline(self.R,self.R*self.dens_arr, k=1, s=0,ext=1)


            #Evaluate mass and pot on the R grid in input
            #mass
            func=np.vectorize(self._dm2.integral)
            self.mass_arr=func(0,self.R)

            #pot
            a=(1/self.R)*self.mass_arr
            func=np.vectorize(self._dm.integral)
            b=func(self.R,self.R[-1])
            self.pot_arr=a+b

        if denorm==True: self._set_denorm(self.Mmax)
        else:
            self.Mc=1
            self.dc=1
            self.pc=1

    def _evaluatedens(self,R):
        return self.dc*self._dens(R)

    def _evaluatemass(self,R):
        return self.Mc*self._dm2.integral(0,R)

    def _evaluatemassc(self,R):

        return self.Mc*self._mass_int(R)

    def _evaluatepot(self,R):
            """
            NB specific potential=-Phi
            :param R:
            :return:
            """

            a=(1/R)*self._dm2.integral(0,R)
            b=self._dm.integral(R,self.R[-1])

            return self.pc*(a+b)

    def _evaluatepotc(self,R):
            """
            Unlike _evaluatepot, in this case we have no values for R>Rmax, so we split into
            two parts: for R<=Rmax we use the precomputed array, while for R>Rmax we simply
            use Phi=M/R, since at these radii there is no more matter and the total enclosed
            mass is the last value on the grid.
            NB specific potential=-Phi
            :param R:
            :return:
            """

            ret_arr=np.where(R<=self.R[-1],self._pot_int(R),self.mass_arr[-1]/R)
            return self.pc*ret_arr

    def _evaluateradius(self,x,x_type='mass'):

        if x_type=='mass': ret_func=interp1d(self.mass_arr,self.R, kind='linear')
        if x_type=='pot': ret_func=interp1d(self.pot_arr,self.R, kind='linear') #we use this because UnivariateSpline can have problems if some values of x are equal

        return ret_func(x)

    def _set_denorm(self,Mmax):
        self.Mc=Mmax/self.mass_arr[-1]
        self.dc=self.Mc/(4*np.pi)
        self.pc=self.G*self.Mc
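A hedged usage sketch for GeneralModel; the Hernquist-like density law and all numbers are illustrative, and the private _evaluate* methods are called directly only for demonstration:

import numpy as np

R = np.logspace(-3, 2, 512)            # grid of x = r/rc
dens = 1.0/(R*(1.0 + R)**3)            # Hernquist-like density law rho(x)
model = GeneralModel(R, dens, rc=2.0, Mmax=1e10, G='kpc km2 / (M_sun s2)')
print(model._evaluatemass(10.0))       # enclosed mass at r = 10 (units set by Mmax)
print(model._evaluatedens(10.0))       # de-normalized density at r = 10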
コード例 #43
0
ファイル: mbhb.py プロジェクト: AstroVPK/kali
    def estimate(self, observedLC):
        """!
        Estimate intrinsicFlux, period, eccentricity, omega, tau, & a2sini
        """
        # fluxEst
        if observedLC.numCadences > 50:
            model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                        observedLC.y,
                                                                                        observedLC.yerr)
        else:
            model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(observedLC.t,
                                                                                    observedLC.y,
                                                                                    observedLC.yerr)
        periods, power = model.periodogram_auto(nyquist_factor=observedLC.numCadences)
        model.optimizer.period_range = (
            2.0*np.mean(observedLC.t[1:] - observedLC.t[:-1]), observedLC.T)
        periodEst = model.best_period
        numIntrinsicFlux = 100
        lowestFlux = np.min(observedLC.y[np.where(observedLC.mask == 1.0)])
        highestFlux = np.max(observedLC.y[np.where(observedLC.mask == 1.0)])
        intrinsicFlux = np.linspace(np.min(observedLC.y[np.where(observedLC.mask == 1.0)]), np.max(
            observedLC.y[np.where(observedLC.mask == 1.0)]), num=numIntrinsicFlux)
        intrinsicFluxList = list()
        totalIntegralList = list()
        for f in xrange(1, numIntrinsicFlux - 1):
            beamedLC = observedLC.copy()
            beamedLC.x = np.require(np.zeros(beamedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(beamedLC.numCadences):
                beamedLC.y[i] = observedLC.y[i]/intrinsicFlux[f]
                beamedLC.yerr[i] = observedLC.yerr[i]/intrinsicFlux[f]
            dopplerLC = beamedLC.copy()
            dopplerLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(observedLC.numCadences):
                dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
                dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
            dzdtLC = dopplerLC.copy()
            dzdtLC.x = np.require(np.zeros(dopplerLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            for i in xrange(observedLC.numCadences):
                dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
                dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
            foldedLC = dzdtLC.fold(periodEst)
            foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
            integralSpline = UnivariateSpline(
                foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
                1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=None, check_finite=True)
            totalIntegral = math.fabs(integralSpline.integral(foldedLC.t[0], foldedLC.t[-1]))
            intrinsicFluxList.append(intrinsicFlux[f])
            totalIntegralList.append(totalIntegral)
        fluxEst = intrinsicFluxList[
            np.where(np.array(totalIntegralList) == np.min(np.array(totalIntegralList)))[0][0]]

        # periodEst
        for i in xrange(beamedLC.numCadences):
            beamedLC.y[i] = observedLC.y[i]/fluxEst
            beamedLC.yerr[i] = observedLC.yerr[i]/fluxEst
            dopplerLC.y[i] = math.pow(beamedLC.y[i], 1.0/3.44)
            dopplerLC.yerr[i] = (1.0/3.44)*math.fabs(dopplerLC.y[i]*(beamedLC.yerr[i]/beamedLC.y[i]))
            dzdtLC.y[i] = 1.0 - (1.0/dopplerLC.y[i])
            dzdtLC.yerr[i] = math.fabs((-1.0*dopplerLC.yerr[i])/math.pow(dopplerLC.y[i], 2.0))
        if observedLC.numCadences > 50:
            model = gatspy.periodic.LombScargleFast(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                        dzdtLC.y,
                                                                                        dzdtLC.yerr)
        else:
            model = gatspy.periodic.LombScargle(optimizer_kwds={"quiet": True}).fit(dzdtLC.t,
                                                                                    dzdtLC.y,
                                                                                    dzdtLC.yerr)
        periods, power = model.periodogram_auto(nyquist_factor=dzdtLC.numCadences)
        model.optimizer.period_range = (2.0*np.mean(dzdtLC.t[1:] - dzdtLC.t[:-1]), dzdtLC.T)
        periodEst = model.best_period

        # eccentricityEst & omega2Est
        # First find a full period going from rising to falling.
        risingSpline = UnivariateSpline(
            dzdtLC.t[np.where(dzdtLC.mask == 1.0)], dzdtLC.y[np.where(dzdtLC.mask == 1.0)],
            1.0/dzdtLC.yerr[np.where(dzdtLC.mask == 1.0)], k=3, s=None, check_finite=True)
        risingSplineRoots = risingSpline.roots()
        firstRoot = risingSplineRoots[0]
        if risingSpline.derivatives(risingSplineRoots[0])[1] > 0.0:
            tRising = risingSplineRoots[0]
        else:
            tRising = risingSplineRoots[1]
        # Now fold the LC starting at tRising and going for a full period.
        foldedLC = dzdtLC.fold(periodEst, tStart=tRising)
        foldedLC.x = np.require(np.zeros(foldedLC.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        # Fit the folded LC with a spline to figure out alpha and beta
        fitLC = foldedLC.copy()
        foldedSpline = UnivariateSpline(
            foldedLC.t[np.where(foldedLC.mask == 1.0)], foldedLC.y[np.where(foldedLC.mask == 1.0)],
            1.0/foldedLC.yerr[np.where(foldedLC.mask == 1.0)], k=3, s=2*foldedLC.numCadences,
            check_finite=True)
        for i in xrange(fitLC.numCadences):
            fitLC.x[i] = foldedSpline(fitLC.t[i])
        # Now get the roots and find the falling root
        tZeros = foldedSpline.roots()

        # Find tRising, tFalling, tFull, startIndex, & stopIndex via DBSCAN #######################
        # Find the number of clusters
        '''dbsObj = DBSCAN(eps = periodEst/10.0, min_samples = 1)
        db = dbsObj.fit(tZeros.reshape(-1,1))
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        unique_labels = set(labels)
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)'''

        # Find tRising, tFalling, tFull, startIndex, & stopIndex
        if tZeros.shape[0] == 1:  # We have found just tFalling
            tFalling = tZeros[0]
            tRising = fitLC.t[0]
            startIndex = 0
            tFull = fitLC.t[-1]
            stopIndex = fitLC.numCadences
        elif tZeros.shape[0] == 2:  # We have found tFalling and one of tRising or tFull
            if foldedSpline.derivatives(tZeros[0])[1] < 0.0:
                tFalling = tZeros[0]
                tFull = tZeros[1]
                stopIndex = np.where(fitLC.t < tFull)[0][-1]
                tRising = fitLC.t[0]
                startIndex = 0
            elif foldedSpline.derivatives(tZeros[0])[1] > 0.0:
                if foldedSpline.derivatives(tZeros[1])[1] < 0.0:
                    tRising = tZeros[0]
                    startIndex = np.where(fitLC.t > tRising)[0][0]
                    tFalling = tZeros[1]
                    tFull = fitLC.t[-1]
                    stopIndex = fitLC.numCadences
                else:
                    raise RuntimeError(
                        'Could not determine alpha & omega correctly because the first root is rising but \
                        the second root is not falling!')
        elif tZeros.shape[0] == 3:
            tRising = tZeros[0]
            startIndex = np.where(fitLC.t > tRising)[0][0]
            tFalling = tZeros[1]
            tFull = tZeros[2]
            stopIndex = np.where(fitLC.t < tFull)[0][-1]
        else:
            # More than 3 roots!!! Use K-Means to cluster the roots assuming we have 3 groups
            root_groups = KMeans(n_clusters=3).fit_predict(tZeros.reshape(-1, 1))
            RisingGroupNumber = root_groups[0]
            FullGroupNumber = root_groups[-1]
            RisingSet = set(root_groups[np.where(root_groups != RisingGroupNumber)[0]])
            FullSet = set(root_groups[np.where(root_groups != FullGroupNumber)[0]])
            FallingSet = RisingSet.intersection(FullSet)
            FallingGroupNumber = FallingSet.pop()
            numRisingRoots = np.where(root_groups == RisingGroupNumber)[0].shape[0]
            numFallingRoots = np.where(root_groups == FallingGroupNumber)[0].shape[0]
            numFullRoots = np.where(root_groups == FullGroupNumber)[0].shape[0]

            if numRisingRoots == 1:
                tRising = tZeros[np.where(root_groups == RisingGroupNumber)[0]][0]
            else:
                RisingRootCands = tZeros[np.where(root_groups == RisingGroupNumber)[0]]
                for i in xrange(RisingRootCands.shape[0]):
                    if foldedSpline.derivatives(RisingRootCands[i])[1] > 0.0:
                        tRising = RisingRootCands[i]
                        break

            if numFallingRoots == 1:
                tFalling = tZeros[np.where(root_groups == FallingGroupNumber)[0]][0]
            else:
                FallingRootCands = tZeros[np.where(root_groups == FallingGroupNumber)[0]]
                for i in xrange(FallingRootCands.shape[0]):
                    if foldedSpline.derivatives(FallingRootCands[i])[1] < 0.0:
                        tFalling = FallingRootCands[i]
                        break

            if numFullRoots == 1:
                tFull = tZeros[np.where(root_groups == FullGroupNumber)[0]][0]
            else:
                FullRootCands = tZeros[np.where(root_groups == FullGroupNumber)[0]]
                for i in xrange(FullRootCands.shape[0]):
                    if foldedSpline.derivatives(FullRootCands[i])[1] > 0.0:
                        tFull = FullRootCands[i]
                        break
            startIndex = np.where(fitLC.t > tRising)[0][0]
            stopIndex = np.where(fitLC.t < tFull)[0][-1]
        #

        # One full period now goes from tRising to periodEst. The maxima occurs between tRising and tFalling
        # while the minima occurs between tFalling and tRising + periodEst. Find the minima and maxima
        alpha = math.fabs(fitLC.x[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
        beta = math.fabs(fitLC.x[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]])
        peakLoc = fitLC.t[np.where(np.max(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
        troughLoc = fitLC.t[np.where(np.min(fitLC.x[startIndex:stopIndex]) == fitLC.x)[0][0]]
        KEst = 0.5*(alpha + beta)
        delta2 = (math.fabs(foldedSpline.integral(tRising, peakLoc)) + math.fabs(
            foldedSpline.integral(troughLoc, tFull)))/2.0
        delta1 = (math.fabs(foldedSpline.integral(peakLoc, tFalling)) + math.fabs(
            foldedSpline.integral(tFalling, troughLoc)))/2.0
        eCosOmega2 = (alpha - beta)/(alpha + beta)
        eSinOmega2 = ((2.0*math.sqrt(alpha*beta))/(alpha + beta))*((delta2 - delta1)/(delta2 + delta1))
        eccentricityEst = math.sqrt(math.pow(eCosOmega2, 2.0) + math.pow(eSinOmega2, 2.0))
        tanOmega2 = math.fabs(eSinOmega2/eCosOmega2)
        if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
            omega2Est = math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == 1.0):
            omega2Est = 180.0 - math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == -1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
            omega2Est = 180.0 + math.atan(tanOmega2)*(180.0/math.pi)
        if (eCosOmega2/math.fabs(eCosOmega2) == 1.0) and (eSinOmega2/math.fabs(eSinOmega2) == -1.0):
            omega2Est = 360.0 - math.atan(tanOmega2)*(180.0/math.pi)
        if omega2Est >= 180.0:
            omega1Est = omega2Est - 180.0
        if omega2Est < 180.0:
            omega1Est = omega2Est + 180.0

        # tauEst
        zDot = KEst*(1.0 + eccentricityEst)*(eCosOmega2/eccentricityEst)
        zDotLC = dzdtLC.copy()
        for i in xrange(zDotLC.numCadences):
            zDotLC.y[i] = zDotLC.y[i] - zDot
        zDotSpline = UnivariateSpline(
            zDotLC.t[np.where(zDotLC.mask == 1.0)], zDotLC.y[np.where(zDotLC.mask == 1.0)],
            1.0/zDotLC.yerr[np.where(zDotLC.mask == 1.0)], k=3, s=2*zDotLC.numCadences, check_finite=True)
        for i in xrange(zDotLC.numCadences):
            zDotLC.x[i] = zDotSpline(zDotLC.t[i])
        zDotZeros = zDotSpline.roots()
        zDotFoldedLC = dzdtLC.fold(periodEst)
        zDotFoldedSpline = UnivariateSpline(
            zDotFoldedLC.t[np.where(zDotFoldedLC.mask == 1.0)],
            zDotFoldedLC.y[np.where(zDotFoldedLC.mask == 1.0)],
            1.0/zDotFoldedLC.yerr[np.where(zDotFoldedLC.mask == 1.0)], k=3, s=2*zDotFoldedLC.numCadences,
            check_finite=True)
        for i in xrange(zDotFoldedLC.numCadences):
            zDotFoldedLC.x[i] = zDotFoldedSpline(zDotFoldedLC.t[i])
        tC = zDotFoldedLC.t[np.where(np.max(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
        nuC = (360.0 - omega2Est)%360.0
        tE = zDotFoldedLC.t[np.where(np.min(zDotFoldedLC.x) == zDotFoldedLC.x)[0][0]]
        nuE = (180.0 - omega2Est)%360.0
        if math.fabs(360.0 - nuC) < math.fabs(360 - nuE):
            tauEst = zDotZeros[np.where(zDotZeros > tC)[0][0]]
        else:
            tauEst = zDotZeros[np.where(zDotZeros > tE)[0][0]]
        tauEst = tauEst%periodEst

        # a2sinInclinationEst
        a2sinInclinationEst = ((KEst*periodEst*self.Day*self.c*math.sqrt(1.0 - math.pow(
            eccentricityEst, 2.0)))/self.twoPi)/self.Parsec

        return fluxEst, periodEst, eccentricityEst, omega1Est, tauEst, a2sinInclinationEst
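The K-Means step above only needs to group nearby spline roots into three clusters; a tiny standalone sketch with made-up root locations:

import numpy as np
from sklearn.cluster import KMeans

tZeros = np.array([0.02, 0.03, 1.51, 1.52, 1.55, 2.98])
root_groups = KMeans(n_clusters=3).fit_predict(tZeros.reshape(-1, 1))
print(root_groups)   # e.g. [0 0 2 2 2 1]: the labels are arbitrary, the grouping is not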
コード例 #44
0
    if(len(curr_data) <= 3):
        curr_data = np.concatenate([curr_data, np.zeros((3,num_params))])

    time = np.arange(0, len(curr_data), 1) # the sample 'times' (0 to number of samples)

    acc_X = curr_data[:,0]
    acc_Y = curr_data[:,1]
    acc_Z = curr_data[:,2]

    # fit splines to each axis and take the 1st and 2nd antiderivatives

    # the interpolation representation
    tck_X = UnivariateSpline(time, acc_X, s=0)

    # integrals
    tck_X.integral = tck_X.antiderivative()
    tck_X.integral_2 = tck_X.antiderivative(2)

    # the interpolation representation
    tck_Y = UnivariateSpline(time, acc_Y, s=0)

    # integrals
    tck_Y.integral = tck_Y.antiderivative()
    tck_Y.integral_2 = tck_Y.antiderivative(2)

    # the interpolation representation
    tck_Z = UnivariateSpline(time, acc_Z, s=0)

    # integrals
    tck_Z.integral = tck_Z.antiderivative()
    tck_Z.integral_2 = tck_Z.antiderivative(2)
コード例 #45
0
def find_spline_maxima(xi,yi,min_normpeak=0.05,min_area_k=0.05):
#==============================================================================
#     description
#     extracts peaks from the gaussian_kde function,
#     such that peaks have a minimum normalized peak height and a minimum bounded area
#
#     inputs
#     xi,yi           points corresponding to the gaussian_kde function of the data
#     min_normpeak    minimum normalized peak height of the gaussian_kde function
#     min_area_k      proportionality constant multiplied by the maximum bounded area to compute the minimum bounded area
#
#     output
#     peaks[x,y]      the peak locations [x] and heights [y] of the gaussian_kde function
#==============================================================================
    
    #setting gaussian_kde points as spline    
    s0=UnivariateSpline(xi,yi,s=0)
    
    try:
        #first derivative (for getting extrema)
        dy=s0(xi,1)
        s1=UnivariateSpline(xi,dy,s=0)
        
        #second derivative (for getting inflection points)
        dy2=s1(xi,1)
        s2=UnivariateSpline(xi,dy2,s=0)
        
        #solving for extrema, maxima, and inflection points
        extrema=s1.roots()
        maxima=np.sort(extrema[(s2(extrema)<0)])
        inflection=np.sort(s2.roots())
        
        try:
            #setting up dataframe for definite integrals with inflection points as bounds            
            df_integ=pd.DataFrame()
            df_integ['lb']=inflection[:-1]
            df_integ['ub']=inflection[1:]

            #assigning maxima to specific ranges
            df_integ['maxloc']=np.nan            
            for i in range(len(df_integ)):
                try:
                    # assigning maxima[...] to a single cell raises (and is caught below)
                    # unless exactly one maximum falls inside the interval [lb, ub]
                    df_integ['maxloc'][i]=maxima[(maxima>df_integ['lb'][i])*(maxima<df_integ['ub'][i])]
                except:
                    continue
            
            #filtering maxima based on peak height and area
            df_integ.dropna(inplace=True)
            df_integ['maxpeak']=s0(df_integ['maxloc'])
            df_integ=df_integ[df_integ['maxpeak']>0.001]
            df_integ['normpeak']=df_integ['maxpeak']/df_integ['maxpeak'].values.max() #maxpeak already holds peak heights; normalize by the tallest peak
            df_integ['area']=df_integ.apply(lambda x: s0.integral(x[0],x[1]), axis = 1)
            df_integ=df_integ[(df_integ['area']>min_area_k*df_integ['area'].values.max())*(df_integ['normpeak']>min_normpeak)]
            return df_integ['maxloc'],df_integ['maxpeak']#,inflection[df_integ.index],s0(inflection[df_integ.index])

        except:
            #filtering maxima based on peak height only
            maxima=extrema[(s2(extrema)<0)*(s0(extrema)/s0(extrema).max()>min_normpeak)]
            return maxima,s0(maxima)#,inflection,s0(inflection)
    
    except:
        #unimodal kde
        return xi[np.argmax(yi)],yi.max()#,None,None
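A hedged usage sketch (bimodal synthetic data; it assumes the imports the function itself relies on - numpy as np, pandas as pd, UnivariateSpline - are in scope):

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(0.0, 1.0, 500), rng.normal(5.0, 0.5, 300)])
kde = gaussian_kde(data)
xi = np.linspace(data.min() - 1.0, data.max() + 1.0, 512)
yi = kde(xi)
peak_locs, peak_heights = find_spline_maxima(xi, yi)   # expect peaks near 0 and 5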
コード例 #46
0
    def length(self):
        der1 = self.__call__(self._param, nu=1)
        dl = sqrt((der1 ** 2).sum(axis=1))
        spl = UnivariateSpline(self._param, dl, k=self._degree, s=0.0)
        return spl.integral(self._param.min(), self._param.max())
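The method integrates the speed |dC/dt| of the parametric curve over the parameter range. A standalone check of the same idea on a unit circle (finite differences stand in for the spline's own derivative; expected length 2*pi):

import numpy as np
from scipy.interpolate import UnivariateSpline

t = np.linspace(0.0, 2.0*np.pi, 400)
curve = np.column_stack([np.cos(t), np.sin(t)])
der1 = np.gradient(curve, t, axis=0)        # dC/dt by finite differences
dl = np.sqrt((der1**2).sum(axis=1))         # local speed |dC/dt|
spl = UnivariateSpline(t, dl, k=3, s=0.0)
print(spl.integral(t.min(), t.max()))       # ~6.2832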