Example #1
def normalize(x_th, y_th, x_exp, y_exp):
    f_th = InterpolatedUnivariateSpline(x_th, y_th, k=1)
    f_exp = InterpolatedUnivariateSpline(x_exp, y_exp, k=2)

    a_th = f_th.integral(x_th[0], x_th[-1])
    a_exp = f_exp.integral(x_exp[0], x_exp[-1])

    return float(a_exp / a_th)
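A minimal usage sketch (synthetic data, not from the original project): the returned value is the ratio of the areas under the experimental and theoretical splines, i.e. the factor by which the theory curve must be scaled to match the experiment.

import numpy as np

x = np.linspace(0.0, 1.0, 50)
y_th = np.ones_like(x)           # theoretical curve, area 1.0
y_exp = 2.0 * np.ones_like(x)    # experimental curve, area 2.0
scale = normalize(x, y_th, x, y_exp)  # normalize as defined above
print(scale)  # ~2.0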
Example #2
File: lick.py Project: kadubarbosa/groups
 def classic_integration(self):
     k_order = 10
     # Define number of points for integration
     npoints = 2**k_order + 1
     self.R = np.zeros(self.bands.shape[0])
     self.Ia = np.zeros_like(self.R)
     self.Im = np.zeros_like(self.R)
     for i, w in enumerate(self.bands):
         if (w[0] - self.dw < self.wave[0]) or \
            (w[-1] + self.dw > self.wave[-1]):
             self.R[i] = np.nan
             self.Ia[i] = np.nan
             self.Im[i] = np.nan
             continue  # band falls (partially) outside the spectrum
         # Defining indices for each section
         idxb = np.where(((self.wave > w[0] - 2 * self.dw) &
                              (self.wave < w[1] + 2 * self.dw)))
         idxr = np.where(((self.wave > w[4] - 2 * self.dw) &
                             (self.wave < w[5] + 2 * self.dw)))
         idxcen = np.where(((self.wave > w[2] - 2 * self.dw) &
                             (self.wave < w[3] + 2 * self.dw)))
         # Defining wavelength samples
         wb = self.wave[idxb]
         wr = self.wave[idxr]
         wcen = self.wave[idxcen]
         # Defining intensity samples
         fb = self.galaxy[idxb]
         fr = self.galaxy[idxr]
         fcen = self.galaxy[idxcen]
         # Making interpolation functions
         sb = InterpolatedUnivariateSpline(wb, fb)
         sr = InterpolatedUnivariateSpline(wr, fr)
         # Make oversampled arrays of wavelengths
         xb = np.linspace(w[0], w[1], npoints)
         xr = np.linspace(w[4], w[5], npoints)
         xcen = np.linspace(w[2], w[3], npoints)
         # Calculating the mean fluxes for the pseudocontinuum
         fp1 = sb.integral(w[0], w[1]) / (w[1] - w[0])
         fp2 = sr.integral(w[4], w[5]) / (w[5] - w[4])
         # Making pseudocontinuum vector
         x0 = (w[2] + w[3])/2.
         x1 = (w[0] + w[1])/2.
         x2 = (w[4] + w[5])/2.
         fc = fp1 + (fp2 - fp1)/ (x2 - x1) * (wcen - x1)
         # Calculating indices
         ffc = InterpolatedUnivariateSpline(wcen, fcen/fc/(w[3]-w[2]))
         self.R[i] =  ffc.integral(w[2], w[3])
         self.Ia[i] = (1 - self.R[i]) * (w[3]-w[2])
         self.Im[i] = -2.5 * np.log10(self.R[i])
     self.classic = np.copy(self.Ia)
     idx = np.array([2,3,14,15,23,24])
     self.classic[idx] = self.Im[idx]
     return
Example #3
    def cost_curve() -> InterpolatedUnivariateSpline:
        try:
            interp = getattr(CostModel, "__cost_curve")
        except AttributeError:

            points = np.array([
                [0, 1000 / 25000],
                [12500, 1400 / 25000],
                [37500, 2200 / 25000],
                [62500, 3000 / 25000],
                [87500, 3900 / 25000],
                [112500, 4100 / 25000],
                [137500, 4400 / 25000],
                [162500, 4800 / 25000],
                [187500, 5000 / 25000],
                # fake numbers below roughly extrapolating
                [300000, 6000 / 25000],
                [400000, 7000 / 25000],
                [500000, 8000 / 25000],
            ])
            base_interp = InterpolatedUnivariateSpline(points[:, 0], points[:, 1])
            norm = base_interp.integral(0, 75000)

            points[:, 1] /= norm
            interp = InterpolatedUnivariateSpline(points[:, 0], points[:, 1])

            setattr(CostModel, "__cost_curve", interp)

        return interp
Example #4
    def __init__(self,
                 x_points,
                 pdf_points,
                 transform='interval',
                 *args,
                 **kwargs):
        if transform == 'interval':
            transform = transforms.interval(x_points[0], x_points[-1])
        super(Interpolated, self).__init__(transform=transform,
                                           *args,
                                           **kwargs)

        interp = InterpolatedUnivariateSpline(x_points,
                                              pdf_points,
                                              k=1,
                                              ext='zeros')
        Z = interp.integral(x_points[0], x_points[-1])

        self.Z = tt.as_tensor_variable(Z)
        self.interp_op = DifferentiableSplineWrapper(interp)
        self.x_points = x_points
        self.pdf_points = pdf_points / Z
        self.cdf_points = interp.antiderivative()(x_points) / Z

        self.median = self._argcdf(0.5)
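The normalization step can be checked in isolation. A minimal sketch (plain scipy, no pymc3 or theano required) showing that dividing by Z = interp.integral(...) makes the interpolated pdf integrate to 1:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x_points = np.linspace(-3.0, 3.0, 41)
pdf_points = np.exp(-x_points**2 / 2.0)     # unnormalized density
interp = InterpolatedUnivariateSpline(x_points, pdf_points, k=1, ext='zeros')
Z = interp.integral(x_points[0], x_points[-1])
normalized = InterpolatedUnivariateSpline(x_points, pdf_points / Z, k=1)
print(normalized.integral(x_points[0], x_points[-1]))  # ~1.0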
Example #5
def get_args(i):
    Ms = np.array([])
    bs = np.array([])
    tbs = np.array([])
    pd = np.array([])
    pde = np.array([])
    bes = np.array([])
    icovs = np.array([])
    boxes = np.array([])
    snaps = np.array([])
    cosmo, h, Omega_m = get_cosmo(i)
    hmf = mf_obj(i)
    k = np.logspace(-5, 1, num=1000) #Mpc^-1
    kh = k/h
    nus = [] #sigma^2
    for j in range(0,10): #snap
        z = zs[j]
        M, Mlo, Mhigh, b, be = np.loadtxt("/Users/tmcclintock/Data/linear_bias_test/TestBox%03d-combined_Z%d_DS50_linearbias.txt"%(i,j)).T
        M = np.ascontiguousarray(M)
        Mlo = np.ascontiguousarray(Mlo)
        Mhigh = np.ascontiguousarray(Mhigh)
        inds = Mhigh > 1e99
        Mhigh[inds] = 1e16
        p = np.array([cosmo.pk_lin(ki, z) for ki in k])*h**3
        nu = ph.nu_at_M(M, kh, p, Omega_m)

        #Replace this part with the average bias
        Mbins = np.array([Mlo, Mhigh]).T
        n_bins = hmf.n_in_bins(Mbins, z) #Denominator

        Marr = np.logspace(np.log10(M[0]*0.98), 16, 1000)
        lMarr = np.log(Marr)
        nuarr = ph.nu_at_M(Marr, kh, p, Omega_m)
        dndlm = hmf.dndlM(Marr, z)
        b_n = dndlm * ct.bias.bias_at_nu(nuarr)
        b_n_spl = IUS(lMarr, b_n)
        lMbins = np.log(Mbins)
        tb = np.zeros_like(nu)
        for ind in range(len(tb)):
            tbi = quad(b_n_spl, lMbins[ind,0], lMbins[ind,1])
            tbi2 = b_n_spl.integral(lMbins[ind,0], lMbins[ind,1])
            #print tbi[0]/n_bins[ind], tbi2/n_bins[ind]
            tb[ind] = tbi[0] / n_bins[ind]
        #print b
        #exit()
        #print tb
        #print ct.bias.bias_at_nu(nu) #instantaneous tinker bias
        #tb = ct.bias.bias_at_nu(nu) #instantaneous tinker bias

        tbs = np.concatenate((tbs, tb))

        Ms=np.concatenate((Ms, M))
        bs=np.concatenate((bs, b))
        bes=np.concatenate((bes, be))
        nus = np.concatenate((nus, nu))
        pd = np.concatenate((pd, (b-tb)/tb))
        pde = np.concatenate((pde, be/tb))
        boxes = np.concatenate((boxes, np.ones_like(M)*i))
        snaps = np.concatenate((snaps, np.ones_like(M)*j))
    return nus, bs, bes, Ms, tbs, pd, pde, boxes, snaps
Example #6
def ode_solver(y, z1, z, alpha, sigmadd_initial, hdd_initial, rho_dh, rho_dd):
    limit = 0
    for i in range(20):
        sol2 = odeint(func_dark, y, z1,
                      args=(rho_dh[i],
                            rho_dd[i]))  #ensure it takes different values
        sol_dd = np.concatenate(
            (list(reversed(sol2[:, 0])), sol2[:, 0]),
            axis=0)  #workaround for obtaining symmetric plot
        phi_dd = InterpolatedUnivariateSpline(z, sol_dd)
        rho_dh.append(rhoDH_constrain / exp(-phi_dd(2500) /
                                            (2 * vel_disp_old[15])))
        rho_z_dd = InterpolatedUnivariateSpline(z1, [
            rho0_old[16] * exp(-x / (2 * vel_disp_old[16])) for x in sol2[:, 0]
        ])
        sigmadd = 2 * rho_z_dd.integral(0, 3000)
        rho_dd.append(pow((sigmadd_initial / sigmadd), alpha) * rho_dd[i])
        if (abs(sigmadd - sigmadd_initial) / sigmadd_initial) <= 0.01:
            break

    def func_hsol(x):
        return rho_z_dd(x) - rho0_old[16] * pow(sech(1 / 2), 2)

    hdd_sech = brentq(func_hsol, limit, hdd_initial)

    return sigmadd, hdd_sech, sol_dd
Example #7
def get_av_radius(R, z, S, R_ax, z_ax):
    # S is the integrated arc length
    s = np.linspace(0.0, S, len(R))
    r_spline = InterpolatedUnivariateSpline(s, np.sqrt((R - R_ax) ** 2 + (z - z_ax) ** 2))
#    plt.plot(s, r_spline(s))
#    plt.show()
    return r_spline.integral(0.0, S) / S
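A quick sanity check with synthetic data (not from the original code): for points lying on a circle of radius 2 around the axis, the arc-length-averaged radius returned by get_av_radius is 2.

import numpy as np

theta = np.linspace(0.0, np.pi, 100)
R = 1.0 + 2.0 * np.cos(theta)   # flux surface around axis (R_ax, z_ax) = (1, 0)
z = 2.0 * np.sin(theta)
S = 2.0 * np.pi                 # arc length of the half circle of radius 2
print(get_av_radius(R, z, S, 1.0, 0.0))  # ~2.0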
Example #8
def g2_precise(alpha, beta, u):
    # Use 0.01 as lower boundary to avoid the divergence at u = 0
    u_int = np.linspace(0.01, np.max(u), 120)
    gamma_int = np.sqrt(1.0 + u_int**2)
    g2_int = u_int**4 / gamma_int**2 * (u_int / (gamma_int + 1.0))**beta
    g2_spl = InterpolatedUnivariateSpline(u_int, g2_int)
    gamma = np.sqrt(1.0 + u**2)
    #    plt.plot(u_int, g2_int)
    #    plt.show()
    if (np.isscalar(u)):
        g2 = alpha * ((gamma + 1.e0) / u)**beta * g2_spl.integral(0.01, u)
    else:
        g2 = np.zeros(len(u))
        for i in range(len(u)):
            g2[i] = alpha * (
                (gamma[i] + 1.e0) / u[i])**beta * g2_spl.integral(0.01, u[i])
    return g2
Example #9
def estimate_lorentzian_dip(self, x_axis, data, params):
    """ Provides an estimator to obtain initial values for lorentzian function.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which
                                    can be set

    @return tuple (error, params):

    Explanation of the return parameter:
        int error: error code (0:OK, -1:error)
        Parameters object params: set parameters of initial values
    """
    # check if parameters make sense
    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # check if input x-axis is ordered and increasing
    sorted_indices = np.argsort(x_axis)
    if not np.all(sorted_indices == np.arange(len(x_axis))):
        x_axis = x_axis[sorted_indices]
        data = data[sorted_indices]

    data_smooth, offset = self.find_offset_parameter(x_axis, data)

    # data_level = data-offset
    data_level = data_smooth - offset

    # calculate from the leveled data the amplitude:
    amplitude = data_level.min()

    smoothing_spline = 1  # must be 1<= smoothing_spline <= 5
    fit_function = InterpolatedUnivariateSpline(x_axis,
                                                data_level,
                                                k=smoothing_spline)
    numerical_integral = fit_function.integral(x_axis[0], x_axis[-1])

    x_zero = x_axis[np.argmin(data_smooth)]

    # according to the derived formula, calculate sigma. The crucial part is
    # here that the offset was estimated correctly, then the area under the
    # curve is calculated correctly:
    sigma = np.abs(numerical_integral / (np.pi * amplitude))

    # auxiliary variables
    stepsize = x_axis[1] - x_axis[0]
    n_steps = len(x_axis)

    params['amplitude'].set(value=amplitude, max=-1e-12)
    params['sigma'].set(value=sigma,
                        min=stepsize / 2,
                        max=(x_axis[-1] - x_axis[0]) * 10)
    params['center'].set(value=x_zero,
                         min=(x_axis[0]) - n_steps * stepsize,
                         max=(x_axis[-1]) + n_steps * stepsize)
    params['offset'].set(value=offset)

    return error, params
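The "derived formula" here is the area of a Lorentzian: for a dip A sigma^2 / ((x - x0)^2 + sigma^2) with A < 0, the integral over the whole line is pi * A * sigma, hence sigma ~ |I / (pi * A)|. A numerical check of that identity with synthetic data, using the same spline-integration pattern as above:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(-50.0, 50.0, 2001)
A, sigma, x0 = -2.0, 1.5, 0.0
dip = A * sigma**2 / ((x - x0)**2 + sigma**2)
integral = InterpolatedUnivariateSpline(x, dip, k=1).integral(x[0], x[-1])
print(np.abs(integral / (np.pi * A)))  # ~1.47, approaching sigma = 1.5 as the window grows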
Example #10
def frac_in_halos(zvals, Mlow, Mhigh, rmax=1.):
    """
    Calculate the fraction of dark matter in collapsed halos
     over a mass range and at a given redshift

    Note that the fraction of DM associated with these halos
    will be scaled down by an additional factor of f_diffuse

    Requires Aemulus HMF to be installed

    Args:
        zvals: ndarray
        Mlow: float
          In h^-1 units already so this will be applied for the halo mass function
        Mhigh: float
          In h^-1 units already
        rmax: float
          Extent of the halo in units of rvir

    Returns:
        ratios: ndarray
          rho_halo / rho_m
    """
    hmfe = init_hmf()

    M = np.logspace(np.log10(Mlow * cosmo.h),
                    np.log10(Mhigh * cosmo.h),
                    num=1000)
    lM = np.log(M)

    ratios = []
    for z in zvals:
        a = 1. / (1.0 + z)  # scale factor

        # Setup
        #dndlM = np.array([hmfe.dndlnM(Mi, a)[0] for Mi in M])
        dndlM = hmfe.dndlnM(M, z)
        M_spl = IUS(lM, M * dndlM)

        # Integrate
        rho_tot = M_spl.integral(np.log(Mlow * cosmo.h), np.log(
            Mhigh * cosmo.h)) * units.M_sun / units.Mpc**3
        # Cosmology
        rho_M = cosmo.critical_density(z) * cosmo.Om(z) / (
            1 + z)**3  # Tinker calculations are all mass
        ratio = (rho_tot * cosmo.h**2 / rho_M).decompose()
        #
        ratios.append(ratio)
    ratios = np.array(ratios)
    # Boost halos if extend beyond rvir (homologous in mass, but constant concentration is an approx)
    if rmax != 1.:
        #from pyigm.cgm.models import ModifiedNFW
        c = 7.7
        nfw = ModifiedNFW(c=c)
        M_ratio = nfw.fy_dm(rmax * nfw.c) / nfw.fy_dm(nfw.c)
        ratios *= M_ratio
    # Return
    return np.array(ratios)
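Written out, the integral the spline evaluates is (a transcription of the code above, not extra functionality):

    \rho_{\rm halo}(z) = \int_{\ln(M_{\rm low} h)}^{\ln(M_{\rm high} h)} M \, \frac{dn}{d\ln M} \, d\ln M , \qquad \mathrm{ratio} = \frac{\rho_{\rm halo}}{\rho_m}

where \rho_m = \rho_{\rm crit}(z) \, \Omega_m(z) / (1+z)^3 is the comoving mean matter density and the factor h^2 converts the emulator's h-based units to the physical units of rho_M.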
Example #11
def std_func(aid, balance):
    x, y, min_x, max_x = extract_points_from_aid(aid)
    if len(x) == 1 or len(y) == 1:
        return 0
    y2 = []
    for yy in y:
        y2.append((yy - balance) ** 2)

    f = InterpolatedUnivariateSpline(x, y2, k=1)
    return f.integral(min_x, max_x) / (max_x - min_x)
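In effect std_func returns the continuous mean of (y - balance)^2 over [min_x, max_x], a variance-like measure around balance. A tiny check of the core pattern (bypassing the project-specific extract_points_from_aid):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0.0, 10.0, 20)
y = np.full_like(x, 7.0)
balance = 5.0
f = InterpolatedUnivariateSpline(x, (y - balance)**2, k=1)
print(f.integral(x[0], x[-1]) / (x[-1] - x[0]))  # 4.0 = (7 - 5)**2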
Example #12
 def classic_integration(self):
     self.R = np.zeros(self.bands.shape[0])
     self.Ia = np.zeros_like(self.R)
     self.Im = np.zeros_like(self.R)
     for i, w in enumerate(self.bands):
         if (w[0] < self.wave[0]) or (w[-1] > self.wave[-1]):
             self.R[i] = np.nan
             self.Ia[i] = np.nan
             self.Im[i] = np.nan
             continue
         # Defining indices for each section
         idxb = np.where(((self.wave > w[0]) & (self.wave < w[1])))
         idxr = np.where(((self.wave > w[4]) & (self.wave < w[5])))
         idxcen = np.where(((self.wave > w[2]) & (self.wave < w[3])))
         # Defining wavelength samples
         wb = self.wave[idxb]
         wr = self.wave[idxr]
         wcen = self.wave[idxcen]
         # Defining intensity samples
         fb = self.galaxy[idxb]
         fr = self.galaxy[idxr]
         fcen = self.galaxy[idxcen]
         # Making interpolation functions
         sb = InterpolatedUnivariateSpline(wb, fb)
         sr = InterpolatedUnivariateSpline(wr, fr)
         # Calculating the mean fluxes for the pseudocontinuum
         fp1 = sb.integral(w[0], w[1]) / (w[1] - w[0])
         fp2 = sr.integral(w[4], w[5]) / (w[5] - w[4])
         # Making pseudocontinuum vector
         x0 = (w[2] + w[3]) / 2.
         x1 = (w[0] + w[1]) / 2.
         x2 = (w[4] + w[5]) / 2.
         fc = fp1 + (fp2 - fp1) / (x2 - x1) * (wcen - x1)
         # Calculating indices
         ffc = InterpolatedUnivariateSpline(wcen, fcen / fc / (w[3] - w[2]))
         self.R[i] = ffc.integral(w[2], w[3])
         self.Ia[i] = (1 - self.R[i]) * (w[3] - w[2])
         self.Im[i] = -2.5 * np.log10(self.R[i])
     self.classic = np.copy(self.Ia)
     idx = np.array([2, 3, 14, 15, 23, 24])
     self.classic[idx] = self.Im[idx]
     return
Example #13
    def integrate(self, quantity, x, t):
        """Integrates a quantity either in fixed time from x_low to x_high, or in fixed x from t_low to t_high.

        :param quantity: (str) currently recognised are 'ppot', 'ne', 'ni', 'nnet' (= ni - ne)
        :param x: (float or array of length 2) either static x or x = (x_low, x_high)
        :param t: (float or array of length 2) either static t or t = (t_low, t_high)
        :return: (float) definite integral along specified axis between specified bounderies
        """
        try:
            x = float(x)  # to test if x is a number
            try:
                if len(t) != 2:
                    raise ValueError('One of arguments must be number and other iterable of length 2!')
            except TypeError:
                raise ValueError('One of arguments must be number and other iterable of length 2!')
            # integrating over time with static x
            p_low, p_high = t
            try:
                spline = self.memo[('1DSpline', quantity, 'x', x)]  # try to pull spline for integration from memo
            except KeyError:
                points = self.data[quantity].get_knots()[1][1:-1]
                func = self.evaluate(quantity, x, points)  # evaluation of spline in knots
                spline = InterpolatedUnivariateSpline(points, func, k=1)  # build the spline for integration
                self.memo[('1DSpline', quantity, 'x', x)] = spline  # save the spline for integration into memo
        except TypeError:
            try:
                t = float(t)  # to test if t is number
                try:
                    if len(x) != 2:
                        raise ValueError('One of arguments must be number and other iterable of length 2!')
                except TypeError:
                    raise ValueError('One of arguments must be number and other iterable of length 2!')
                # integrating over x with static time
                p_low, p_high = x
                try:
                    spline = self.memo[('1DSpline', quantity, 't', t)]  # try to pull spline for integration from memo
                except KeyError:
                    points = self.data[quantity].get_knots()[0][1:-1]
                    func = self.evaluate(quantity, points, t)  # evaluation of spline in knots
                    spline = InterpolatedUnivariateSpline(points, func, k=1)  # build the spline for integration
                    self.memo[('1DSpline', quantity, 't', t)] = spline  # save the spline for integration into memo
            except TypeError:
                raise ValueError('One of arguments must be number and other iterable of length 2!')

        return spline.integral(p_low, p_high)
Example #14
def field_rotation(ephem, ts, data_dir, lat=-37.814, lon=144.96332, el=0):

    f_names = sorted([f for f in Path(data_dir).glob("*.tif")])
    loc = earth_loc(lat=lat, lon=lon, el=el)
    lat = loc.latitude.degrees

    field_rot = {}
    field_rot["timestamp"] = []
    field_rot["alt"] = []
    field_rot["az"] = []
    field_rot["ror"] = []

    gps_t = []

    for f in f_names:
        timestamp = winjupos_time(ts, f)
        dt = timestamp.utc_datetime()
        gps = int(Time(dt).gps)
        alt, az = planet_vector(ephem, timestamp, loc)
        ror = rotation_rate(lat, alt, az)

        field_rot["timestamp"].append(dt)
        field_rot["alt"].append(alt)
        field_rot["az"].append(az)
        field_rot["ror"].append(ror)
        gps_t.append(gps)

    ΔT = np.array(gps_t) - gps_t[0]
    field_rot["delta_t"] = ΔT

    # Spline interpolate data to get smooth function for rotation rate
    f = InterpolatedUnivariateSpline(np.array(field_rot["delta_t"]),
                                     np.array(field_rot["ror"]),
                                     k=3)

    # Total rotation of each observation from the beginning of the night
    rot_tot = [
        f.integral(field_rot["delta_t"][0], i) for i in field_rot["delta_t"]
    ]
    field_rot["rot_tot"] = rot_tot

    for i, file in enumerate(f_names):
        rot_image(file=file, ini_angle=-70, angle=rot_tot[i], border=False)

    return field_rot
Example #15
def current_weight(DistWaveFile,
                   shot,
                   time,
                   EQ_exp,
                   EQ_diag,
                   EQ_ed,
                   fig=None,
                   ax=None):
    dist_obj = load_f_from_mat(DistWaveFile, True)
    dist_mat = loadmat(DistWaveFile, squeeze_me=True)
    waves = read_dist_mat_to_beam(dist_mat, True)
    ECCD_weight = np.zeros(dist_obj.f_cycl[0].shape)
    j_spl = InterpolatedUnivariateSpline(waves.rhop, waves.j)
    j_tot = j_spl.integral(waves.rhop[0], waves.rhop[-1])
    EQ_aug_obj = EQData(shot, EQ_exp=EQ_exp, EQ_diag=EQ_diag, EQ_ed=EQ_ed)
    EQ_aug_obj.init_read_from_shotfile()
    Vol = EQ_aug_obj.getQuantity(dist_obj.rhop, "Vol", time)
    Vol_spl = InterpolatedUnivariateSpline(dist_obj.rhop, Vol)
    for irhop, rhop in enumerate(dist_obj.rhop):
        weight = np.abs(waves.j[irhop] / j_tot) * Vol_spl(rhop, nu=1)
        for i, u_par in enumerate(dist_obj.ull):
            for j, u_perp in enumerate(dist_obj.uxx):
                f_diff = dist_obj.f_cycl[irhop][i, j] - Juettner2D(
                    u_par, u_perp, dist_obj.Te[irhop])
                ECCD_weight[i,
                            j] += weight * f_diff * u_par / np.sqrt(1.0 +
                                                                    u_par**2 +
                                                                    u_perp**2)
    ECCD_weight /= np.max(np.abs(ECCD_weight.flatten()))
    if (fig is None):
        fig = plt.figure()
    if (ax is None):
        ax = fig.add_subplot(111)
    ax.contourf(dist_obj.uxx, dist_obj.ull, ECCD_weight, \
                 levels = np.linspace(-1,1,30), cmap = plt.get_cmap("coolwarm"))
    ax.set_ylabel(r"$u_\parallel$")
    ax.set_xlabel(r"$u_\perp$")
    m = cm.ScalarMappable(cmap=plt.cm.get_cmap("coolwarm"))
    m.set_array(np.linspace(-1.0, 1.0, 30))
    cb_j = fig.colorbar(m, pad=0.15, ticks=[-1.0, 0.0, 1.0])
    cb_j.set_label(r"$(f - f_0) \beta_\parallel [\si{a.u.}]$")
    ax.set_aspect("equal")
Example #16
 def compute_integral(self, x_dict, f_dict, F_dict):
     f_h = {}
     F_ly = {}
     f_y = {}
     product = {}
     int_dict = {}
     
     for key1 in F_dict.keys():
         f_h[key1] = {}
         F_ly[key1] = {}
         f_y[key1] = {}
         product[key1] = {}
         int_dict[key1] = {}
         
         for key2 in F_dict[key1].keys():
             f_h[key1][key2] = np.histogram(f_dict[key1][key2][0],
                                            density=True, bins=self.bins)
             
             F_ly[key1][key2] = np.log(F_dict[key1][key2][0])
             F_ly[key1][key2][(F_ly[key1][key2]==-np.inf)] = 0
             
             freq, bins = f_h[key1][key2][0], f_h[key1][key2][1]
             freq = np.concatenate((np.array([0]), freq))
             f_y[key1][key2] = np.interp(x_dict[key1][key2], bins, freq)
             
             product[key1][key2] = f_y[key1][key2]*F_ly[key1][key2]
             
             I = InterpolatedUnivariateSpline(x_dict[key1][key2],
                                              product[key1][key2])
             int_dict[key1][key2] = I.integral(0, np.max(x_dict[key1][key2]))
         
     idxs = self.d_values
     cols = self.n_values
     df = pd.DataFrame(np.zeros((len(idxs), len(cols))),
                       index=idxs, columns=cols)
     for idx in df.index:
         df.loc[idx] = int_dict[str(idx)].values()
     
     return df
Example #17
def non_limber_integral(ell,
                        chimin,
                        chimax,
                        nchi,
                        fftlog_kernel_interp1,
                        fftlog_kernel_interp2,
                        pk0_interp_loglog,
                        chi_pad_factor=1):
    """full integral is \int_0^\inf k^{-1} dk P(k,0) I_1(k) I_2(k)
    where I_1(k) = \int_0^{\inf} k dr_1 F_1(r_1) r^{-0.5} D(r_1) J_{l+0.5}(kr_1),
    and F_1(r_1) is the radial kernel for tracer 1.
    We want to use a fftlog for the I_1(k) calculation, so write it in the form 
    I_1(k) = \int_0^{\inf} k dr_1 f_1(r_1) J_{mu}(kr_1).
    So f_1(r_1_) = F_1(r_1) D(r_1) r^{-0.5}, this is what should be passed as the fftlog_kernel_interp1 spline.
    We actually do the integral in log(k), so calculate \int_0^\inf dlogk P(k,0) I_1(k) I_2(k).
    """
    q = 0
    log_chimin, log_chimax = np.log(chimin), np.log(chimax)
    log_chimin_padded, log_chimax_padded = log_chimin - chi_pad_factor, log_chimax + chi_pad_factor
    log_chi_vals = np.linspace(log_chimin_padded, log_chimax_padded,
                               nchi + 2 * chi_pad_factor)
    chi_vals = np.exp(log_chi_vals)
    kernel1_vals = fftlog_kernel_interp1(chi_vals)
    k_vals, I_1 = fft_log(chi_vals, kernel1_vals, q, ell + 0.5)
    if fftlog_kernel_interp2 is fftlog_kernel_interp1:
        I_2 = I_1
    else:
        kernel2_vals = fftlog_kernel_interp2(chi_vals)
        _, I_2 = fft_log(chi_vals, kernel2_vals, q, ell + 0.5)
    pk_vals = np.exp(pk0_interp_loglog(np.log(k_vals)))
    #Now we can compute the full integral \int_0^\infty k^{-1} dk P(k,0) I_1(k) I_2(k)
    #We have values logspaced in k, so integrate in log k: \int_0^\infty dlog(k) P(k,0) I_1(k) I_2(k)
    #integrand_vals = k_vals * k_vals * pk_vals * I_1 * I_2
    integrand_vals = pk_vals * I_1 * I_2
    logk_vals = np.log(k_vals)
    integrand_interp = IUS(logk_vals, integrand_vals)
    integral = integrand_interp.integral(logk_vals.min(), logk_vals.max())
    return integral
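The change of variables the docstring relies on is \int f(k) k^{-1} dk = \int f(k) d\log k. A minimal sketch checking that identity with the same spline-integration pattern (synthetic integrand, none of the fftlog machinery):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS

k = np.logspace(-3, 1, 500)
f = np.exp(-(np.log(k) + 1.0)**2)   # arbitrary smooth function of log k
logk = np.log(k)
lhs = IUS(k, f / k).integral(k[0], k[-1])        # \int f(k) k^{-1} dk
rhs = IUS(logk, f).integral(logk[0], logk[-1])   # \int f dlog(k)
print(lhs, rhs)  # agree to spline accuracy (~1.77 each)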
Example #18
    def compute(self,
                inputs,
                outputs,
                discrete_inputs=None,
                discrete_outputs=None):
        x, y_u, y_l, _, t = self.compute_coords(inputs)

        # Compute the t/c and cross-sectional area of the airfoil
        outputs["t_c"] = np.max(t)
        outputs["A_cs"] = np.trapz(t, x)

        # Compute the area, as fraction of the bin width,
        # enclosed by the upper and lower surfaces for each bin.
        f_t = InterpolatedUnivariateSpline(x, t)
        n_area_bins = self.options["n_area_bins"]
        dx_bins = 1 / n_area_bins
        outputs["A_bins"] = [
            f_t.integral(i * dx_bins, (i + 1) * dx_bins) / dx_bins
            for i in range(n_area_bins)
        ]

        # Compute the leading edge radius of the airfoil
        xs = np.concatenate((np.flip(x), x[1:]))
        ys = np.concatenate((np.flip(y_u), y_l[1:]))

        dx = np.gradient(xs)
        dy = np.gradient(ys)

        d2x = np.gradient(dx)
        d2y = np.gradient(dy)

        curvature = np.abs(d2x * dy - dx * d2y) / (dx * dx + dy * dy)**1.5
        if (np.isnan(curvature[x.size]) or np.isinf(curvature[x.size])
                or curvature[x.size] == 0.0):
            outputs["r_le"] = 0.0
        else:
            outputs["r_le"] = 1.0 / curvature[x.size]
Example #19
    def sheath_integral(self, electrode='p'):
        """Calculates sheath integral at the specified electrode.

        :param electrode: (str) 'p' for powered electrode and 'g' for grounded electrode
        :return: (float) sheath integral
        """
        t_sm, sm = self.sheath_edge_max(electrode=electrode)
        x_points = self.data['ni'].get_knots()[0][1:-1]  # x knot points for the spline holding the ni data
        ni = self.evaluate('ni', x_points, t=t_sm)  # ion density in the time of maximum sheath expansion
        # calculation of average ion density in sheath at the time of maximum expansion:
        if electrode == 'p':
            ns_max = self.integrate('ni', (0., sm), t_sm)/sm
        else:
            ns_max = self.integrate('ni', (sm, self.x[-1]), t_sm)/(self.x[-1] - sm)
        # transformation to grounded electrode system:
        if electrode == 'g':
            ni = np.flip(ni, axis=0)
            sm = self.x[-1] - sm
        # transformation to the sheath integral system
        ps_xi = ni/ns_max
        xi = x_points/sm
        f = InterpolatedUnivariateSpline(xi, ps_xi*xi, k=1)
        i_s = 2*f.integral(0., 1.)
        return i_s
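In symbols, the quantity returned above is (a transcription of the code, using the docstring's names)

    I_s = 2 \int_0^1 \frac{n_i(\xi s_m)}{\bar{n}_s} \, \xi \, d\xi , \qquad \xi = x / s_m ,

where \bar{n}_s is the average ion density in the sheath at the time of maximum expansion.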
Example #20
def get_field_rotation_power_from_PK(params,
                                     PK,
                                     chi_source,
                                     lmax=20000,
                                     acc=1,
                                     lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)),
                    np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(L,L') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source)**2 / chis**2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, ell in enumerate(ls):
        k = (ell + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k**4, cchi)
        M[i, :] = cl * ell**4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc),
                           np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = InterpolatedUnivariateSpline(ls, diagm)

    def high_curl_integrand(_ll, _lp):
        _lp = _lp.astype(int)
        r2 = (np.float64(_ll) / _lp)**2
        return _lp * r2 * diagmsp(_lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):

        ell = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, ell * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in L', with denser around L~L'
            lcalc = np.hstack(
                (lsall[0:20:4], lsall[29:ll - 200:35],
                 lsall[ll - 190:ll + 210:6], lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi,
                              (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / ell
            lfact = np.sqrt(1 + lrat**2 - 2 * cosphi * lrat)
            lnorm = ell * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0

            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact**2 *
                                        (cosphi - lrat))**2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = InterpolatedUnivariateSpline(lcalc, tmps)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi)**2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
Example #21
    def classic_integration(self):
        """ Calculation of Lick indices using spline integration.

        ===========
        Attributes:
        ===========
            R (array):
                Raw integration values for the Lick indices.

            Ia (array):
                Indices measured in equivalent widths.

            Im (array):
                Indices measured in magnitudes.

            classic (array):
                Indices measured according to the conventional
                units, mixing equivalent widths and magnitudes.
        """
        self.R = np.zeros(self.bands.shape[0])
        self.Ia = np.zeros_like(self.R)
        self.Im = np.zeros_like(self.R)
        for i, w in enumerate(self.bands):
            condition = np.array([
                w[0] - self.dw > self.wave[0], w[-1] + self.dw < self.wave[-1]
            ])
            if not np.all(condition):
                self.R[i] = np.nan
                self.Ia[i] = np.nan
                self.Im[i] = np.nan
                continue
            # Defining indices for each section
            idxb = np.where(
                ((self.wave > w[0] - self.dw) & (self.wave < w[1] + self.dw)))
            idxr = np.where(
                ((self.wave > w[4] - self.dw) & (self.wave < w[5] + self.dw)))
            idxcen = np.where(
                ((self.wave > w[2] - self.dw) & (self.wave < w[3] + self.dw)))
            # Defining wavelength samples
            wb = self.wave[idxb]
            wr = self.wave[idxr]
            wcen = self.wave[idxcen]
            # Defining intensity samples
            fb = self.galaxy[idxb]
            fr = self.galaxy[idxr]
            fcen = self.galaxy[idxcen]
            # Interpolation functions for pseudocontinuum
            sb = InterpolatedUnivariateSpline(wb, fb)
            sr = InterpolatedUnivariateSpline(wr, fr)
            # Calculating the mean fluxes for the pseudocontinuum
            fp1 = sb.integral(w[0], w[1]) / (w[1] - w[0])
            fp2 = sr.integral(w[4], w[5]) / (w[5] - w[4])
            # Making pseudocontinuum vector
            x1 = (w[0] + w[1]) / 2.
            x2 = (w[4] + w[5]) / 2.
            fc = fp1 + (fp2 - fp1) / (x2 - x1) * (wcen - x1)
            # Calculating indices
            ffc = InterpolatedUnivariateSpline(wcen, fcen / fc / (w[3] - w[2]))
            self.R[i] = ffc.integral(w[2], w[3])
            self.Ia[i] = (1 - self.R[i]) * (w[3] - w[2])
            self.Im[i] = -2.5 * np.log10(self.R[i])
        self.Ia = self.Ia * u.AA
        self.Im = self.Im * u.mag
        idx = np.where([_ == u.Unit("mag") for _ in self.units])[0]
        self.classic = np.copy(self.Ia)
        self.classic[idx] = self.Im[idx]
        return
Example #22
class ThermochemRawData(ThermochemBase):
    """
    Implement a thermochemical property correlation from raw data.

    Evaluated quantities are interpolated using a B-spline as discussed in
    :ref:`correlations documentation <correlations>`.
    """
    def __init__(self,
                 ND_H_ref,
                 ND_S_ref,
                 Ts,
                 ND_Cps,
                 T_ref=298.15,
                 range=None):
        """
        Initialize a thermochemical property correlation from raw data.

        Parameters
        ----------
        ND_H_ref : float
            Non-dimensional standard heat of formation |eq_ND_H_ref|
        ND_S_ref : float
            Non-dimensional standard state reference entropy |eq_ND_S_ref|
        Ts : float array
            Temperatures at which `ND_Cps` are evaluated.
        ND_Cps : float
            Non-dimensional standard state heat capacities |eq_ND_Cp_T|
            evaluated at each temperature in `Ts`.
        T_ref : float, optional
            Reference temperature |eq_Tref| for `ND_H_ref` and `ND_S_ref`
            (default: 298.15K).
        range : tuple(float, float), optional
            ``(lb, ub) = range`` where lb and ub are respectively the lower and
             uppers bounds of temperatures [K] for which the correlation is
             valid.    If specified, this range must contain T_ref and all data
             points in ND_Cp.
        """
        (self.Ts, self.ND_Cps) = zip(
            *sorted(zip(Ts, ND_Cps), key=lambda t_cp: t_cp[0]))
        self.min_T = Ts[0]
        self.max_T = Ts[-1]

        if range is None:
            range = (self.min_T, self.max_T)
        else:
            if self.min_T < range[0] or self.max_T > range[1]:
                raise ValueError(
                    'Heat capacity data points %g or %g lie outside of range'
                    ' [%g,%g].' % (self.min_T, self.max_T, range[0], range[1]))
        if T_ref < range[0] or T_ref > range[1]:
            raise ValueError(
                'T_ref=%g is outside the valid correlation range [%g,%g].' %
                (T_ref, range[0], range[1]))

        ThermochemBase.__init__(self, range)

        self.min_ND_Cp = self.ND_Cps[0]
        self.max_ND_Cp = self.ND_Cps[-1]

        self.ND_H_ref = ND_H_ref
        self.ND_S_ref = ND_S_ref
        self.T_ref = T_ref

        N = len(self.Ts)
        if N == 1:
            self.spline = ConstantSpline(self.ND_Cps[0])
        else:
            self.spline = InterpolatedUnivariateSpline(self.Ts,
                                                       self.ND_Cps,
                                                       k=min(3, N - 1))

    def eval_ND_Cp(self, T):
        """Return non-dimensional standard state heat capacity |eq_ND_Cp_T|."""
        self.check_range(T)
        if not np.isscalar(T):
            return self._eval_ND_Cp_ar(T)

        if T < self.min_T:
            return self.min_ND_Cp
        if T > self.max_T:
            return self.max_ND_Cp

        # Work-around for SciPy bug (?):
        #return self.spline(T)
        return float(self.spline(T))

    def _eval_ND_Cp_ar(self, T):
        ND_Cp = np.empty(T.shape)
        T_below = T < self.min_T
        T_above = T > self.max_T
        T_middle = np.logical_not(np.logical_or(T_below, T_above))

        ND_Cp[T_below] = self.min_ND_Cp
        ND_Cp[T_above] = self.max_ND_Cp
        ND_Cp[T_middle] = self.spline(T[T_middle])

        return ND_Cp

    def eval_ND_S(self, T):
        """Return non-dimensional standard state entropy |eq_ND_S_T|."""
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T

        ND_S = self.ND_S_ref

        if T_a <= min_T:
            if T_b <= min_T:
                return ND_S + self.min_ND_Cp * np.log(T_b / T_a)
            ND_S += self.min_ND_Cp * np.log(min_T / T_a)
            T_a = min_T
        elif T_b <= min_T:
            ND_S += self.min_ND_Cp * np.log(T_b / min_T)
            T_b = min_T

        if T_a >= max_T:
            if T_b >= max_T:
                return ND_S + self.max_ND_Cp * np.log(T_b / T_a)
            ND_S += self.max_ND_Cp * np.log(max_T / T_a)
            T_a = max_T
        elif T_b >= max_T:
            ND_S += self.max_ND_Cp * np.log(T_b / max_T)
            T_b = max_T

        # The easiest, albeit not necessarily the best thing to do here is to
        # use numerical integration, so that's what we do.
        return ND_S + integrate(lambda t: self.spline(t) / t, T_a, T_b)[0]

    def eval_ND_H(self, T):
        """Return non-dimensional standard heat of formation |eq_ND_H_T|."""
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T

        # This value represents the accumulated H/R (has temperature units).
        rH = self.ND_H_ref * T_a

        if T_a <= min_T:
            if T_b <= min_T:
                return (rH + self.min_ND_Cp * (T_b - T_a)) / T
            rH += self.min_ND_Cp * (min_T - T_a)
            T_a = min_T
        elif T_b <= min_T:
            rH += self.min_ND_Cp * (T_b - min_T)
            T_b = min_T

        if T_a >= max_T:
            if T_b >= max_T:
                return rH + self.max_ND_Cp * (T_b - T_a) / T
            rH += self.max_ND_Cp * (max_T - T_a)
            T_a = max_T
        elif T_b >= max_T:
            rH += self.max_ND_Cp * (T_b - max_T)
            T_b = max_T

        return (rH + self.spline.integral(T_a, T_b)) / T

    @classmethod
    def yaml_construct(cls, params, context):
        if 'T_ref' in params:
            T_ref = params['T_ref']
        else:
            T_ref = eval_qty(298.15, 'K')
        if 'ND_H_ref' in params:
            ND_H_ref = params['ND_H_ref']
        else:
            ND_H_ref = params['H_ref'] / (R * T_ref)
        if 'ND_S_ref' in params:
            ND_S_ref = params['ND_S_ref']
        else:
            ND_S_ref = params['S_ref'] / R

        if 'ND_Cp_data' in params:
            T_data, ND_Cp_data = zip(*params['ND_Cp_data'])
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array(ND_Cp_data)
        else:
            T_data, Cp_data = zip(*params['Cp_data'])
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array([Cp for Cp in Cp_data]) / R

        range = params.get('range')
        if range is not None:
            range = range[0].in_units('K'), range[1].in_units('K')
        else:
            range = Ts.min(), Ts.max()

        return cls(ND_H_ref, ND_S_ref, Ts, ND_Cps, T_ref.in_units('K'), range)

    _yaml_schema = """
Example #23
    def do_gaussian_fit(self, axis, data):
        """ Perform a gaussian fit.

        @param axis:
        @param data:
        @return:
        """

        model, params = self._fit_logic.make_gaussian_model()
        if len(axis) < len(params):
            self.log.warning('Fit could not be performed because number of '
                             'parameters is smaller than data points.')
            return self.do_no_fit()

        else:

            parameters_to_substitute = dict()
            update_dict = dict()

            #TODO: move this to "gated counter" estimator in fitlogic
            #      make the filter an extra function shared and usable for other
            #      functions
            gauss = gaussian(10, 10)
            data_smooth = filters.convolve1d(data,
                                             gauss / gauss.sum(),
                                             mode='mirror')

            # the integral of a Gaussian equals sqrt(2*pi) * amplitude * sigma
            function = InterpolatedUnivariateSpline(axis, data_smooth, k=1)
            Integral = function.integral(axis[0], axis[-1])
            amp = data_smooth.max()
            sigma = Integral / amp / np.sqrt(2 * np.pi)
            amplitude = amp * sigma * np.sqrt(2 * np.pi)

            update_dict['offset'] = {
                'min': 0,
                'max': data.max(),
                'value': 0,
                'vary': False
            }
            update_dict['center'] = {
                'min': axis.min(),
                'max': axis.max(),
                'value': axis[np.argmax(data)]
            }
            update_dict['sigma'] = {
                'min': -np.inf,
                'max': np.inf,
                'value': sigma
            }
            update_dict['amplitude'] = {
                'min': 0,
                'max': np.inf,
                'value': amplitude
            }

            result = self._fit_logic.make_gaussian_fit(
                x_axis=axis,
                data=data,
                estimator=self._fit_logic.estimate_gaussian_peak,
                units=None,  # TODO
                add_params=update_dict)
            # 1000 points in x axis for smooth fit data
            hist_fit_x = np.linspace(axis[0], axis[-1], 1000)
            hist_fit_y = model.eval(x=hist_fit_x, params=result.params)

            param_dict = OrderedDict()

            # create the proper param_dict with the values:
            param_dict['sigma_0'] = {
                'value': result.params['sigma'].value,
                'error': result.params['sigma'].stderr,
                'unit': 'Occurrences'
            }

            param_dict['FWHM'] = {
                'value': result.params['fwhm'].value,
                'error': result.params['fwhm'].stderr,
                'unit': 'Counts/s'
            }

            param_dict['Center'] = {
                'value': result.params['center'].value,
                'error': result.params['center'].stderr,
                'unit': 'Counts/s'
            }

            param_dict['Amplitude'] = {
                'value': result.params['amplitude'].value,
                'error': result.params['amplitude'].stderr,
                'unit': 'Occurrences'
            }

            param_dict['chi_sqr'] = {'value': result.chisqr, 'unit': ''}

            return hist_fit_x, hist_fit_y, param_dict, result
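The estimate above rests on the Gaussian area identity \int A \exp(-(x-c)^2 / (2\sigma^2)) dx = A \sigma \sqrt{2\pi}. A quick numerical check with synthetic data:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

axis = np.linspace(-10.0, 10.0, 1001)
amp, sigma, center = 3.0, 1.2, 0.0
data = amp * np.exp(-(axis - center)**2 / (2 * sigma**2))
integral = InterpolatedUnivariateSpline(axis, data, k=1).integral(axis[0], axis[-1])
print(integral / amp / np.sqrt(2 * np.pi))  # ~1.2 (= sigma)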
Example #24
def Evolution(state):
    """Evolution acts on a Universe class object and models it from a = 10**-9 to a = 1"""
    (xi, yi, ci, u, Yi, gi, giS, gipr, giprS, gipr2, gipr2S, AYi, AYiS, n, Lami,
     OmDEi, OmegaDE, OmegaRad, OmegaM, wphi, hdot, delta, deltap, deltap2,
     e1, e2, A, WA, Delta, GrZ, w0, wa, wp) = state
    Fins = np.array([xi[0],xi[1],yi[0],yi[1],ci[0],ci[1],u,Lami[0],Lami[1],n+1])
    cons = (6.0**(1.0/2.0))/2.0
    a = 10**(-9)
    dlna = 10**(-2)
    z = (1.0/a)-1.0 
    while a < 1:
            f = 3.0*(((xi[0]**2)*gi[0])+ n*((xi[1]**2)*gi[1]))+(u**2)
            dxi = ((xi/2.0)*(3.0+f-2.0*cons*Lami*xi)+cons*AYi*(Lami*OmDEi-2.0*cons*xi*(gi+Yi*gipr)))*dlna
            dyi = ((yi/2.0)*(3.0+f-2.0*cons*Lami*xi))*dlna
            du = ((u/2.0)*(-1.0+f))*dlna
            xi += dxi
            yi += dyi
            u += du
            Yi = (xi**2)/(yi**2)
            gi = eval(giS)
            gipr = eval(giprS)
            gipr2 = eval(gipr2S)
            AYi = eval(AYiS)
            OmDEi = (xi**2)*(gi+2.0*Yi*gipr)
            OmegaDE = OmDEi[0] + n*OmDEi[1]
            OmegaRad = (u**2)
            OmegaM = 1-OmegaDE-OmegaRad
            wphi = ((((xi[0]**2)*gi[0])+n*((xi[1]**2)*gi[1]))/(((xi[0]**2)*(gi[0]+2.0*Yi[0]*gipr[0]))+ n*((xi[1]**2)*(gi[1]+2.0*Yi[1]*gipr[1]))))
            hdot = -(3.0/2.0) -(3.0/2.0)*((xi[0]**2)*gi[0]+n*((xi[1]**2)*gi[1]))-.5*(u**2)
            delta += deltap*dlna
            deltap += deltap2*dlna
            deltap2 = ((3.0/2.0)*OmegaM*delta)-((hdot+2.0)*deltap)
            
            if z < 25.0:
                Delta = np.append(Delta, delta)
                GrZ = np.append(GrZ, z)
            if a > 0.1:
                WA = np.append(WA, wphi)
                A = np.append(A,a)
            a = a*(1.0+dlna)
            z = (1.0/a)-1.0
        #Exporting Linear Growth Data
        
    GrZ = np.trim_zeros(GrZ, 'f')
    Delta = np.trim_zeros(Delta, 'f')
    GrZ = np.flipud(GrZ)
    Delta = (np.flipud(Delta))/delta       
    PickZ = np.zeros(20)
    PickD = np.zeros(20)
            
    for p in range(20):
            i = 16*p
            PickZ[p] = GrZ[i]
            PickD[p] = Delta[i]
                
    GrZ = PickZ
    Delta = (PickD)/(1.0/(1.0+PickZ)) 
    
    # Finding w_0, w_a, and w_p
    
    A = np.trim_zeros(A, 'f')
    WA = np.trim_zeros(WA, 'f')  
    e1c = e1(A)
    e2c = e2(A)
    alpha1s = InterpolatedUnivariateSpline(A, (1.0+WA)*e1c, k=3)
    alpha2s = InterpolatedUnivariateSpline(A, (1.0+WA)*e2c, k=3)
    alpha1 = con1*alpha1s.integral(0.1,1.0)
    alpha2 = con2*alpha2s.integral(0.1,1.0)
    w0 = ((alpha1*(gamma2-beta2)+alpha2*(beta1-gamma1))/(beta1*gamma2-beta2*gamma1))-1.0
    wa = (alpha1*beta2-alpha2*beta1)/(beta1*gamma2-beta2*gamma1)
    wp = (alpha1/beta1)-1.0
    if str(wp) != "nan": 
        END = np.array([w0,wa,wp,wphi,OmegaM,OmegaDE])
        END = np.append(END,Fins)
        GROWTH = np.array([GrZ,Delta])
        return END, GROWTH
    else:
        return np.array([0,0]),np.array([0,0])
Example #25
def mean_focus(expstart, expend, camera='UVIS1', spline_order=3,
               not_found_value=None, with_var=False):
    """
    Gets the mean focus over a given observation period. Exposure start and end
    times can be specified as Modified Julian Date float (like the FITS header
    EXPSTART and EXPEND keywords) or a UTC time string in YYYY-MM-DD HH:MM:SS
    format.
    :param expstart: Start time of exposure.
    :param expend: End time of exposure.
    :param camera: One of UVIS1, UVIS2, WFC1, WFC2, HRC, PC. Default is UVIS1.
    :param spline_order: Degree of the spline used to interpolate the model
     data points (passed as k= to scipy.interpolate.UnivariateSpline). Use 1 for
     linear interpolation. Default is 3.
    :param not_found_value: Value to return if the Focus Model does not have
     data for the given time interval. Default value (None) means raise
     HTTPResponseError
     :param with_var: Also include variance in a returned 2-tuple
    :return: Continuous (integral) mean focus between expstart and expend
    """
    # Convert date/time strings to MJD
    try:
        startnums = [int(num) for num in re.split(':|-|/| ', expstart)]
        endnums = [int(num) for num in re.split(':|-|/| ', expend)]
        expstart = _date_time_to_mjd(*startnums)
        expend = _date_time_to_mjd(*endnums)
    except TypeError:
        pass
    # Pad input exposure start and end time, to make sure we get at least one
    # data point before and after. Then split up into year, date, times
    ten_mins = 10 / (24 * 60)
    expstart_pad = float(expstart) - ten_mins
    expend_pad = float(expend) + ten_mins
    start_yr, start_date, start_time = _mjd_to_year_date_time(expstart_pad)
    stop_yr, stop_date, stop_time = _mjd_to_year_date_time(expend_pad)
    # Chop off seconds
    start_time = start_time.rsplit(':', 1)[0]
    stop_time = stop_time.rsplit(':', 1)[0]

    if start_date != stop_date:
        intervals = [(start_yr, start_date, start_time, '23:59'),
                     (stop_yr, stop_date, '00:00', stop_time)]
    else:
        intervals = [(start_yr, start_date, start_time, stop_time)]

    try:
        txt_focus = ''
        # Get text table of focus data for each interval
        for year, date, start, stop in intervals:
            txt_interval = get_model_data(year, date, start, stop, camera,
                                          format='TXT')
            col_names, txt_interval = txt_interval.split('\n', 1)
            txt_focus += txt_interval
        # convert to numpy array
        focus_data = genfromtxt(StringIO(txt_focus), skip_header=0, dtype=None,
                                names=_model_output_columns,
                                delimiter=_output_field_widths)
        # Create interpolating spline
        spline = InterpolatedUnivariateSpline(
            focus_data['JulianDate'], focus_data['Model'], k=spline_order)
        # Return the continuous (integral) mean
        mean_foc = spline.integral(expstart, expend) / (expend - expstart)
        # Calculate signal variance (see e.g. Wikipedia article for RMS)
        if with_var:
            xvals = linspace(expstart, expend, focus_data.size*2)
            var_foc = trapz(spline(xvals)**2, xvals) / (expend - expstart)
            var_foc -= mean_foc**2

    except HTTPResponseError as err:
        if err.response.status == httplib.NOT_FOUND \
                or not_found_value is not None:
            mean_foc = not_found_value
            var_foc = not_found_value
        else:
            raise err
    # Return value inferred from the docstring: mean focus, optionally with variance
    if with_var:
        return mean_foc, var_foc
    return mean_foc
Example #26
def main():
    scale_n = 1.10  # Tully-Fisher total surface number density (unit: arcmin^-2), from Eric et al.(2013), Table 2 (TF-Stage)
    c = 2.99792458e5  # speed of light unit in km/s
    N_dset = (nrbin + 1) * nrbin // 2  # number of C^ij(l) data sets
    zbin = np.zeros(nrbin +
                    1)  # for both zbin and chibin, the first element is 0.
    chibin = np.zeros(nrbin + 1)
    eps = 0.1

    inputf = '../Input_files/nz_stage_IV.txt'  # Input file of n(z) which is the galaxy number density distribution in terms of z
    # Here center_z denotes z axis of n(z). It may not be appropriate since we don't have redshift bin setting
    center_z, n_z = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    spl_nz = InterpolatedUnivariateSpline(center_z, n_z)
    n_sum = spl_nz.integral(center_z[0],
                            center_z[-1])  # Calculate the total number density
    #print(n_sum)
    scale_dndz = 1.0 / n_sum
    n_z = n_z * scale_dndz  # Normalize n(z)
    spl_nz = InterpolatedUnivariateSpline(
        center_z, n_z)  # Interpolate n(z) in terms of z using spline

    # bin interval
    z_min = center_z[0]
    z_max = 2.0  # based on the data file, at z=2.0, n(z) is very small
    zbin_avg = (z_max - z_min) / float(nrbin)
    for i in range(nrbin):
        zbin[i] = i * zbin_avg + z_min
        if zbin[i] <= z_target:
            target_i = i
    zbin[-1] = z_max

    # Note that here chibin[0] is not equal to 0, since there is redshift cut at low z.
    for i in range(0, nrbin + 1):
        chibin[i] = comove_d(zbin[i]) * c / 100.0

    print('Xmax', c / 100.0 * comove_d(zbin[-1]))
    #Xmax = comove_d(zbin[-1])
    print('Xmax')

    print('target_i:', target_i)
    #z_array = np.linspace(0.0, zbin[target_i+1], 1000, endpoint=False)
    z_array = np.linspace(0.0, z_max, 1000, endpoint=True)
    chi_array = np.array([comove_d(z_i) for z_i in z_array]) * c / 100.0
    print("chi_array:", chi_array)
    g_i_array = np.array([], dtype=np.float64)
    for chi_k in chi_array:
        g_i = lens_eff(target_i, chi_k, chibin, eps)
        g_i_array = np.append(g_i_array, g_i)

    print('min_gi:', np.min(g_i_array), 'at chi=',
          chi_array[np.argmin(g_i_array)])
    odir0 = '/Users/ding/Documents/playground/shear_ps/project_final/fig_lens_eff/gi_data/'
    odir = odir0 + '{}/'.format(survey_stage)
    if not os.path.exists(odir):
        os.mkdir(odir)
    ofile = odir + 'gi_nrbin{}_zk_{}_rbinid_{}.npz'.format(
        nrbin, z_target, target_i)
    np.savez(ofile, z=z_array, gi=g_i_array)

    odir = './figs/lens_eff_gi/'
    if not os.path.exists(odir):
        os.makedirs(odir)

    ofile = odir + 'gi_{}_nrbin{}_zi_{}.pdf'.format(survey_stage, nrbin,
                                                    z_target)
    plot_lens_eff(z_array, g_i_array, nrbin, z_target, ofile)

    ofile = odir + 'gi_{}_nrbin{}_zi_{}_version2.pdf'.format(
        survey_stage, nrbin, z_target)
    plot_lens_eff_version2(z_array, chi_array, g_i_array, nrbin, z_target,
                           ofile)
Example #27
class SpectrumDist(rv_continuous):
    """
    The `SpectrumDist` object is a `scipy.stats` like object to describe the
    neutron intensity as a function of wavelength. You can use the `pdf, cdf,
    ppf, rvs` methods like you would a `scipy.stats` distribution. Of
    particular interest is the `rvs` method which randomly samples neutrons
    whose distribution obeys the direct beam spectrum. Random variates are
    generated the `rv_continuous` superclass by classical generation of
    uniform noise coupled with the `ppf`. `ppf` is approximated by linear
    interpolation of `q` into a pre-calculated inverse `cdf`.
    """

    def __init__(self, x, y):
        super(SpectrumDist, self).__init__(a=np.min(x), b=np.max(x))
        self._x = x

        # normalise the distribution
        area = simps(y, x)
        y /= area
        self._y = y

        # an InterpolatedUnivariate spline models the spectrum
        self.spl = IUS(x, y)

        # fudge_factor required because integral of the spline is not exactly 1
        self.fudge_factor = self.spl.integral(self.a, self.b)

        # calculate a gridded and sampled version of the CDF.
        # this can be used with interpolation for quick calculation
        # of ppf (necessary for quick rvs)
        self._x_interpolated_cdf = np.linspace(np.min(x), np.max(x), 1000)
        self._interpolated_cdf = self.cdf(self._x_interpolated_cdf)

    def _pdf(self, x):
        return self.spl(x) / self.fudge_factor

    def _cdf(self, x):
        xflat = x.ravel()

        f = lambda x: self.spl.integral(self.a, x) / self.fudge_factor
        v = map(f, xflat)

        r = np.fromiter(v, dtype=float).reshape(x.shape)
        return r

    def _f(self, x, qq):
        return self._cdf(x) - qq

    def _g(self, qq, *args):
        return brentq(self._f, self._a, self._b, args=(qq,) + args)

    def _ppf(self, q, *args):
        qflat = q.ravel()
        """
        _a, _b = self._get_support(*args)

        def f(x, qq):
            return self._cdf(x) - qq

        def g(qq):
            return brentq(f, _a, _b, args=(qq,) + args, xtol=1e-3)

        v = map(g, qflat)

        cdf = _CDF(self.spl, self.fudge_factor, _a, _b)
        g = _G(cdf)

        with Pool() as p:
            v = p.map(g, qflat)
            r = np.fromiter(v, dtype=float).reshape(q.shape)
        """
        # approximate the ppf using a sampled+interpolated CDF
        # the commented out methods are more accurate, but are at least
        # 3 orders of magnitude slower.
        r = np.interp(qflat, self._interpolated_cdf, self._x_interpolated_cdf)
        return r.reshape(q.shape)
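A minimal usage sketch (the wavelength/intensity arrays are synthetic stand-ins for a measured spectrum; IUS, simps, brentq, and rv_continuous are imported as in the original module):

import numpy as np

x = np.linspace(2., 20., 200)                  # hypothetical wavelengths
y = np.exp(-0.5 * ((x - 8.) / 2.) ** 2)        # hypothetical beam spectrum
dist = SpectrumDist(x, y)
samples = dist.rvs(size=10000)                 # wavelengths drawn from the spectrum
print(samples.mean())                          # should sit near the peak, ~8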
Example #28
0
class ThermochemRawData(ThermochemBase):
    """
    Implement a thermochemical property correlation from raw data.

    Evaluated quantities are interpolated using a B-spline as discussed in
    :ref:`correlations documentation <correlations>`.
    """
    def __init__(self,
                 ND_H_ref,
                 ND_S_ref,
                 Ts,
                 ND_Cps,
                 T_ref=c.T0(units='K'),
                 range=None):
        """
        Initialize a thermochemical property correlation from raw data.

        Parameters
        ----------
        ND_H_ref : float
            Non-dimensional standard heat of formation |eq_ND_H_ref|
        ND_S_ref : float
            Non-dimensional standard state reference entropy |eq_ND_S_ref|
        Ts : float array
            Temperatures at which `ND_Cps` are evaluated.
        ND_Cps : float array
            Non-dimensional standard state heat capacities |eq_ND_Cp_T|
            evaluated at each temperature in `Ts`.
        T_ref : float, optional
            Reference temperature |eq_Tref| for `ND_H_ref` and `ND_S_ref`
            (default: room temperature according to pmutt, likely 298.15K).
        range : tuple(float, float), optional
            ``(lb, ub) = range`` where lb and ub are respectively the lower and
            upper bounds of temperatures [K] for which the correlation is
            valid. If specified, this range must contain T_ref and all data
            points in ND_Cps.
        """
        #Ts.sort()
        (self.Ts, self.ND_Cps) = list(
            zip(*sorted(zip(Ts, ND_Cps), key=lambda T_ND_Cps: T_ND_Cps[0])))
        self.min_T = min(Ts)
        self.max_T = max(Ts)

        if range is None:
            range = (self.min_T, self.max_T)
        else:
            if self.min_T < range[0] or self.max_T > range[1]:
                raise ValueError(
                    'Heat capacity data points %g or %g lie outside of range'
                    ' [%g,%g].' % (self.min_T, self.max_T, range[0], range[1]))
        if T_ref < range[0] or T_ref > range[1]:
            raise ValueError(
                'T_ref=%g is outside the valid correlation range [%g,%g].' %
                (T_ref, range[0], range[1]))

        #runs assertion checks on range
        ThermochemBase.__init__(self, range)

        self.min_ND_Cp = min(self.ND_Cps)
        self.max_ND_Cp = max(self.ND_Cps)

        self.ND_H_ref = ND_H_ref
        self.ND_S_ref = ND_S_ref
        self.T_ref = T_ref

        N = len(self.Ts)
        if N == 1:
            self.spline = ConstantSpline(self.ND_Cps[0])
        else:
            self.spline = InterpolatedUnivariateSpline(self.Ts,
                                                       self.ND_Cps,
                                                       k=min(3, N - 1))

    def get_CpoR(self, T):
        """Return non-dimensional standard state heat capacity |eq_ND_Cp_T|."""
        self.check_range(T)
        if not np.isscalar(T):
            return self._get_CpoR_ar(T)

        # Clamp T to the data range, then evaluate the spline; float() is a
        # work-around for SciPy returning a 0-d array for scalar input.
        T = min(max(T, self.min_T), self.max_T)
        return float(self.spline(T))

    def _get_CpoR_ar(self, T):
        # Vectorized version: constant extrapolation (min/max Cp) outside the
        # data range, spline evaluation inside it.
        T = np.asarray(T)
        result = np.empty(T.shape)
        low = T < self.min_T
        high = T > self.max_T
        mid = ~(low | high)
        result[low] = self.min_ND_Cp
        result[high] = self.max_ND_Cp
        result[mid] = self.spline(T[mid])
        return result

    def get_SoR(self, T):
        """Return non-dimensional standard state entropy |eq_ND_S_T|."""
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T

        ND_S = self.ND_S_ref

        if T_a <= min_T:
            if T_b <= min_T:
                return ND_S + self.min_ND_Cp * np.log(T_b / T_a)
            ND_S += self.min_ND_Cp * np.log(min_T / T_a)
            T_a = min_T
        elif T_b <= min_T:
            ND_S += self.min_ND_Cp * np.log(T_b / min_T)
            T_b = min_T

        if T_a >= max_T:
            if T_b >= max_T:
                return ND_S + self.max_ND_Cp * np.log(T_b / T_a)
            ND_S += self.max_ND_Cp * np.log(max_T / T_a)
            T_a = max_T
        elif T_b >= max_T:
            ND_S += self.max_ND_Cp * np.log(T_b / max_T)
            T_b = max_T

        # The easiest, albeit not necessarily the best thing to do here is to
        # use numerical integration, so that's what we do.
        return ND_S + integrate(lambda t: self.spline(t) / t, T_a, T_b)[0]

    def get_HoRT(self, T):
        """Return non-dimensional standard heat of formation |eq_ND_H_T|."""
        self.check_range(T)
        T_a = self.T_ref
        T_b = T
        min_T = self.min_T
        max_T = self.max_T

        # This value represents the accumulated H/R (has temperature units).
        rH = self.ND_H_ref * T_a

        if T_a <= min_T:
            if T_b <= min_T:
                return (rH + self.min_ND_Cp * (T_b - T_a)) / T
            rH += self.min_ND_Cp * (min_T - T_a)
            T_a = min_T
        elif T_b <= min_T:
            rH += self.min_ND_Cp * (T_b - min_T)
            T_b = min_T

        if T_a >= max_T:
            if T_b >= max_T:
                return rH + self.max_ND_Cp * (T_b - T_a) / T
            rH += self.max_ND_Cp * (max_T - T_a)
            T_a = max_T
        elif T_b >= max_T:
            rH += self.max_ND_Cp * (T_b - max_T)
            T_b = max_T

        return (rH + self.spline.integral(T_a, T_b)) / T

    @classmethod
    def yaml_construct(cls, params, context):
        if 'T_ref' in params:
            T_ref = params['T_ref']
        else:
            # Pull room temperature from pmutt's constants; append ' K'
            # because eval_qty needs units. T_ref is then a Quantity.
            T_ref = eval_qty(str(c.T0(units='K')) + ' K')
        if 'ND_H_ref' in params:
            ND_H_ref = params['ND_H_ref']
        else:
            ND_H_ref = params['H_ref'] / (R * T_ref)
        if 'ND_S_ref' in params:
            ND_S_ref = params['ND_S_ref']
        else:
            ND_S_ref = params['S_ref'] / R

        if 'ND_Cp_data' in params:
            T_data, ND_Cp_data = list(zip(*params['ND_Cp_data']))
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array(ND_Cp_data)
        else:
            T_data, Cp_data = list(zip(*params['Cp_data']))
            Ts = np.array([T.in_units('K') for T in T_data])
            ND_Cps = np.array(Cp_data) / R

        range = params.get('range')
        if range is not None:
            range = range[0].in_units('K'), range[1].in_units('K')
        else:
            range = Ts.min(), Ts.max()

        return cls(ND_H_ref, ND_S_ref, Ts, ND_Cps, T_ref.in_units('K'), range)

    _yaml_schema = """
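(The _yaml_schema string above is truncated in the source.) A minimal usage sketch with illustrative numbers; the default T_ref relies on the module's c.T0 constant, so the first data point is chosen to match it:

Ts = np.array([298.15, 400., 700., 1000.])     # K; first point matches the default T_ref
ND_Cps = np.array([3.50, 3.55, 3.68, 3.86])    # Cp/R, hypothetical values
corr = ThermochemRawData(ND_H_ref=0.0, ND_S_ref=23.0, Ts=Ts, ND_Cps=ND_Cps)
print(corr.get_CpoR(650.0))    # interpolated Cp/R
print(corr.get_HoRT(650.0))    # H/(R*T), accumulated from T_ref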
Example #29
0
def build_grid(z_FRB=1., ntrial=10, seed=12345, Mlow=1e10, r_max=2., outfile=None, dz_box=0.1,
    dz_grid=0.01, f_hot=0.75, verbose=True):
    """
    Generate a universe of dark matter halos with DM measurements
    Mainly an internal function for generating useful output grids.

    Requires the Aemulus Halo Mass function

    Args:
        z_FRB: float, optional
        ntrial: int, optional
        seed: int, optional
        Mlow: float, optional
          h^-1 mass
        r_max: float, optional
          Extent of the halo in units of rvir
        outfile: str, optional
          Write
        dz_box: float, optional
          Size of the slice of the universe for each sub-calculation
        dz_grid: float, optional
          redshift spacing in the DM grid
        f_hot: float
          Fraction of the cosmic fraction of matter in diffuse gas (for DM)

    Returns:
        DM_grid: ndarray (ntrial, nz)
        halo_tbl: Table
          Table of all the halos intersected

    """
    Mhigh = 1e16  # Msun
    # mNFW
    y0 = 2.
    alpha = 2.

    warnings.warn("Ought to do concentration properly someday!")
    cgm = ModifiedNFW(alpha=alpha, y0=y0, f_hot=f_hot)
    icm = ICM()

    # Random numbers
    rstate = np.random.RandomState(seed)

    # Init HMF
    hmf = init_hmf()

    # Boxes
    nbox = int(z_FRB / dz_box)
    nz = int(z_FRB / dz_grid)
    dX = int(np.sqrt(ntrial))+1
    #
    npad = 6 # Mpc
    base_l = 2*dX + npad
    print('L_base = {} cMpc'.format(base_l))
    warnings.warn("Worry about being big enough given cMpc vs pMpc")

    DM_grid = np.zeros((ntrial,nz))

    # Spline distance to z
    D_max = cosmo.comoving_distance(z_FRB)
    D_val = np.linspace(1e-3,D_max.value,200) # IS THIS FINE ENOUGH?
    z_val = np.array([z_at_value(cosmo.comoving_distance, iz) for iz in D_val*units.Mpc])
    D_to_z = IUS(D_val, z_val)

    # Save halo info
    #halos = [[] for i in range(ntrial)]
    halo_i, M_i, R_i, DM_i, z_i = [], [], [], [], []

    # Loop me
    prev_zbox = 0.
    for ss in range(nbox):
        zbox = ss*dz_box + dz_box/2.
        print('zbox = {}'.format(zbox))
        a = 1./(1.0 + zbox) # Scale factor
        # Mass function
        M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
        lM = np.log(M)
        dndlM = np.array([hmf.dndlM(Mi, a) for Mi in M])
        n_spl = IUS(lM, dndlM)
        cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
        ncum_n = cum_n/cum_n[-1]
        # As z increases, we have numerical issues at the high mass end (they are too rare)
        try:
            mhalo_spl = IUS(ncum_n, lM)
        except ValueError:
            # Kludge me
            print("REDUCING Mhigh by 2x")
            Mhigh /= 2.
            M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
            lM = np.log(M)
            dndlM = np.array([hmf.dndlM(Mi, a) for Mi in M])
            n_spl = IUS(lM, dndlM)
            cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
            ncum_n = cum_n/cum_n[-1]
            #
            mhalo_spl = IUS(ncum_n, lM)

        # Volume -- Box with base l = 2Mpc
        D_zn = cosmo.comoving_distance(zbox + dz_box/2.) # Full box
        D_zp = cosmo.comoving_distance(ss*dz_box) # Previous
        D_z = D_zn - D_zp
        V = D_z * (base_l*units.Mpc)**2

        # Average N_halo
        avg_n = hmf.n_bin(Mlow*cosmo.h, Mhigh*cosmo.h, a) * cosmo.h**3 * units.Mpc**-3
        avg_N = (V * avg_n).value

        # Assume Gaussian stats for number of halos
        N_halo = int(np.round(avg_N + np.sqrt(avg_N)*rstate.randn(1)))

        # Random masses
        randM = rstate.random_sample(N_halo)
        rM = np.exp(mhalo_spl(randM)) / cosmo.h

        # r200
        r200 = (((3*rM*units.M_sun.cgs) / (4*np.pi*200*cosmo.critical_density(zbox)))**(1/3)).to('kpc')

        # Random locations (X,Y,Z)
        X_c = rstate.random_sample(N_halo)*base_l # Mpc
        Y_c = rstate.random_sample(N_halo)*base_l # Mpc
        Z_c = (rstate.random_sample(N_halo)*D_z.to('Mpc') + D_zp).value

        # Check mass fraction
        if verbose:
            Mtot = np.log10(np.sum(rM))
            M_m = (cosmo.critical_density(zbox)*cosmo.Om(zbox) * V/(1+zbox)**3).to('M_sun')
            #print("N_halo: {}  avg_N: {}".format(N_halo, avg_N))
            print("z: {}  Mhalo/M_m = {}".format(zbox, 10**Mtot/M_m.value))
            print(frac_in_halos([zbox], Mlow, Mhigh))

        # Redshifts
        z_ran = D_to_z(Z_c)

        # Loop on trials
        all_DMs = []
        all_nhalo = []
        all_r200 = []
        for itrial in range(ntrial):
            # X,Y trial
            X_trial = npad//2 + 2*(itrial % dX)   # Step by 2 Mpc across the grid
            Y_trial = npad//2 + 2*(itrial // dX)
            # Impact parameters
            R_com = np.sqrt((X_c - X_trial)**2 + (Y_c - Y_trial)**2)  # Mpc
            R_phys = R_com * 1000. / (1+z_ran) * units.kpc
            # Cut
            intersect = R_phys < r_max*r200
            print("We hit {} halos".format(np.sum(intersect)))
            all_nhalo.append(np.sum(intersect))
            if not np.any(intersect):
                all_DMs.append(0.)
                continue
            # Loop -- FIND A WAY TO SPEED THIS UP!
            DMs = []
            for iobj in np.where(intersect)[0]:
                # Init
                if rM[iobj] > 1e14: # Use ICM model
                    model = icm
                else:
                    model = cgm
                model.log_Mhalo=np.log10(rM[iobj])
                model.M_halo = 10.**model.log_Mhalo * constants.M_sun.cgs
                model.z = zbox # To be consistent with above;  should be close enough
                model.setup_param(cosmo=cosmo)
                # DM
                DM = model.Ne_Rperp(R_phys[iobj], rmax=r_max, add_units=False)/(1+model.z)
                DMs.append(DM)
                # Save halo info
                halo_i.append(itrial)
                M_i.append(model.M_halo.value)
                R_i.append(R_phys[iobj].value)
                DM_i.append(DM)
                z_i.append(z_ran[iobj])
                all_r200.append(model.r200.value)
            # Save em
            iz = (z_ran[intersect]/dz_grid).astype(int)
            DM_grid[itrial,iz] += DMs
            all_DMs.append(np.sum(DMs))
            #print(DMs, np.log10(rM[intersect]), R_phys[intersect])

    # Table the halos
    halo_tbl = Table()
    halo_tbl['trial'] = halo_i
    halo_tbl['M'] = M_i
    halo_tbl['R'] = R_i
    halo_tbl['DM'] = DM_i
    halo_tbl['z'] = z_i

    # Write
    if outfile is not None:
        print("Writing to {}".format(outfile))
        np.save(outfile, DM_grid, allow_pickle=False)
        halo_tbl.write(outfile+'.fits', overwrite=True)

    return DM_grid, halo_tbl
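A hedged driver sketch (requires the Aemulus halo mass function and the module's cosmology setup; a small ntrial keeps the run light):

DM_grid, halo_tbl = build_grid(z_FRB=1., ntrial=4, verbose=False)
print(DM_grid.shape)   # (ntrial, nz)
print(len(halo_tbl))   # number of halos intersected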
Example #30
0
File: plot_all.py Project: jhbhom/MSLab
def ana_color(clear_name, ws_name, color):

    c1 = TCanvas("c1", "c1", 800, 600)

    clear = clear_name + "/"
    ws = ws_name + "/"

    results_clear = []
    results_ws = []
    results_peak_ws = []
    results_peak_clear = []

    print 'Starting clear fiber', clear
    for filename in os.listdir(clear):
        if filename.endswith(".CSV"):
            #print filename

            t = []
            U = []
            UMAX = -10000.
            with open(clear + filename, 'rb') as f:
                reader = csv.reader(f)
                for row in reader:
                    try:
                        tt = float(row[0]) * 1.e9
                        uu = float(row[1]) * 1.e3

                        t.append(tt)
                        U.append(uu)
                    except ValueError:
                        pass  # skip non-numeric header rows
            #Uaverage=sum(U[7000:len(U)])/(len(U)-7000)
            #print int(0.3*len(U))
            Uaverage = sum(U[0:int(0.3 * len(U))]) / (0.3 * len(U))
            #print 'WS pedestal: ', Uaverage
            #Uaverage=U[0]
            #Uaverage=0.
            offset(U, Uaverage)
            UMAX = max(U)

            #print len(t)
            plt.plot(t, U)
            plt.xlabel('t [ns]')
            plt.ylabel('U(t) [mV]')
            #plt.show()
            f = InterpolatedUnivariateSpline(t, U, k=1)
            integral = f.integral(min(t), max(t))
            #print integral
            results_clear.append(integral)
            results_peak_clear.append(UMAX)

    print 'Finished clear fiber, starting ws', ws
    for filename in os.listdir(ws):
        if filename.endswith(".CSV"):
            #print filename
            #f = open(clear+filename)
            t = []
            U = []
            with open(ws + filename, 'rb') as f:
                reader = csv.reader(f)
                for row in reader:
                    try:
                        tt = float(row[0]) * 1.e9
                        uu = float(row[1]) * 1.e3

                        t.append(tt)
                        U.append(uu)
                    except ValueError:
                        pass  # skip non-numeric header rows

            #Uaverage=sum(U[7000:len(U)])/(len(U)-7000)
            Uaverage = sum(U[0:int(0.3 * len(U))]) / (0.3 * len(U))
            #print 'WS pedestal: ', Uaverage
            #Uaverage=U[0]
            #Uaverage=0.
            offset(U, Uaverage)
            UMAX = max(U)
            plt.plot(t, U)
            plt.xlabel('t [ns]')
            plt.ylabel('U(t) [mV]')
            #plt.show()
            #time.sleep(100.)
            f = InterpolatedUnivariateSpline(t, U, k=1)
            integral = f.integral(min(t), max(t))
            results_ws.append(integral)
            results_peak_ws.append(UMAX)
    print 'Finished ws'
    Min = min(results_ws + results_clear)
    Max = max(results_ws + results_clear)
    MinU = min(results_peak_ws + results_peak_clear)
    MaxU = max(results_peak_clear + results_peak_ws)

    print MinU, MaxU

    hist_clear = TH1D("clear", "clear", 200, Min, Max)
    hist_ws = TH1D("ws", "ws", 200, Min, Max)

    histU_clear = TH1D("clear", "clear", 200, MinU, MaxU)
    histU_ws = TH1D("ws", "ws", 200, MinU, MaxU)

    for c in results_clear:
        hist_clear.Fill(c)
    for w in results_ws:
        hist_ws.Fill(w)
    for c in results_peak_clear:
        histU_clear.Fill(c)
    for w in results_peak_ws:
        histU_ws.Fill(w)

    mean_ws = hist_ws.GetMean()
    mean_clear = hist_clear.GetMean()

    RMS_ws = hist_ws.GetRMS()
    RMS_clear = hist_clear.GetRMS()

    meanU_ws = histU_ws.GetMean()
    meanU_clear = histU_clear.GetMean()

    RMSU_ws = histU_ws.GetRMS()
    RMSU_clear = histU_clear.GetRMS()

    Min = max(Min, min(mean_ws - 4. * RMS_ws, mean_clear - 4. * RMS_clear))
    Max = min(Max, max(mean_ws + 4. * RMS_ws, mean_clear + 4. * RMS_clear))

    MinU = max(MinU, min(meanU_ws - 4. * RMSU_ws,
                         meanU_clear - 4. * RMSU_clear))
    MaxU = min(MaxU, max(meanU_ws + 4. * RMSU_ws,
                         meanU_clear + 4. * RMSU_clear))
    print meanU_ws, meanU_clear
    print MinU, MaxU

    hist_clear = TH1D("clear", "clear", 200, Min, Max)
    hist_ws = TH1D("ws", "ws", 200, Min, Max)

    histU_clear = TH1D("clear", "clear", 200, MinU, MaxU)
    histU_ws = TH1D("ws", "ws", 200, MinU, MaxU)

    for c in results_clear:
        hist_clear.Fill(c)
    for w in results_ws:
        hist_ws.Fill(w)
    for c in results_peak_clear:
        histU_clear.Fill(c)
    for w in results_peak_ws:
        histU_ws.Fill(w)

    hist_clear.SetLineColor(kBlue + 3)
    hist_clear.SetLineWidth(3)

    hist_ws.SetLineColor(kYellow + 3)
    hist_ws.SetLineWidth(3)

    hist_clear.SetTitle("")
    hist_clear.GetXaxis().SetTitle("[mV ns]")

    hist_ws.SetTitle("")
    hist_ws.GetXaxis().SetTitle("[mV ns]")

    hist_clear.SetStats(False)
    hist_ws.SetStats(False)

    histU_clear.SetLineColor(kBlue + 3)
    histU_clear.SetLineWidth(3)

    histU_ws.SetLineColor(kYellow + 3)
    histU_ws.SetLineWidth(3)

    histU_clear.SetTitle("")
    histU_clear.GetXaxis().SetTitle("[mV]")

    histU_ws.SetTitle("")
    histU_ws.GetXaxis().SetTitle("[mV]")

    histU_clear.SetStats(False)
    histU_ws.SetStats(False)

    hist_ws.DrawNormalized()
    hist_clear.DrawNormalized("SAME")

    leg = TLegend(0.35, 0.5, 0.6, 0.8)
    leg.AddEntry(hist_clear, "Clear Fiber, " + color, "f")
    leg.AddEntry(hist_ws, "WaveShifter, " + color, "f")

    leg.Draw()

    c1.SaveAs("results_" + color + ".pdf")

    histU_ws.DrawNormalized()
    histU_clear.DrawNormalized("SAME")

    leg = TLegend(0.35, 0.5, 0.6, 0.8)
    leg.AddEntry(histU_clear, "Clear Fiber, " + color, "f")
    leg.AddEntry(histU_ws, "WaveShifter, " + color, "f")

    leg.Draw()

    c1.SaveAs("resultsU_" + color + ".pdf")

    plt.savefig("pulse_" + color + ".png")
    plt.savefig("pulse_" + color + ".pdf")
    plt.savefig("pulse_" + color + "self.eps")

    plt.clf()
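A hedged invocation sketch (directory names are placeholders; each directory is expected to contain the oscilloscope .CSV traces parsed above):

ana_color('clear_blue', 'ws_blue', 'blue')   # writes results_blue.pdf, resultsU_blue.pdf, pulse_blue.*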
Example #31
0
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(0., 1., 10)
y = np.exp(3. * x) * np.sin(3. * x)

s1 = InterpolatedUnivariateSpline(x, y, k=1)
s3 = InterpolatedUnivariateSpline(x, y, k=3)

print(s1(0.234))
print(s1.integral(0.2, 0.8))
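The cubic spline s3 is built but never queried; it supports the same calls, and comparing the two shows the effect of the spline order:

print(s3(0.234))
print(s3.integral(0.2, 0.8))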

Example #32
0
try:
    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline
except ImportError:
    print('This file requires the scipy package to run properly. Please see the readme for instructions on how to install this package.')
import os
import sys

# Creating information from principal components to make w_0, w_a approximation

lok = os.path.dirname(sys.argv[0]) 
zdat = np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 0, skip_header =1)
adat = np.flipud(1.0/(1.0+zdat))
a1 = np.flipud(np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 1, skip_header =1))
a2 = np.flipud(np.genfromtxt(lok+'/Growth/PC_1234.dat','float', usecols = 2, skip_header =1))
e1 = InterpolatedUnivariateSpline(adat, a1, k=3)
e2 = InterpolatedUnivariateSpline(adat, a2, k=3)
e12 = InterpolatedUnivariateSpline(adat, a1**2.0, k=3)
e22 = InterpolatedUnivariateSpline(adat, a2**2.0, k=3)
norm1 = e12.integral(0.1, 1.0)
norm2 = e22.integral(0.1, 1.0)
con1 = 1.0/norm1
con2 = 1.0/norm2
beta1 = con1*e1.integral(0.1, 1.0)
beta2 = con2*e2.integral(0.1, 1.0)
gamma1s = InterpolatedUnivariateSpline(adat, adat*a1, k=3)
gamma2s = InterpolatedUnivariateSpline(adat, adat*a2, k=3)
gamma1 = con1*gamma1s.integral(0.1, 1.0)
gamma2 = con2*gamma2s.integral(0.1, 1.0)


def ParFile(loc, name, g, g2, g3, Aterm, x1min, x1max,x2min, x2max, y1min, y1max,y2min, y2max,c1min, c1max,c2min, c2max,umin, umax, fields):
    ParamFile = open(loc+'/Parameters/'+name+'.dat', 'w')
    ParamNeeds = [name, str(g), (str(g2)), (str(g3)), (str(Aterm)), x1min, x1max,x2min, x2max, y1min, y1max,y2min, y2max,c1min, c1max,c2min, c2max,
                    umin, umax, fields]
Example #33
0
    def do_gaussian_fit(self, axis, data):
        """ Perform a gaussian fit.

        @param axis:
        @param data:
        @return:
        """

        model, params = self._fit_logic.make_gaussian_model()
        if len(axis) < len(params):
            self.log.warning('Fit could not be performed because number of '
                    'parameters is smaller than data points.')
            return self.do_no_fit()

        else:

            parameters_to_substitute = dict()
            update_dict=dict()

            #TODO: move this to "gated counter" estimator in fitlogic
            #      make the filter an extra function shared and usable for other
            #      functions
            gauss = gaussian(10, 10)
            data_smooth = filters.convolve1d(data, gauss/gauss.sum(), mode='mirror')

            # integral of data corresponds to sqrt(2*pi) * Amplitude * Sigma
            function = InterpolatedUnivariateSpline(axis, data_smooth, k=1)
            Integral = function.integral(axis[0], axis[-1])
            amp = data_smooth.max()
            sigma = Integral / amp / np.sqrt(2 * np.pi)
            amplitude = amp * sigma * np.sqrt(2 * np.pi)

            update_dict['offset']    = {'min': 0,          'max': data.max(), 'value': 0, 'vary': False}
            update_dict['center']    = {'min': axis.min(), 'max': axis.max(), 'value': axis[np.argmax(data)]}
            update_dict['sigma']     = {'min': -np.inf,    'max': np.inf,     'value': sigma}
            update_dict['amplitude'] = {'min': 0,          'max': np.inf,     'value': amplitude}

            result = self._fit_logic.make_gaussian_fit(x_axis=axis,
                                                       data=data,
                                                       estimator=self._fit_logic.estimate_gaussian_peak,
                                                       units=None,  # TODO
                                                       add_params=update_dict)
            # 1000 points in x axis for smooth fit data
            hist_fit_x = np.linspace(axis[0], axis[-1], 1000)
            hist_fit_y = model.eval(x=hist_fit_x, params=result.params)

            param_dict = OrderedDict()

            # create the proper param_dict with the values:
            param_dict['sigma_0'] = {'value': result.params['sigma'].value,
                                     'error': result.params['sigma'].stderr,
                                     'unit' : 'Occurrences'}

            param_dict['FWHM'] = {'value': result.params['fwhm'].value,
                                  'error': result.params['fwhm'].stderr,
                                  'unit' : 'Counts/s'}

            param_dict['Center'] = {'value': result.params['center'].value,
                                    'error': result.params['center'].stderr,
                                    'unit' : 'Counts/s'}

            param_dict['Amplitude'] = {'value': result.params['amplitude'].value,
                                       'error': result.params['amplitude'].stderr,
                                       'unit' : 'Occurrences'}

            param_dict['chi_sqr'] = {'value': result.chisqr, 'unit': ''}


            return hist_fit_x, hist_fit_y, param_dict, result
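The sigma estimate above uses the Gaussian area relation A = amplitude * sigma * sqrt(2*pi); a standalone sanity check of that step (synthetic data, not part of the original module):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

x = np.linspace(-5., 5., 201)
amp_true, sigma_true = 3.0, 0.8
y = amp_true * np.exp(-x**2 / (2 * sigma_true**2))
area = InterpolatedUnivariateSpline(x, y, k=1).integral(x[0], x[-1])
print(area / y.max() / np.sqrt(2 * np.pi))   # ~0.8, recovers sigma_true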
Example #34
0
File: postborn.py Project: cmbant/CAMB
def get_field_rotation_power_from_PK(params, PK, chi_source, lmax=20000, acc=1, lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1), np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)), np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s,k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(l,l') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source) ** 2 / chis ** 2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, l in enumerate(ls):
        k = (l + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k ** 4, cchi)
        M[i, :] = cl * l ** 4  # note we don't attempt to be accurate beyond lowest Limber
    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2), np.arange(25, 200, 10 // acc), np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = InterpolatedUnivariateSpline(ls, diagm)

    def high_curl_integrand(ll, lp):
        lp = lp.astype(int)  # np.int was removed from modern NumPy
        r2 = (np.float64(ll) / lp) ** 2
        return lp * r2 * diagmsp(lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):

        l = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, l * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in l', with denser around l~l'
            lcalc = np.hstack((lsall[0:20:4],
                               lsall[29:ll - 200:35],
                               lsall[ll - 190:ll + 210:6],
                               lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)  # even and don't need zero
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / l
            lfact = np.sqrt(1 + lrat ** 2 - 2 * cosphi * lrat)
            lnorm = l * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0

            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(w, (np.sin(phi) / lfact ** 2 * (cosphi - lrat)) ** 2 *
                                    np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = InterpolatedUnivariateSpline(lcalc, tmps)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi) ** 2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
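A hedged driver sketch (cosmology values are illustrative; cl_kappa_limber and the rest of the module are assumed importable, and the PK interpolator comes from CAMB's standard helper):

import camb
pars = camb.set_params(H0=67.5, ombh2=0.022, omch2=0.122)
results = camb.get_background(pars)
chi_source = results.comoving_radial_distance(1100.)   # source at last scattering, illustrative
PK = camb.get_matter_power_interpolator(pars, nonlinear=True, hubble_units=False,
                                        k_hunit=False, kmax=100., zmax=1100.)
lsamp, clcurl = get_field_rotation_power_from_PK(pars, PK, chi_source, lmax=5000)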
Example #35
0
def DataForSpline(LivetimeFile=None,
                    EAFile=None,
                    DispersonFile=None,
                    Mchi=None,
                    WindowEMIN=None,
                    WindowEMAX=None,
                    WhicArray='Front',
                    ):
                    

    #log of the DM mass
    y=np.log10(Mchi)

    
    # Returns 1) an array of the weighted exposure (i.e. livetime times
    # effective area) for an average Healpix pixel, given the log of the DM
    # mass y, as a function of instrument angle, and 2) the instrument angle.
    WeightedEffArea,CosAveLTcube,TotalExposure=WeightedExposure(LivetimeFile=LivetimeFile,EAFile=EAFile,y=y)


    
    #dispersion
    #plotdifferentEsposuresFordifferntenergies(SplineEA=SplineEA,CosAveLTcube=CosAveLTcube,AveHealpix=AveHealpix)
    Norm,LS1,LS2,RS1,RS2,BIAS=SplinesforDispersionEq(DispersonFile)

    Dic={
        'Mchi':Mchi,
        'Norm':Norm,
        'LS1':LS1,
        'LS2':LS2,
        'RS1':RS1,
        'RS2':RS2,
        'Bias':BIAS,
        'WhicArray':WhicArray,
        }

    # Returns an array of the energy dispersion given a range of observed energies (DeltaE)
    # and a given instrument inclination angle cth; passes a dictionary of the elements
    # from the dispersion function (Dic) and the log of the DM mass (y).
    def fsum(x=None,cth=None):
        return ED_array(DeltaE=x,CTheta=cth,Dic=Dic,y=y)
    
    x=np.linspace(WindowEMIN/3.,WindowEMAX*3,1600)
    fsumTheta=np.zeros(len(x))

    #average over the instrument angle weighted by the Effective Exposure as a function of instrument angle
    for i,cth in enumerate(CosAveLTcube):
        fsumTheta=fsumTheta+fsum(x=x,cth=cth)*WeightedEffArea[i]

    #normalize the Averaged dispersion function.
    fsumTheta=fsumTheta/np.trapz(y=fsumTheta,x=x)
    print 'integration over the dispersion function', np.trapz(y=fsumTheta,x=x)  
        
        
    # Generate a spline of the averaged dispersion function.
    Spline_fsumTheta=InterpolatedUnivariateSpline(x,fsumTheta)
    print 'integral of Spline', Spline_fsumTheta.integral(WindowEMIN/2.,WindowEMAX*2)

    return fsum,fsumTheta,Spline_fsumTheta,x,TotalExposure
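A hedged invocation sketch (all file paths and the mass/energy values are placeholders; WeightedExposure, SplinesforDispersionEq, and ED_array come from the same module):

fsum, fsumTheta, Spline_fsumTheta, x, TotalExposure = DataForSpline(
    LivetimeFile='ltcube.fits',            # placeholder path
    EAFile='aeff_front.fits',              # placeholder path
    DispersonFile='edisp_front.fits',      # placeholder path
    Mchi=100.,                             # placeholder DM mass
    WindowEMIN=50., WindowEMAX=200.)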
Example #36
0
    def n_bin(self, Mlow, Mhigh, a):
        M = np.logspace(np.log10(Mlow), np.log10(Mhigh), num=1000)
        lM = np.log(M)
        dndlM = np.array([self.dndlM(Mi, a) for Mi in M])
        spl = IUS(lM, dndlM)
        return spl.integral(np.log(Mlow), np.log(Mhigh))
class tinker_mass_function(object):
    """A python implementation of the tinker mass function.

    Note: This requires Matt Becker's cosmocalc.
    """

    def __init__(self, cosmo_dict, redshift=0.0, l10M_bounds=[11,16]):
        """Create a TMF_model object.

        Note: the model is created with the default tinker2008_appendix mass function parameters.
        Note: mass units are all assumed to be Msun/h unless otherwise stated.

        Args:
            cosmo_dict (dictionary): Dictionary of cosmological parameters. Only keys necessary to function are "om" and "h" for Omega_m and H0/100.
            redshift (float): Redshift of the mass function; default 0.0.
            l10M_bounds (array_like): Log10 of the upper and lower mass bounds for the splines; defaults to [11,16].
        """
        self.l10M_bounds = np.array(l10M_bounds) #log_10 Mass bounds in Msun/h
        self.redshift = redshift
        self.scale_factor = 1./(1. + self.redshift)
        self.set_new_cosmology(cosmo_dict)
        self.build_splines()
        self.set_parameters(1.97, 1.0, 0.51, 1.228, 0.482)

    def set_parameters(self, d, e, f, g, B=None):
        """Specify the tinker parameters and calculate
        quantities that only depend on them.

        Args:
            d (float): Tinker parameter.
            e (float): Tinker parameter.
            f (float): Tinker parameter.
            g (float): Tinker parameter.
            B (float; optional): Normalization coefficient. If B isn't specified then
               it's calculated from d, e, f, g such that the mass function is guaranteed
               to be normalized.
        """
        self.params = np.array([d, e, f, g, B])
        gamma_d2 = special.gamma(d*0.5)
        gamma_f2 = special.gamma(f*0.5)
        log_g = np.log(g)
        gnd2 = g**(-d*0.5)
        gnf2 = g**(-f*0.5)
        ed = e**d
        if not B:
            self.B_coefficient = 2.0/(ed * gnd2 * gamma_d2 + gnf2 * gamma_f2)
            B2 = self.B_coefficient**2
            self.dBdd = 0.25 * B2 * ed * gnd2 * gamma_d2 * (log_g - 2.0 - special.digamma(d*0.5))
            self.dBde = -0.5 * B2 * d * ed/e * gnd2 * gamma_d2
            self.dBdf = 0.25 * B2 * gnf2 * gamma_f2 * (log_g - special.digamma(f*0.5))
            self.dBdg = 0.25 * B2 * (d * ed * gnd2/g * gamma_d2 + f* gnf2/g * gamma_f2)
        else:
            self.B_coefficient = B
            self.dBdd = self.dBde = self.dBdf = self.dBdg = 0
        self.make_dndlM_spline()
        return

    def set_new_cosmology(self, cosmo_dict):
        """Specify a new set of cosmological parameters and then build splines that depend on these.
        
        Args:
            cosmo_dict (dictionary): Keys are cosmological parameters, specifically om for Omega_matter and h for Hubble constant/100.
        """
        print "setting cc cos:",cosmo_dict
        cc.set_cosmology(cosmo_dict)
        print "sigma inside = ",cc.sigmaMtophat(1e14, 0.25)
        Om = cosmo_dict["om"]
        self.rhom = Om * rhocrit  # Msun h^2 / Mpc^3
        self.cosmo_dict = cosmo_dict
        return

    def build_splines(self):
        """Build the splines needed for integrals over mass bins.
        """
        lM_min,lM_max = self.l10M_bounds
        M_domain = np.logspace(lM_min-1, lM_max+1, num=1000)
        sigmaM = np.array([cc.sigmaMtophat(M, self.scale_factor) 
                           for M in M_domain])
        self.sigmaM_spline = IUS(M_domain, sigmaM)
        ln_sig_inv_spline = IUS(M_domain, -np.log(sigmaM))
        deriv_spline = ln_sig_inv_spline.derivative()
        self.deriv_spline = deriv_spline
        return

    def dndlM(self, lM, params=None):
        """Tinker2008_appendix C mass function.

        Args:
            lM (float or array_lke): Ln(Mass) at which to evaluate the mass function.
            params (array_like; optional): the tinker parameters; default is none, in which case it will use
                the parameters already set.

        Returns:
            dndlM (float or array_like): M*dn/dM; the mass function.
        """
        M = np.exp(lM)
        sigma = self.sigmaM_spline(M)
        if params is None: 
            d, e, f, g, B = self.params
        else: 
            d, e, f, g, B = params
            self.set_parameters(d, e, f, g, B)
        g_sigma = self.B_coefficient*((sigma/e)**-d + sigma**-f) * np.exp(-g/sigma**2)
        return g_sigma * self.rhom * self.deriv_spline(M)

    def make_dndlM_spline(self):
        """Creates a spline for dndlM so that the integrals
        over mass bins are faster
        """
        bounds = np.log(10**self.l10M_bounds)
        lM = np.linspace(bounds[0], bounds[1], num=100)
        dndlM = np.array([self.dndlM(lMi) for lMi in lM])
        self.dndlM_spline = IUS(lM, dndlM)
        return lM, dndlM

    def covariance_in_bins(self, lM_bins, Cov_p, use_numerical_derivatives=False):
        """Compute the covariance between each mass bin.
            Args:
                lM_bins (array_like): List of mass bin edges. Shape must be Nbins by 2. Units are Msun/h.
            Cov_p (array_like): Either the variances of the tinker parameters or a matrix with covariances between all tinker parameters.
                use_numerical_derivatives (boolean): Flag to decide how to take the derivatives; default False.

            Returns:
                Cov_NN (array_like): Matrix that is Nbins by Nbins of the covariances between all mass bins.
        """
        dndp = derivs_in_bins(self, lM_bins, use_numerical_derivatives)
        if len(np.shape(Cov_p)) == 1: Cov_p = np.diag(Cov_p)
        cov = np.zeros((len(lM_bins),len(lM_bins)))
        for i in range(len(lM_bins)):
            for j in range(len(lM_bins)):
                cov[i,j] = np.dot(dndp[i], np.dot(Cov_p, dndp[j]))
        return cov

    def n_in_bins(self, lM_bins, params=None):
        """
        IMPORTANT: need to change this function. It should switch
        to using a spline for dn/dm and then using
        the integrate function from scipy.


        Compute the tinker mass function in each mass bin.

        Args:
            lM_bins (array_like): List of mass bin edges. Shape must be Nbins by 2. Units are Msun/h.

        Returns:
            n (array_like): Tinker halo mass function at each mass bin. Units are number/ (Mpc/h)^3.
        """
        lM_bins = np.log(10**lM_bins) #switch to natural log
        return np.array([self.dndlM_spline.integral(lMlow, lMhigh) for lMlow, lMhigh in lM_bins])
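A minimal usage sketch (requires Matt Becker's cosmocalc and the module's rhocrit constant; numbers are illustrative):

tmf = tinker_mass_function({"om": 0.3, "h": 0.7}, redshift=0.25)
lM_bins = np.array([[13.0, 13.5], [13.5, 14.0]])   # log10(M) bin edges, Msun/h
print(tmf.n_in_bins(lM_bins))                      # abundances per (Mpc/h)^3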
Example #38
0
def main():
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    t_0 = MPI.Wtime()
    N_dset = (nrbin + 1) * nrbin // 2  # number of C^ij(l) data sets
    #data_type_size = 8                     # number of bytes for double precison data
    zbin = np.zeros(nrbin +
                    1)  # for both zbin and chibin, the first element is 0.
    chibin = np.zeros(nrbin + 1)
    shape_noise = np.zeros(nrbin)

    num_kin = 506  # the number of boundary points of k bins from the input matter power spectrum file
    # consider num_kbin as the input number of k bins
    num_kbin = num_kin - 1  # k_max should be larger than lmax/xmax, -1 means disregarding the last term

    k_par = np.zeros(num_kbin)  # Input k and Pk for the calculation of C^ij(l)
    Pk_par = np.zeros(num_kbin)

    # l (the parameter of C^ij(l)) value equals to l_min, l_min+delta_l, ..., l_max-delta_l
    # We choose the case below:
    l_max = 2002  # l_max < X_max*k_max
    #l_max = 22
    l_min = 1
    delta_l = 3
    num_l = (l_max - l_min) // delta_l + 1

    c = 2.99792458e5  # speed of light unit in km/s
    H_0 = 100.0  # unit: h * km/s/Mpc
    sigmae = 0.021  # Tully-Fisher case \sigma_e from Eric's paper
    scale_n = 1.10  # Tully-Fisher total surface number density (unit: arcmin^-2), from Eric et al.(2013), Table 2 (TF-Stage)

    cross_const = (1.5 * cosmic_params.omega_m)**2.0 * (
        H_0 / c
    )**4.0  # It's the coefficent constant of convergence power spectrum, see Eq.(21)
    #print 'cross_const', cross_const
    sr_const = np.pi**2.0 / 1.1664e8  # 1 square arcminute = sr_const steradian
    constx = sr_const / cross_const  # The constx connects shot noise with C^ij(l)

    idir0 = '/Users/ding/Documents/playground/shear_ps/SVD_ps/'
    inputf = idir0 + 'Input_files/nz_stage_IV.txt'  # Input file of n(z) which is the galaxy number density distribution in terms of z
    # Here center_z denotes z axis of n(z). It may not be appropriate since we don't have redshift bin setting
    center_z, n_z = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    spl_nz = InterpolatedUnivariateSpline(center_z, n_z)
    n_sum = spl_nz.integral(center_z[0],
                            center_z[-1])  # Calculate the total number density
    #print(n_sum)
    scale_dndz = scale_n / n_sum
    n_z = n_z * scale_dndz  # rescale n(z) to match the total number density from the data file equal to scale_n
    spl_nz = InterpolatedUnivariateSpline(
        center_z, n_z)  # Interpolate n(z) in terms of z using spline

    #nz_test = interpolate.splev(center_z, tck_nz, der=0)
    #print(abs(n_z- nz_test)<1.e-7)

    # calculate total number density n^i (integrate dn/dz) in the ith tomographic bin
    def n_i_bin(zbin, i):
        zi = zbin[i]
        zf = zbin[i + 1]
        # rescale n(z) to match the total number density from the data file equal to scale_n
        ##n_i = scale_dndz * integrate.quad(n_z, zi, zf, epsabs=1.e-7, epsrel=1.e-7)[0]
        n_i = spl_nz.integral(zi, zf)
        return n_i

    G_0 = growth_factor(
        0.0, cosmic_params.omega_m)  # G_0 at z=0, normalization factor
    num_z = np.size(
        center_z)  # the number of z bins of n(z), obtained from the data file
    chi_z = np.zeros(num_z)

    for i in range(num_z):
        chi_z[i] = comove_d(
            center_z[i]
        ) * c / H_0  # with unit Mpc/h, matched with that of ell/k
    # we want interpolate z as a function of chi
    spl_zchi = InterpolatedUnivariateSpline(chi_z,
                                            center_z)  # z as a function of chi

    # here interpolate \chi as a function of z
    spl_chiz = InterpolatedUnivariateSpline(center_z, chi_z)

    # bin interval
    z_min = center_z[0]
    z_max = 2.0  # based on the data file, at z=2.0, n(z) is very small
    zbin_avg = (z_max - z_min) / float(nrbin)
    for i in range(nrbin):
        zbin[i] = i * zbin_avg + z_min
    zbin[-1] = z_max

    # print('Xmax', c/H_0*comove_d(zbin[-1]))
    # print('nbar first element: ', n_i_bin(zbin, 0))

    # Note that here chibin[0] is not equal to 0, since there is redshift cut at low z. Unit is Mpc/h
    for i in range(0, nrbin + 1):
        chibin[i] = comove_d(zbin[i]) * c / H_0

    # 3D power spectrum is obtained from CAMB using the above cosmological parameters.
    ##inputf = fpath+'test_matterpower.dat'# if it's used, the cosmological parameters should also be changed correspondingly.
    inputf = idir0 + 'Input_files/CAMB_Planck2015_matterpower.dat'
    k_camb, Pk_camb = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    Pk_camb_spl = InterpolatedUnivariateSpline(k_camb, Pk_camb)

    ifile = idir0 + 'Input_files/transfer_fun_Planck2015.dat'
    kk, Tf = np.loadtxt(ifile,
                        dtype='f8',
                        comments='#',
                        usecols=(0, 1),
                        unpack=True)
    ##print(kk)
    k_0 = 0.001  # unit h*Mpc^-1
    Pk_0 = Pk_camb_spl(k_0)
    Tf_spl = InterpolatedUnivariateSpline(kk, Tf)
    Tf_0 = Tf_spl(k_0)
    P0_a = Pk_0 / (pow(k_0, cosmic_params.ns) * Tf_0**2.0)
    Psm_transfer = P0_a * pow(
        k_camb, cosmic_params.ns
    ) * Tf**2.0  # Get primordial (smooth) power spectrum from the transfer function
    Pk_now_spl = InterpolatedUnivariateSpline(k_camb, Psm_transfer)

    # ------ This part calculates the Sigma^2_{xy} using Pwig from CAMB. -------#
    z_mid = z_max / 2.0
    q_BAO = 110.0  # unit: Mpc/h, the sound horizon scale
    Sigma2_integrand = lambda k: Pk_camb_spl(k) * (1.0 - np.sin(k * q_BAO) /
                                                   (k * q_BAO))
    pre_factor = 1.0 / (3.0 * np.pi**2.0) * (
        growth_factor(z_mid, cosmic_params.omega_m) / G_0)**2.0
    Sigma2_xy = pre_factor * integrate.quad(
        Sigma2_integrand, k_camb[0], k_camb[-1], epsabs=1.e-03,
        epsrel=1.e-03)[0]
    print('At z=', z_mid, 'Sigma2_xy=', Sigma2_xy)

    #----------------------------------------------------------------------------#
    def Pk_par_integrand(k):
        if Pk_type == 'Pwig_linear':
            Pk_par = Pk_camb_spl(k)
        elif Pk_type == 'Pnow':
            Pk_par = Pk_now_spl(k)
        elif Pk_type == 'Pwig_nonlinear':
            Pk_par = Pk_now_spl(k) + (Pk_camb_spl(k) - Pk_now_spl(k)) * np.exp(
                -k**2.0 * Sigma2_xy / 2.0)
        return Pk_par

    odir1 = 'mpi_preliminary_data_{}/'.format(Pk_type)
    if Psm_type == 'Pnow':
        odir1_Gm = odir1 + 'set_Pnorm_Pnow/'
    else:
        odir1_Gm = odir1

    odir = odir0 + odir1 + 'comm_size{}/'.format(size)
    odir_Gm = odir0 + odir1_Gm + 'comm_size{}/'.format(size)

    if rank == 0:
        if not os.path.exists(odir):
            os.makedirs(odir)

        if not os.path.exists(odir_Gm):
            os.makedirs(odir_Gm)
    comm.Barrier()

    print('odir_Gm:', odir_Gm, 'from rank:', rank)
    prefix = 'Tully-Fisher'
    Cijl_outf_prefix = odir + prefix  # The prefix of output file name
    Gm_outf_prefix = odir_Gm + prefix
    iu1 = np.triu_indices(
        nrbin)  # Return the indices for the upper-triangle of an (n, m) array

    #------------------------------------------------
    def get_shapenoise(rank):
        if rank == 0:
            # Calculate covariance matrix of Pk, the unit of number density is per steradians
            for i in range(nrbin):
                shape_noise[i] = sigmae**2.0 / n_i_bin(zbin, i)
            pseudo_sn = shape_noise * constx

            # Output the shape noise (includes the scale factor) in a file
            outf = odir0 + odir1 + prefix + '_pseudo_shapenoise_{0}rbins.out'.format(
                nrbin)  # basic variable
            np.savetxt(outf, pseudo_sn, fmt='%.15f', newline='\n')

    #---------------------------------------------------
    # Interpolate g^i in terms of chi (comoving distance)
    ifile = './lens_eff_g_i/g_i_{}rbins.npz'.format(nrbin)
    npz_data = np.load(ifile)
    chi_array = npz_data['chi_array'] * c / H_0
    g_i_matrix = npz_data['g_i_matrix']
    spl_gi_list = []
    for i in range(nrbin):
        spl_gi = InterpolatedUnivariateSpline(chi_array, g_i_matrix[i, :])
        spl_gi_list.append(spl_gi)

    ifile = idir0 + 'Input_files/KW_stage_IV_num_ell_per_rank_comm_size{}.dat'.format(
        size)
    num_ell_array = np.loadtxt(ifile, dtype='int', comments='#', usecols=(1, ))
    num_l_in_rank = num_ell_array[rank]

    #------------------------------------------------------------------------------------------------#
    #--------------------------- Part 1: calculate C^ij(l) ------------------------------------------#
    # Note: Don't generate output G matrix for output P(k) in the process with C^ij(l), because the interval of k bins are
    # different from those of the G matrix for 'observered' data C^ij(l)!
    #------------------------------------------------------------------------------------------------#
    # This is for output G' matrix.
    def Gm_integrand_out(k, c_i, spl_gi, spl_gj, ell):
        chi_k = ell / k
        # Since the diameter of the Milky Way is about 0.03 Mpc, we assume that the smallest interval between chi_k and chibin[i+1] is larger than 0.1 Mpc/h.
        if (chibin[c_i + 1] - 1.e-8) < chi_k:
            return 0.0
        else:
            #z_k = interpolate.splev(chi_k, tck_zchi, der=0)
            z_k = spl_zchi(chi_k)
            GF = (growth_factor(z_k, cosmic_params.omega_m) / G_0)**2.0
            return (1.0 + z_k
                    )**2.0 * spl_gi(chi_k) * spl_gj(chi_k) * ell / k**2.0 * GF

    # This is for output C^{ij}(l).
    def Gm_integrand_in(k, c_i, spl_gi, spl_gj, ell):
        return Gm_integrand_out(k, c_i, spl_gi, spl_gj,
                                ell) * Pk_par_integrand(k)

    def get_Cijl(comm, rank):
        # Output the Cij_l array in which each term is unique.
        def cal_cijl(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = ell * alpha
            #offset_cijl = n_l * N_dset * data_type_size
            c_temp = np.zeros((nrbin, nrbin))
            for c_i in range(nrbin):
                for c_j in range(c_i, nrbin):
                    # we could use smaller epsrel, but it would require more integration points to achieve that precision.
                    res = integrate.quad(Gm_integrand_in,
                                         k_camb[0],
                                         k_camb[-1],
                                         args=(c_i, spl_gi_list[c_i],
                                               spl_gi_list[c_j], ell),
                                         limit=200,
                                         epsabs=1.e-6,
                                         epsrel=1.e-12)
                    c_temp[c_i][c_j] = res[0]
                    abserr = res[1]
                    if res[0] != 0.0:
                        relerr = abserr / res[0]
                    else:
                        relerr = 0.0
                #c_temp[c_i][c_i : nrbin] = np.dot(gmatrix_jk, Pk_par)
            array_cij = np.asarray(
                c_temp[iu1],
                dtype=np.float64)  # extract upper-triangle of c_temp
            if rank == 0:
                #print('rank:', rank, 'array_cij:', array_cij)
                print('ell from rank', rank, 'is', ell,
                      'abs err of Cijl is %.4e' % abserr,
                      'and rel err is %.4e' % relerr)
            return ell, array_cij, abserr, relerr

        Cijl_file = Cijl_outf_prefix + '_Cij_l_{0}rbins_{1}kbins_CAMB_rank{2}.bin'.format(
            nrbin, num_kbin, rank)  # basic variable
        Cijl_fwriter = open(Cijl_file, 'wb')

        err_info = np.array([], dtype=np.float64).reshape(0, 3)
        for l in range(num_l_in_rank):
            ell, cijl, abserr, relerr = cal_cijl(l, rank)
            cijl.tofile(Cijl_fwriter, sep="")
            err_info = np.vstack((err_info, np.array([ell, abserr, relerr])))
        Cijl_fwriter.close()
        err_info_ofile = Cijl_outf_prefix + '_integration_error_Cij_l_{0}rbins_{1}kbins_CAMB_rank{2}.out'.format(
            nrbin, num_kbin, rank)
        np.savetxt(err_info_ofile,
                   err_info,
                   fmt='%i  %.4e  %.4e',
                   delimiter=' ',
                   newline='\n',
                   header='ell    abs_err   rel_err',
                   comments='#')
        #comm.Barrier()

    #-----------------------------------------------------------------------------------------------#
    #------------------------- Part 2: get Gm_cross_out for output P(k) ----------------------------#

    ######------------- set up output k space and G' matrix for output Pk ----------------###########
    def get_Gm_out(comm, rank):
        # construct Gmatrix: Gout for output Pk with num_kout kbins
        # Note: The algorithm is the same as that calculating C^ij(l) in Part 1. Here we use a simplified (looks like) way to get Gmatrix_l.
        def cal_G(l, rank):
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = ell * alpha
            #offset_Gm = n_l * N_dset * num_kout * data_type_size

            Gmatrix_l = np.zeros((N_dset, num_kout))
            # j denotes column
            for j in range(num_kout):
                # i denotes row
                for i in range(N_dset):
                    # redshift bin i: rb_i
                    rb_i = iu1[0][i]
                    # In Python, eps should be larger than 1.e-15 to be safe. The smallest chi from the
                    # corresponding output k bin should be smaller than the upper boundary of chi from the ith tomographic bin.
                    if chibin[rb_i + 1] > ell / kout[j + 1]:
                        ##krb_i = ell/(chibin[rb_i]+1.e-12)  # avoid to be divided by 0
                        rb_j = iu1[1][i]
                        # more precise calculation of Gmatrix_l
                        # the j index of Pnorm_out denotes k bin id, different from the index rb_j of g_j
                        res = integrate.quad(Gm_integrand_out,
                                             kout[j],
                                             kout[j + 1],
                                             args=(rb_i, spl_gi_list[rb_i],
                                                   spl_gi_list[rb_j], ell),
                                             epsabs=1.e-6,
                                             epsrel=1.e-6)
                        Gmatrix_l[i][j] = res[0] * Pnorm_out[j]
                        abserr = res[1]
                        if res[0] != 0.0:
                            relerr = abserr / res[0]
                        else:
                            relerr = 0.0
            #print Gmatrix_l[:, 0]
            if rank == 0:
                #print('rank:', rank, 'Gm:', Gmatrix_l)
                print('ell from rank', rank, 'is', ell,
                      'abs err of G is %.4e' % abserr,
                      'rel err is %.4e' % relerr)
            return ell, Gmatrix_l, abserr, relerr

        kout, k_mid = np.zeros(num_kout + 1), np.zeros(num_kout)
        k_low, k_high = 0.01, 1.0  # This set may need to be checked more!
        kout[0], kout[1], kout[-1] = k_camb[0], k_low, k_camb[-1]
        lnk_factor = np.log(k_high / k_low) / (num_kout - 2)

        for i in range(2, num_kout):
            kout[i] = kout[i - 1] * np.exp(lnk_factor)
        #print kout

        for i in range(num_kout):
            k_mid[i] = (kout[i] + kout[i + 1]) / 2.0
        if Psm_type == 'Pnorm' or Psm_type == 'default':
            Pnorm_out = 1.5e4 / (
                1.0 + (k_mid / 0.05)**
                2.0)**0.65  # from Eisenstein & Zaldarriaga (2001)
        elif Psm_type == 'Pnow':
            Pnorm_out = Pk_now_spl(
                k_mid
            )  # Test how the change of Pnow could influence the eigenvalues from SVD routine.

        # Gm_cross_out uses selected new k bins
        Gm_cross_file = Gm_outf_prefix + '_Gm_cross_out_{0}rbins_{1}kbins_CAMB_rank{2}.bin'.format(
            nrbin, num_kout, rank)  # basic variable
        Gm_cross_fwriter = open(Gm_cross_file, 'wb')

        err_info = np.array([], dtype=np.float64).reshape(0, 3)
        for l in range(num_l_in_rank):
            ell, Gm, abserr, relerr = cal_G(l, rank)
            Gm.tofile(Gm_cross_fwriter, sep="")
            err_info = np.vstack(
                (err_info, np.array([ell, abserr, relerr]))
            )  # If relerr = xx/0, it doesn't seem to be appendable on array.
        Gm_cross_fwriter.close()
        err_info_ofile = Gm_outf_prefix + '_integration_error_Gm_cross_out_{0}rbins_{1}kbins_CAMB_rank{2}.out'.format(
            nrbin, num_kout, rank)
        np.savetxt(err_info_ofile,
                   err_info,
                   fmt='%i  %.4e  %.4e',
                   delimiter=' ',
                   newline='\n',
                   header='ell    abs_err   rel_err',
                   comments='#')

    if cal_sn == "True":
        get_shapenoise(rank)
    if cal_cijl == "True":
        get_Cijl(comm, rank)
    #comm.Barrier()
    t_1 = MPI.Wtime()

    if cal_Gm == "True" and Pk_type == 'Pwig_nonlinear':
        get_Gm_out(comm, rank)
    #comm.Barrier()
    t_2 = MPI.Wtime()
    if rank == 0:
        print('Running time for Cijl:', t_1 - t_0)
        print('Running time for G matrix:', t_2 - t_1)


#######################################################

    def plot_numd_spectroscopy():
        odir_data = "./numd_distribute_spectro/"
        if not os.path.exists(odir_data):
            os.makedirs(odir_data)
        odir_fig = odir_data + 'nz_fig/'
        if not os.path.exists(odir_fig):
            os.makedirs(odir_fig)
        nd_avg = []
        for i in range(nrbin):
            nd_avg.append(n_i_bin(zbin, i) / (zbin[i + 1] - zbin[i]))
        ofile = odir_data + 'gal_numden_spectroz_{}rbins.out'.format(nrbin)
        header_line = ' bin_boundary(low)   nz_avg'
        np.savetxt(ofile,
                   np.array([zbin[0:-1], nd_avg]).T,
                   fmt='%.7f',
                   newline='\n',
                   header=header_line,
                   comments='#')

        print("nd_avg:", nd_avg, "zbin:", zbin)
        fig, ax = plt.subplots(figsize=(8, 6))
        bars = ax.bar(x=zbin[0:-1],
                      height=nd_avg,
                      width=zbin_avg,
                      align='edge',
                      color='white',
                      edgecolor='grey')
        bars[11].set_color('r')
        print(bars)
        # n, bins, pathes = ax.hist(nd_avg, bins=nrbin, range=[zbin[0], zbin[-1]], align='left')
        # print(n, bins, pathes)
        ax.plot(center_z, n_z, 'k-', lw=2.0)
        ax.set_xlim([0.0, z_max])
        ax.set_ylim([0.0, 1.0])
        ax.set_xlabel(r'$z$', fontsize=20)
        #ax.set_ylabel('$n^i(z)$ $[\mathtt{arcmin}]^{-2}$', fontsize=20)
        ax.set_ylabel(r'$dn^i/dz \; [\mathtt{arcmin}]^{-2}$', fontsize=20)
        ax.minorticks_on()
        ax.tick_params('both', length=5, width=2, which='major', labelsize=15)
        ax.tick_params('both', length=3, width=1, which='minor')
        ax.set_title("KWL-Stage IV", fontsize=20)

        plt.tight_layout()
        figname = "gal_numden_{}rbins_spectro.pdf".format(nrbin)
        plt.savefig(odir_fig + figname)
        plt.show()
        plt.close()

    if show_nz == "True" and rank == 0:
        plot_numd_spectroscopy()
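The script is MPI-driven; a typical entry point, assuming the module-level flags (nrbin, Pk_type, Psm_type, alpha, num_kout, cal_sn, cal_cijl, cal_Gm, show_nz) are defined elsewhere, e.g. parsed from the command line:

if __name__ == '__main__':
    main()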
Example #39
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import integrate
data = np.loadtxt('podaci.txt')
tal = data[:, 0]   # wavelengths
inte = data[:, 1]  # intensities

spl = InterpolatedUnivariateSpline(tal, inte, k=3)
spl.set_smoothing_factor(0.1)  # inherited from UnivariateSpline; relaxes exact interpolation
xs = data[:, 0]
ys = spl(xs)
plt.plot(tal, inte, '.-')
plt.plot(xs, ys)
plt.show()

print(spl.integral(tal.min(), tal.max()))  # integrate over the data range; the spline is not defined beyond it