Example #1
def test_age():
    """Test integrated age against analytical age."""
    z = numpy.arange(0, 10.0, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.99],[0.01],[0.3]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.7
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    gyr = 1e9 * cc.yr_s

    tl = cd.lookback_time(z, **cosmo)
    age = cd.age(z, **cosmo)
    age_ana = cd.age_flat(z, **cosmo)

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, (tl/gyr)[i], ls=linestyle[i], color='0.5')
        pylab.plot(z, (age/gyr)[i], ls=linestyle[i], color='r')
        pylab.plot(z, (age_ana/gyr)[i], ls=linestyle[i], color='k')
    pylab.xlabel("redshift z")
    pylab.ylabel(r"age $t_L/$Gyr")

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, ((age - age_ana)/age_ana)[i], ls=linestyle[i], 
                   color='k')
        # Make sure errors are small:
        ntest.assert_array_less((numpy.abs((age - age_ana)/age_ana)[i]),
                                3e-13)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"age: (integral - analytical)/analytical")
Example #2
def test_GBL_tau_inst():
    """Test match between analytical and numerical tau with instant
    reionization.

    Also makes a plot reproducing figure 1 of arXiv:astro-ph/9812125v3.
    """
    dz = 0.05
    z = numpy.arange(0., 80. + 1.5 * dz, dz)

    # Fully ionized H and He
    x_ionH = 1.0
    x_ionHe = 2.0

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.3], [0.6], [1.0]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.65
    cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.
    cosmo['Y_He'] = 0.24
    cd.set_omega_k_0(cosmo)

    tau_inst = cr.optical_depth_instant(z,
                                        x_ionH=x_ionH,
                                        x_ionHe=x_ionHe,
                                        **cosmo)
    tau_int = cr.integrate_optical_depth(x_ionH, x_ionHe, z, **cosmo)

    linestyle = ['-', ':', '--']

    pylab.figure()
    pylab.subplot(2, 1, 1)
    pylab.title("Compare to GB&L fig. 1 (astro-ph/9812125v3.)")
    for i in range(len(linestyle)):
        pylab.plot(z, tau_inst[i], ls=linestyle[i], color='b')
        pylab.plot(z, tau_int[i], ls=linestyle[i], color='r')

    pylab.xlim(0, 80)
    pylab.ylim(0, 1)
    pylab.xlabel(r"$\mathrm{z_{ion}}$")
    pylab.ylabel(r"$\tau$")

    pylab.subplot(2, 1, 2)
    for i in range(len(linestyle)):
        pylab.plot(z,
                   1.e4 * (tau_int[i] - tau_inst[i]) / tau_inst[i],
                   ls=linestyle[i],
                   color='k')
        diff = (tau_int[i] - tau_inst[i]) / tau_inst[i]
        diff[numpy.isnan(diff)] = 0.0
        print("max fractional error in num. int. = %.3g" %
              numpy.max(numpy.abs(diff)))
        ntest.assert_array_less(numpy.abs(diff),
                                numpy.zeros(diff.shape) + 2.e-4)

    pylab.xlim(0, 40)
    pylab.xlabel(r"$\mathrm{z_{ion}}$")
    pylab.ylabel(r"$\mathrm{10^4 \times (num.\tau - ana.\tau)/ana.\tau}$")
Example #3
def test_GBL_tau_inst():
    """Test match between analytical and numerical tau with instant
    reionization.

    Also makes a plot reproducing figure 1 of arXiv:astro-ph/9812125v3.
    """
    dz = 0.05
    z = numpy.arange(0., 80. + 1.5*dz, dz)

    # Fully ionized H and He
    x_ionH = 1.0
    x_ionHe = 2.0

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.65
    cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.
    cosmo['Y_He'] = 0.24
    cd.set_omega_k_0(cosmo)

    tau_inst = cr.optical_depth_instant(z, x_ionH=x_ionH, x_ionHe=x_ionHe, 
                                        **cosmo)
    tau_int = cr.integrate_optical_depth(x_ionH, x_ionHe, z, **cosmo)

    linestyle = ['-', ':', '--']
    
    pylab.figure()
    pylab.subplot(2,1,1)
    pylab.title("Compare to GB&L fig. 1 (astro-ph/9812125v3.)")
    for i in range(len(linestyle)):
        pylab.plot(z, tau_inst[i], ls=linestyle[i], color='b')
        pylab.plot(z, tau_int[i], ls=linestyle[i], color='r')

    pylab.xlim(0,80)
    pylab.ylim(0,1)
    pylab.xlabel(r"$\mathrm{z_{ion}}$")
    pylab.ylabel(r"$\tau$")
    
    pylab.subplot(2,1,2)
    for i in range(len(linestyle)):
        pylab.plot(z, 
                   1.e4 * (tau_int[i] - tau_inst[i])/tau_inst[i], 
                   ls=linestyle[i], color='k')
        diff = (tau_int[i] - tau_inst[i]) / tau_inst[i]
        diff[numpy.isnan(diff)] = 0.0
        print ("max fractional error in num. int. = %.3g" % 
               numpy.max(numpy.abs(diff))
               )
        ntest.assert_array_less(numpy.abs(diff), 
                                numpy.zeros(diff.shape) + 2.e-4)

    pylab.xlim(0,40)
    pylab.xlabel(r"$\mathrm{z_{ion}}$")
    pylab.ylabel(r"$\mathrm{10^4 \times (num.\tau - ana.\tau)/ana.\tau}$")
Example #4
def test_GBL_tau_star():
    """Test tau_* against GB&L astro-ph/9812125v3.

    tau_* is a quantity used in optical_depth_instant.
    """
    z = 1.0

    # Fully ionized H and He
    x_ionH = 1.0
    x_ionHe = 2.0

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.65
    cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.
    cosmo['Y_He'] = 0.24
    cd.set_omega_k_0(cosmo)

    tau_inst, tau_star = cr.optical_depth_instant(z, 
                                                  x_ionH=x_ionH, 
                                                  x_ionHe=x_ionHe, 
                                                  return_tau_star=True,
                                                  **cosmo)
    print "tau_star = %.7f" % tau_star
    print ("tau_star/(h Omega_b) = %.7f =? 0.061" % 
           (tau_star / (cosmo['h'] * cosmo['omega_b_0'])))

    ntest.assert_approx_equal(tau_star / (cosmo['h'] * cosmo['omega_b_0']),
                              0.061,
                              2)

    print "(1 - Y_He/2) = %.3f =? 0.88" % (1. - (cosmo['Y_He']/2.))
    ntest.assert_approx_equal((1. - (cosmo['Y_He']/2.)),
                              0.88,
                              7)

    H_0 = cc.H100_s * cosmo['h']

    # s^-1 * Mpc s^-1 * Mpc^2 / Mpc^3 msun^-1 s^-2 / Msun -> 
    tau_star_explicit =  ((1. - (cosmo['Y_He']/2.)) * 
                          ((3. * H_0 * cosmo['omega_b_0'] * cc.c_light_Mpc_s *
                            cc.sigma_T_Mpc) / 
                           (8. * math.pi * cc.G_const_Mpc_Msun_s * 
                            (cc.m_p_g/cc.M_sun_g))))

    print "tau_star_explicit = %.7f =? tau_star" % tau_star_explicit
    ntest.assert_approx_equal(tau_star, tau_star_explicit, 3)
Example #5
def timeDelayDistanceForSIS(velocityDispersion, zLens, zSource):
    '''
    At the moment the Fermat potential is saved in unitless SIS form
    (i.e. deltaPHI), so it must be multiplied by

        eta0^2 * Ds / (Dls * Dl) * (1 + z)

    where eta0 = 4 pi (v/c)^2 * Dl * Dls / Ds.
    '''

    cosmo = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 1.}
    cosmo = dist.set_omega_k_0(cosmo)
    Dl = dist.angular_diameter_distance(zLens, **cosmo)
    Dls = dist.angular_diameter_distance(zSource, z0=zLens, **cosmo)
    Ds = dist.angular_diameter_distance(zSource, **cosmo)

    cInKmPerSecond = 3e5
    cInMpcPerDay = 9.7156e-15 * 60. * 60. * 24

    # eta0 = 4 pi (v/c)^2 * Dl * Dls / Ds (note the square on v/c)
    Eta0 = 4. * np.pi * (velocityDispersion / cInKmPerSecond)**2 * Dl * Dls / Ds

    TimeDelayDistance = (1 + zLens) / cInMpcPerDay * Ds / (Dl * Dls) * Eta0**2

    return TimeDelayDistance
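A usage sketch with illustrative values (the 240 km/s dispersion and the redshifts below are arbitrary, not taken from the original code):

import numpy as np
import cosmolopy.distance as dist

# Time-delay scale for an SIS lens with a 240 km/s velocity dispersion,
# a lens at z = 0.2 and a source at z = 1.0.
tdd = timeDelayDistanceForSIS(240., zLens=0.2, zSource=1.0)
print("SIS time-delay scale = %.3g" % tdd)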
Example #6
def modelTheo(z, omegam, omegal):
    tab = []
    cosmo = {'omega_M_0': omegam, 'omega_lambda_0': omegal, 'h': 0.70}
    cosmo = cd.set_omega_k_0(cosmo)
    for i in range(0, len(z)):
        tab.append(cd.luminosity_distance(z[i], **cosmo) * 10**5)
    return tab
Example #7
def Hz_cosmo(z):
	cosmo = {'omega_M_0' : 0.24, 'omega_lambda_0' : 0.76, 'h' : 0.73}
	cosmo = cd.set_omega_k_0(cosmo)
	'''___________Hz___________________________'''
	H_z = cd.hubble_distance_z(z, **cosmo)
	#print z,  H_z
	#print 0,  cd.hubble_distance_z(0, **cosmo)
	return H_z
Example #8
def val_dA( z ):
    """This returns angular-diameter distance in [cm comoving],
    or in other words, the comoving radial distance [in cm comoving]. It only takes z.""" 
    cosmo = {'omega_M_0' : Omatter, 'omega_lambda_0' : Olambda, 'h' : h0}
    cosmo = cd.set_omega_k_0( cosmo )

    res =  cd.angular_diameter_distance( z, **cosmo ) * Mpc_in_cm * ( 1 + z )
    return res
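The globals Omatter, Olambda, h0 and Mpc_in_cm are defined elsewhere in that module; a self-contained sketch of the same conversion with assumed Planck-like values and the standard Mpc-to-cm factor:

import cosmolopy.distance as cd

Omatter, Olambda, h0 = 0.3, 0.7, 0.7   # assumed values
Mpc_in_cm = 3.0857e24                  # cm per Mpc

cosmo = {'omega_M_0': Omatter, 'omega_lambda_0': Olambda, 'h': h0}
cosmo = cd.set_omega_k_0(cosmo)

z = 2.0
# D_A * (1 + z) is the transverse comoving distance (equal to the radial
# comoving distance in a flat universe), converted here to cm.
print("comoving distance to z=2: %.3e cm" %
      (cd.angular_diameter_distance(z, **cosmo) * Mpc_in_cm * (1 + z)))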
Example #9
	def part_stack_3D(self,bin_range,gal_num,run_num,code_num):		

		''' This function is the program 3D_part_stack.py'''
		# This program takes 3D particle data of 100 halo sample from Gerard Lemson from the MDB
		# and stacks the data by mass bin and uses the M_Phi technique. Note:(each bin is an ensemble cluster)

		# last update: 1/29/13

		##########

		import cosmolopy.distance as cd

		## DEFINE CONSTANTS ##

		h = 0.72 		# Hubble Constant / 100.0
		r_limit = 2		# Radius Limit of data in terms of R_crit200
		H0 = h*100.0		# Hubble constant
		q = 10.0
		c = 300000.0
		cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':H0/100.0}
		cosmo = cd.set_omega_k_0(cosmo)
		halo_num = 100		# Total number of halos

		## DEFINE FLAGS ##

		use_mems = False
		use_vdisp = True

		## INITIALIZATION ##

		G = galaxies()
		P = particles()
		C = caustic()
		U = universal()

		### PROGRAM ###

		print('...loading halos')

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.load_halos(h)

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.sort_halos(HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z)

		print('...loading particles')

		R, V, PPX, PPY, PPZ = P.configure_particles(HaloID,h,HPX,HPY,HPZ,HVX,HVY,HVZ,Z,r_limit,R_crit200,HVD,halo_num,gal_num,run_num)

		print('...binning data')
		# All variables beginning with 'ENC_' stand for ensemble cluster, same for *bin variables

		ENC_R,ENC_V,ENC_M200,ENC_R200,ENC_HVD,ENC_SRAD,ENC_ESRAD = P.bin_data(HaloID,R,V,SRAD,ESRAD,M_crit200,R_crit200,HVD,halo_num,bin_range,run_num,gal_num)

		print('...running caustic')

		x_range,ENC_INF_NFWMASS,ENC_DIA_NFWMASS,ENC_INF_CAUMASS,ENC_DIA_CAUMASS,ENC_INF_MPROF,ENC_INF_NFW,ENC_INF_CAU,ENC_DIA_MPROF,ENC_DIA_NFW,ENC_DIA_CAU = P.kernel_caustic_masscalc(ENC_R,ENC_V,ENC_M200,ENC_R200,ENC_SRAD,ENC_ESRAD,ENC_HVD,halo_num,bin_range,gal_num,H0,q,r_limit,run_num,use_mems)

		return x_range,ENC_INF_NFWMASS,ENC_DIA_NFWMASS,ENC_INF_CAUMASS,ENC_DIA_CAUMASS,ENC_INF_MPROF,ENC_INF_NFW,ENC_INF_CAU,ENC_DIA_MPROF,ENC_DIA_NFW,ENC_DIA_CAU,ENC_R,ENC_V,ENC_M200,ENC_R200 
Example #10
def test_figure2():
    """Plot Hogg fig. 2: The dimensionless angular diameter distance DA/DH.

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    DA / DH from 0--0.5

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0], [0.05], [0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0], [0.0], [0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    da = cd.angular_diameter_distance(z, **cosmo)

    # Also test the pathway with non-zero z0
    da2 = cd.angular_diameter_distance(z, z0=1e-8, **cosmo)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, (da / dh)[i], ls=linestyle[i])
        pylab.plot(z, (da2 / dh)[i], ls=linestyle[i])
    pylab.xlim(0, 5)
    pylab.ylim(0, 0.5)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"angular diameter distance $D_A/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') +
                " (astro-ph/9905116v4)")
Example #11
def test_figure2():
    """Plot Hogg fig. 2: The dimensionless angular diameter distance DA/DH.

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    DA / DH from 0--0.5

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0],[0.05],[0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0],[0.0],[0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)
    
    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    da = cd.angular_diameter_distance(z, **cosmo)

    # Also test the pathway with non-zero z0
    da2 = cd.angular_diameter_distance(z, z0=1e-8, **cosmo)

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, (da/dh)[i], ls=linestyle[i])
        pylab.plot(z, (da2/dh)[i], ls=linestyle[i])
    pylab.xlim(0,5)
    pylab.ylim(0,0.5)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"angular diameter distance $D_A/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') + 
                " (astro-ph/9905116v4)")
Example #12
 def zdistance(self,clus_z,H0=100.0):
     """
     Finds the angular diameter distance for an array of cluster center redshifts.
     (Alternatively, a precalculated angular-distance file could be loaded instead.)
     """
     cosmo = {'omega_M_0':0.3,'omega_lambda_0':0.7,'h':H0/100.0}
     cosmo = cd.set_omega_k_0(cosmo)
     ang_d = cd.angular_diameter_distance(clus_z,**cosmo)
     lum_d = cd.luminosity_distance(clus_z,**cosmo)
     return ang_d,lum_d
Example #13
def test_figure6():
    """Plot Hogg fig. 6: The dimensionless lookback time t_L/t_H and age t/t_H.

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    t/th from 0--1.2

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0], [0.05], [0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0], [0.0], [0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    th = 1 / cd.hubble_z(0, **cosmo)

    tl = cd.lookback_time(z, **cosmo)
    age = cd.age(z, **cosmo)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, (tl / th)[i], ls=linestyle[i])
        pylab.plot(z, (age / th)[i], ls=linestyle[i])
    pylab.xlim(0, 5)
    pylab.ylim(0, 1.2)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"lookback timne $t_L/t_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') +
                " (astro-ph/9905116v4)")
Example #14
def test_figure6():
    """Plot Hogg fig. 6: The dimensionless lookback time t_L/t_H and age t/t_H.

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    t/th from 0--1.2

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0],[0.05],[0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0],[0.0],[0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)
    
    linestyle = ['-', ':', '--']

    th = 1/ cd.hubble_z(0, **cosmo)

    tl = cd.lookback_time(z, **cosmo)
    age = cd.age(z, **cosmo)

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, (tl/th)[i], ls=linestyle[i])
        pylab.plot(z, (age/th)[i], ls=linestyle[i])
    pylab.xlim(0,5)
    pylab.ylim(0,1.2)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"lookback timne $t_L/t_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') + 
                " (astro-ph/9905116v4)")
Example #15
 def zdistance(self, clus_z, H0=100.0):
     """
     Finds the angular diameter distance for an array of cluster center redshifts.
     (Alternatively, a precalculated angular-distance file could be loaded instead.)
     """
     cosmo = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': H0 / 100.0}
     cosmo = cd.set_omega_k_0(cosmo)
     ang_d = cd.angular_diameter_distance(clus_z, **cosmo)
     lum_d = cd.luminosity_distance(clus_z, **cosmo)
     return ang_d, lum_d
Example #16
def test_GBL_tau_star():
    """Test tau_* against GB&L astro-ph/9812125v3.

    tau_* is a quantity used in optical_depth_instant.
    """
    z = 1.0

    # Fully ionized H and He
    x_ionH = 1.0
    x_ionHe = 2.0

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.3], [0.6], [1.0]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.65
    cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.
    cosmo['Y_He'] = 0.24
    cd.set_omega_k_0(cosmo)

    tau_inst, tau_star = cr.optical_depth_instant(z,
                                                  x_ionH=x_ionH,
                                                  x_ionHe=x_ionHe,
                                                  return_tau_star=True,
                                                  **cosmo)
    print("tau_star = %.7f" % (tau_star))
    print("tau_star/(h Omega_b) = %.7f =? 0.061" %
          (tau_star / (cosmo['h'] * cosmo['omega_b_0'])))

    ntest.assert_approx_equal(tau_star / (cosmo['h'] * cosmo['omega_b_0']),
                              0.061, 2)

    print("(1 - Y_He/2) = %.3f =? 0.88" % (1. - (cosmo['Y_He'] / 2.)))
    ntest.assert_approx_equal((1. - (cosmo['Y_He'] / 2.)), 0.88, 7)

    H_0 = cc.H100_s * cosmo['h']

    # s^-1 * Mpc s^-1 * Mpc^2 / Mpc^3 msun^-1 s^-2 / Msun ->
    tau_star_explicit = ((1. - (cosmo['Y_He'] / 2.)) * (
        (3. * H_0 * cosmo['omega_b_0'] * cc.c_light_Mpc_s * cc.sigma_T_Mpc) /
        (8. * math.pi * cc.G_const_Mpc_Msun_s * (cc.m_p_g / cc.M_sun_g))))

    print("tau_star_explicit = %.7f =? tau_star" % (tau_star_explicit))
    ntest.assert_approx_equal(tau_star, tau_star_explicit, 3)
Example #17
def test_figure5():
    """Plot Hogg fig. 5: The dimensionless comoving volume element (1/DH)^3(dVC/dz).

    The three curves are for the three world models, (omega_M, omega_lambda) =
    (1, 0), solid; (0.05, 0), dotted; and (0.2, 0.8), dashed.

    """
    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0], [0.05], [0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0], [0.0], [0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)

    dVc = cd.diff_comoving_volume(z, **cosmo)
    dVc_normed = dVc / (dh**3.)

    Vc = cd.comoving_volume(z, **cosmo)
    dz = z[1:] - z[:-1]
    dVc_numerical = (Vc[:, 1:] - Vc[:, :-1]) / dz / (4. * numpy.pi)
    dVc_numerical_normed = dVc_numerical / (dh**3.)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, dVc_normed[i], ls=linestyle[i], lw=2.)
        pylab.plot(z[:-1],
                   dVc_numerical_normed[i],
                   ls=linestyle[i],
                   c='k',
                   alpha=0.1)
    pylab.xlim(0, 5)
    pylab.ylim(0, 1.1)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"comoving volume element $[1/D_H^3]$ $dV_c/dz/d\Omega$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') +
                " (astro-ph/9905116v4)")
Example #18
def test_figure3():
    """Plot Hogg fig. 3: The dimensionless luminosity distance DL/DH

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    DL / DH from 0--16

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0],[0.05],[0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0],[0.0],[0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)
    
    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    dl = cd.luminosity_distance(z, **cosmo)

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, (dl/dh)[i], ls=linestyle[i])
    pylab.xlim(0,5)
    pylab.ylim(0,16)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"luminosity distance $D_L/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') + 
                " (astro-ph/9905116v4)")
Example #19
def test_figure3():
    """Plot Hogg fig. 3: The dimensionless luminosity distance DL/DH

    The three curves are for the three world models, 

    - Einstein-de Sitter (omega_M, omega_lambda) = (1, 0) [solid]
    
    : Low-density (0.05, 0) [dotted]

    -- High lambda, (0.2, 0.8) [dashed]

    Hubble distance DH = c / H0

    z from 0--5
    DL / DH from 0--16

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0], [0.05], [0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0], [0.0], [0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    dl = cd.luminosity_distance(z, **cosmo)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, (dl / dh)[i], ls=linestyle[i])
    pylab.xlim(0, 5)
    pylab.ylim(0, 16)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"luminosity distance $D_L/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') +
                " (astro-ph/9905116v4)")
Example #20
def err_Dv(z):
	'''___________DA__________________________'''
	cosmo = {'omega_M_0' : 0.24, 'omega_lambda_0' : 0.76, 'h' : 0.73}
	cosmo = cd.set_omega_k_0(cosmo)
	d_a = cd.angular_diameter_distance(z, **cosmo)
	'''___________Hz___________________________'''
	H_z = cd.hubble_distance_z(z, **cosmo)
	'''________________The error on Dv___________________'''
	part1 = ( dasigma/ d_a ) **2 
	part2 = (Hsigma/ H_z)**2
	part3 = 0.0 #(cov_DaH/ (d_a* H_z))
	sigma_Dv = sqrt(Dv(Z)**2 * (part1 + part2 + part3 ))
	return  sigma_Dv
Example #21
def test_figure5():
    """Plot Hogg fig. 5: The dimensionless comoving volume element (1/DH)^3(dVC/dz).

    The three curves are for the three world models, (omega_M, omega_lambda) =
    (1, 0), solid; (0.05, 0), dotted; and (0.2, 0.8), dashed.

    """
    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0],[0.05],[0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0],[0.0],[0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)
    
    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)

    dVc = cd.diff_comoving_volume(z, **cosmo)
    dVc_normed = dVc/(dh**3.)

    Vc = cd.comoving_volume(z, **cosmo)
    dz = z[1:] - z[:-1]
    dVc_numerical = (Vc[:,1:] - Vc[:,:-1])/dz/(4. * numpy.pi)
    dVc_numerical_normed = dVc_numerical/(dh**3.)

    pylab.figure(figsize=(6,6))
    for i in range(len(linestyle)):
        pylab.plot(z, dVc_normed[i], ls=linestyle[i], lw=2.)
        pylab.plot(z[:-1], dVc_numerical_normed[i], ls=linestyle[i], 
                   c='k', alpha=0.1)
    pylab.xlim(0,5)
    pylab.ylim(0,1.1)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"comoving volume element $[1/D_H^3]$ $dV_c/dz/d\Omega$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') + 
                " (astro-ph/9905116v4)")
Example #22
def test_figure1():
    """Plot Hogg fig. 1: The dimensionless proper motion distance DM/DH. 

    The three curves are for the three world models, Einstein-de
    Sitter (omega_M, omega_lambda) = (1, 0), solid; low-density,
    (0.05, 0), dotted; and high lambda, (0.2, 0.8), dashed.

    Hubble distance DH = c / H0

    z from 0--5
    DM / DH from 0--3

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0], [0.05], [0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0], [0.0], [0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    dm = cd.comoving_distance_transverse(z, **cosmo)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, (dm / dh)[i], ls=linestyle[i])
        #pylab.plot(z, (dm_err/dh)[i], ls=linestyle[i])
    pylab.xlim(0, 5)
    pylab.ylim(0, 3)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"proper motion distance $D_M/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') +
                " (astro-ph/9905116v4)")
Example #23
def test_figure1():
    """Plot Hogg fig. 1: The dimensionless proper motion distance DM/DH. 

    The three curves are for the three world models, Einstein-de
    Sitter (omega_M, omega_lambda) = (1, 0), solid; low-density,
    (0.05, 0), dotted; and high lambda, (0.2, 0.8), dashed.

    Hubble distance DH = c / H0

    z from 0--5
    DM / DH from 0--3

    """

    z = numpy.arange(0, 5.05, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[1.0],[0.05],[0.2]])
    cosmo['omega_lambda_0'] = numpy.array([[0.0],[0.0],[0.8]])
    cosmo['h'] = 0.5
    cd.set_omega_k_0(cosmo)
    
    linestyle = ['-', ':', '--']

    dh = cd.hubble_distance_z(0, **cosmo)
    dm = cd.comoving_distance_transverse(z, **cosmo)

    pylab.figure(figsize=(6,6))    
    for i in range(len(linestyle)):
        pylab.plot(z, (dm/dh)[i], ls=linestyle[i])
        #pylab.plot(z, (dm_err/dh)[i], ls=linestyle[i])
    pylab.xlim(0,5)
    pylab.ylim(0,3)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"proper motion distance $D_M/D_H$")
    pylab.title("compare to " + inspect.stack()[0][3].replace('test_', '') + 
                " (astro-ph/9905116v4)")
Example #24
def test_age():
    """Test integrated age against analytical age."""
    z = numpy.arange(0, 10.0, 0.05)

    cosmo = {}
    cosmo['omega_M_0'] = numpy.array([[0.99], [0.01], [0.3]])
    cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
    cosmo['h'] = 0.7
    cd.set_omega_k_0(cosmo)

    linestyle = ['-', ':', '--']

    gyr = 1e9 * cc.yr_s

    tl = cd.lookback_time(z, **cosmo)
    age = cd.age(z, **cosmo)
    age_ana = cd.age_flat(z, **cosmo)

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, (tl / gyr)[i], ls=linestyle[i], color='0.5')
        pylab.plot(z, (age / gyr)[i], ls=linestyle[i], color='r')
        pylab.plot(z, (age_ana / gyr)[i], ls=linestyle[i], color='k')
    pylab.xlabel("redshift z")
    pylab.ylabel(r"age $t_L/$Gyr")

    pylab.figure(figsize=(6, 6))
    for i in range(len(linestyle)):
        pylab.plot(z, ((age - age_ana) / age_ana)[i],
                   ls=linestyle[i],
                   color='k')
        # Make sure errors are small:
        ntest.assert_array_less((numpy.abs((age - age_ana) / age_ana)[i]),
                                3e-13)
    pylab.xlabel("redshift z")
    pylab.ylabel(r"age: (integral - analytical)/analytical")
Example #25
def getAnalyticExpression(logTimeDelay,
                          velocityDispersion,
                          zSource=1.,
                          zLens=0.2,
                          kernelSize=3.):
    cosmo = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 1.}
    cosmo = dist.set_omega_k_0(cosmo)
    Dl = dist.angular_diameter_distance(zLens, **cosmo)
    Dls = dist.angular_diameter_distance(zSource, z0=zLens, **cosmo)
    Ds = dist.angular_diameter_distance(zSource, **cosmo)

    cInKmPerSecond = 3e5
    cInMpcPerSecond = 9.7156e-15
    seconds2days = 1. / 60. / 60. / 24
    timeDelayDistance = getTimeDelayDistance(zLens, zSource, 100.)
    lensPlaneDistanceMpc = np.arange(500) / 1000. * 1e-4
    angle = lensPlaneDistanceMpc / Dl

    analytic = 8. * np.pi * (
        velocityDispersion / cInKmPerSecond
    )**2 * timeDelayDistance * Dls / Ds * angle * seconds2days

    maxTimeDelay = np.log10(
        32. * np.pi**2 * (velocityDispersion / cInKmPerSecond)**4 * Dl * Dls /
        Ds * (1. + zLens) / cInMpcPerSecond * seconds2days)

    #logTimeDelay = np.linspace(-3,maxTimeDelay,100)
    probability = (10**logTimeDelay)**2
    probability = probability / probability[np.argmin(
        np.abs(logTimeDelay - maxTimeDelay))]
    probability[logTimeDelay > maxTimeDelay] = 0
    dX = logTimeDelay[1] - logTimeDelay[0]
    #probability = gauss(probability, 1)
    #characterstic scale in kpc
    epsilon0 = 4.*np.pi*(velocityDispersion/cInKmPerSecond)**2\
      *Dl*Dls/Ds*1e3
    subsample = 4.
    dy = 0.1 / epsilon0 * Dl / Ds / subsample

    timeDelayOnePixel = dy / 10**maxTimeDelay
    #timeDelayOnePixel/dX

    #pdb.set_trace()
    box_kernel = Box1DKernel(kernelSize)
    probability = convolve(probability, box_kernel)
    probability /= np.sum(probability * dX)
    print("convolution kernel size is ", dX * kernelSize)
    return logTimeDelay, probability
Example #26
    def __init__(self, redshift, limitingObsMag=27):
        self.cosmo = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 0.7}
        self.cosmo = distance.set_omega_k_0(self.cosmo)

        distancePc = \
          distance.luminosity_distance(redshift, **self.cosmo)*1e6

        limitingAbsoluteMag = limitingObsMag - \
          5.*np.log10(distancePc) + 5

        self.magnitudes = np.linspace(-28, limitingAbsoluteMag, 10000)
        self.dMag = self.magnitudes[1] - self.magnitudes[0]
        self.redshift = redshift
        self.getLuminosityStar()
        self.getMagnitudeStar()
        self.getLuminosityFunction()
Example #27
def dvdz(z):
    # Planck best-fit parameters
    cosmo = {'omega_M_0': 0.316,
             'omega_lambda_0': 0.684,
             'omega_b_0': 0.049,
             'N_eff': 3.046,
             'h': 0.67,
             'ns': 0.962,
             'sigma_8': 0.834,
             'gamma': 0.55,
             'w0': -1.,
             'wa': 0.,
             'sigma_nl': 7.}
    cosmo = cd.set_omega_k_0(cosmo)
    Vc = cd.diff_comoving_volume(z, **cosmo)
    return Vc
Example #28
def getMeanMag(z, dz=1e-4):
    #return 0.1311
    cosmo = {'omega_M_0': 0.3086, 'omega_lambda_0': 0.6914, 'h': 0.6777}
    cosmo = dist.set_omega_k_0(cosmo)
    distanceEB = 0
    distanceFB = 0
    for i in np.arange(0., z, dz):
        dist_hz = dist.hubble_distance_z(i, **cosmo)

        distanceEB += dz * dist_hz / (1 + i)**2

        distanceFB += dz * dist_hz

    distanceFB /= 1. + z

    return (distanceEB / distanceFB)**2 - 1.
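A vectorized sketch of the same pair of Riemann sums, assuming dist.hubble_distance_z broadcasts over arrays in the same way the other cd.* calls in these examples do:

import numpy as np
import cosmolopy.distance as dist

def getMeanMag_vec(z, dz=1e-4):
    cosmo = {'omega_M_0': 0.3086, 'omega_lambda_0': 0.6914, 'h': 0.6777}
    cosmo = dist.set_omega_k_0(cosmo)
    zi = np.arange(0., z, dz)
    dist_hz = dist.hubble_distance_z(zi, **cosmo)
    distanceEB = np.sum(dz * dist_hz / (1. + zi)**2)
    distanceFB = np.sum(dz * dist_hz) / (1. + z)
    return (distanceEB / distanceFB)**2 - 1.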
Example #29
def calculate_age_stars(ro_in=None,
                        dset_in=None,
                        converted=False,
                        time_proper=True):
    """

    Parameters
    ----------
    converted: bool
        whether the epoch field has already been converted to a physical
        unit; it must still be in code units for this function to work

    Return
    ------
    starsFormedatUniverseAge:
        age of the universe when the star particle was created in Myr

    """

    if converted:
        raise ValueError(
            "Epoch field should be in code unit for this function to work properly.."
        )

    if (time_proper):
        # switch depends on ramses run setup
        import cosmolopy.distance as cd
        import cosmolopy.constants as cc

        cosmo = {
            'omega_M_0': ro_in.info["omega_m"],
            'omega_lambda_0': ro_in.info["omega_l"],
            'h': ro_in.info["H0"] / 100.
        }
        cosmo = cd.set_omega_k_0(cosmo)

        t_z0 = cd.age(0., **cosmo) / (cc.Gyr_s / 1.e+3)  # Myr
        ram2myr = ro_in.info["unit_time"].express(
            C.Myr) / ro_in.info["aexp"]**2

        starsFormedatUniverseAge = t_z0 + dset_in["epoch"][:] * ram2myr
    else:
        Myr_unit_time = ro_in.info["unit_time"].express(C.Myr)
        starsFormedatUniverseAge = (ro_in.info["time"] -
                                    dset_in["epoch"][:]) * Myr_unit_time

    return starsFormedatUniverseAge
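The age-of-the-universe piece in isolation (a sketch; the cosmological parameters below are assumed stand-ins for ro_in.info):

import cosmolopy.distance as cd
import cosmolopy.constants as cc

cosmo = {'omega_M_0': 0.272, 'omega_lambda_0': 0.728, 'h': 0.702}  # assumed values
cosmo = cd.set_omega_k_0(cosmo)

# cd.age returns seconds; cc.Gyr_s / 1e3 is one Myr in seconds.
t_z0 = cd.age(0., **cosmo) / (cc.Gyr_s / 1.e3)
print("age of the universe at z = 0: %.1f Myr" % t_z0)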
Example #30
    def __init__(self, h=0.678, omega_m=0.308, omega_l=0.692, log=None):
        """Initializes the cosmology for use with cosmolopy.distance."""
        self.h = h
        self.omega_m = omega_m
        self.omega_l = omega_l
        cosmo = {'omega_M_0':self.omega_m, 'omega_lambda_0':self.omega_l, \
                 'h':self.h}

        if log is not None:
            f = open(log, 'a')
            f.write('\nCosmological Parameters\n')
            f.write('   Omega_M = %.2f\n' % self.omega_m)
            f.write('   Omega_L = %.2f\n' % self.omega_l)
            f.write('   h = %.2f\n' % self.h)
            f.close()

        self.cosmo = cd.set_omega_k_0(cosmo)
Example #31
def da(z):
    # Planck best-fit parameters
    cosmo = {'omega_M_0': 0.316,
             'omega_lambda_0': 0.684,
             'omega_b_0': 0.049,
             'N_eff': 3.046,
             'h': 0.67,
             'ns': 0.962,
             'sigma_8': 0.834,
             'gamma': 0.55,
             'w0': -1.,
             'wa': 0.,
             'sigma_nl': 7.}
    cosmo = cd.set_omega_k_0(cosmo)
    # h is assumed to be defined at module level in the original project
    d_a = cd.angular_diameter_distance(z, **cosmo) / h
    print("Angular diameter distance = %.1f Mpc" % d_a)
    return d_a
Example #32
def getTimeDelayDistance(zLens, zSource, HubbleConstant, omegaLambda=1.0):
        '''
        Get the time delay distance for this particle lens
        '''

        #Wessels distance class
        
        omegaMatter = 1. - omegaLambda
        OmegaK = 1. - omegaMatter - omegaLambda
        

        cosmo = {'omega_M_0' : 0.3, 'omega_lambda_0' : 0.7, 'h' : HubbleConstant/100.}
        cosmo = dist.set_omega_k_0(cosmo)    
    
        Dls =  dist.angular_diameter_distance(zSource, z0=zLens, **cosmo)
        Dl =  dist.angular_diameter_distance(zLens,**cosmo)
        Ds =  dist.angular_diameter_distance(zSource, **cosmo)
        
        cInMpcPerSecond = 9.7156e-15
        
        return  (1.+zLens)*Dl*Ds/Dls/cInMpcPerSecond
Example #33
def dvdz(z):
    ''' Calculate the differential comoving volume element dV_C/dz/dOmega.
    The result is in units of Mpc^3 per unit redshift per steradian.
    To use this function you need to install cosmolopy.
    Note that the cosmological parameters are Planck best-fit parameters.
    '''

    cosmo = {'omega_M_0': 0.316,
             'omega_lambda_0': 0.684,
             'omega_b_0': 0.049,
             'N_eff': 3.046,
             'h': 0.67,
             'ns': 0.962,
             'sigma_8': 0.834,
             'gamma': 0.55,
             'w0': -1.,
             'wa': 0.,
             'sigma_nl': 7.}
    cosmo = cd.set_omega_k_0(cosmo)
    Vc = cd.diff_comoving_volume(z, **cosmo)
    return Vc
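Since cd.diff_comoving_volume is per steradian (the Hogg figure-5 tests above divide the numerical derivative of the sphere volume by 4 pi to match it), turning it into a survey volume element only requires the survey solid angle; a sketch using an assumed 5800 deg^2 area:

import numpy as np

area_deg2 = 5800.                        # assumed survey area
area_sr = area_deg2 * (np.pi / 180.)**2  # deg^2 -> steradian
z = 1.0
dVdz_survey = dvdz(z) * area_sr          # Mpc^3 per unit redshift over the survey
print("dV/dz over %.0f deg2 at z = 1: %.3e Mpc^3" % (area_deg2, dVdz_survey))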
Example #34
def da(z):
    '''Calculate the angular diameter distance.
    The result is the angular diameter distance multiplied by cosmo['h']
    (i.e. expressed in h^-1 Mpc).
    The cosmological parameters are Planck best-fit parameters.
    Note: you need to install cosmolopy and import cosmolopy.distance as cd.
    '''
    cosmo = {'omega_M_0': 0.316,
             'omega_lambda_0': 0.684,
             'omega_b_0': 0.049,
             'N_eff': 3.046,
             'h': 0.67,
             'ns': 0.962,
             'sigma_8': 0.834,
             'gamma': 0.55,
             'w0': -1.,
             'wa': 0.,
             'sigma_nl': 7.}
    cosmo = cd.set_omega_k_0(cosmo)
    d_a = cd.angular_diameter_distance(z, **cosmo) * cosmo['h']
    # print("Angular diameter distance = %.1f Mpc" % d_a)
    return d_a
Example #35
import numpy as np
from pylab import clf, plot, xscale

import cosmolopy
import cosmolopy.distance as cd


qso_zmin = 2.2
qso_zmax = 2.8
area =5800 #deg2



zvals = np.linspace(0, 1e5, int(1e5))
lcdm = cd.set_omega_k_0({'omega_M_0' : 0.3, 'omega_lambda_0' : 0.7, 'h' : 0.7})
d_lcdm = cd.comoving_distance_transverse(zvals, **lcdm)

clf()
plot(zvals,d_lcdm)
xscale('log')

d_bb = np.max(d_lcdm)
d_min = cd.comoving_distance_transverse(qso_zmin, **lcdm)
d_min = cd.comoving_distance_transverse(qso_zmin, **lcdm)
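A sketch of the comoving volume of the qso_zmin to qso_zmax shell over the quoted survey area, using cd.comoving_volume (the full-sky volume out to z, as in the Hogg figure-5 tests above):

area_sr = area * (np.pi / 180.)**2   # 5800 deg2 in steradians
v_shell_full_sky = (cd.comoving_volume(qso_zmax, **lcdm) -
                    cd.comoving_volume(qso_zmin, **lcdm))
v_shell_survey = v_shell_full_sky * area_sr / (4. * np.pi)
print('survey comoving volume: %.3e Mpc^3' % v_shell_survey)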





Example #36
def DA_cosmo(z):
	'''___________DA__________________________'''
	cosmo = {'omega_M_0' : 0.24, 'omega_lambda_0' : 0.76, 'h' : 0.73}
	cosmo = cd.set_omega_k_0(cosmo)
	d_a = cd.angular_diameter_distance(z, **cosmo)
	return d_a
Example #37
def normalise_to_sdss():

    import sys
    sys.path.insert(0, '/home/lc585/Dropbox/IoA/nirspec/python_code')
    from get_nir_spec import get_nir_spec
    from get_sdss_spec import get_sdss_spec
    
    sys.path.insert(0, '/home/lc585/Dropbox/IoA/QSOSED/Model/qsofit')
    from qsrmod import qsrmod
    from load import load
    import cosmolopy.distance as cd
    from get_mono_lum import resid_mag_fit


    fig, axs = plt.subplots(2, 1, figsize=figsize(1, 1.4))

    cs = palettable.colorbrewer.qualitative.Set1_8.mpl_colors
    cs_light = palettable.colorbrewer.qualitative.Pastel1_6.mpl_colors

    df = pd.read_csv('/home/lc585/Dropbox/IoA/nirspec/tables/masterlist_liam.csv', index_col=0)  
    row = df.ix['QSO125']

    wav_nir, dw_nir, flux_nir, err_nir = get_nir_spec(row.NIR_PATH, row.INSTR)      
    
    wav_nir = wav_nir / (1.0 + row.z_IR)

    axs[0].plot(wav_nir, flux_nir*1e15, color=cs_light[0], label='Near-IR')
    

    if (row.SPEC_OPT == 'SDSS') | (row.SPEC_OPT == 'BOSS+SDSS'):
    
        wav_opt, dw_opt, flux_opt, err_opt = get_sdss_spec('SDSS', row.DR7_PATH)
    
    elif (row.SPEC_OPT == 'BOSS') :
    
        wav_opt, dw_opt, flux_opt, err_opt = get_sdss_spec('BOSS', row.DR12_PATH)
  
    wav_opt = wav_opt / (1.0 + row.z_IR)
    
    axs[0].plot(wav_opt, flux_opt*1e15, color=cs_light[1], label='SDSS')

    
    
    
    # Normalise SED model to SDSS spectra ----------------------------------------------------
    
    """
    SDSS spectra in Shen & Liu emission line free windows 
    """
    
    fit_region = [[1350,1360], [1445,1465], [1700,1705], [2155,2400], [2480,2675], [2925,3500], [4200,4230], [4435,4700], [5100,5535], [6000,6250], [6800,7000]]
    
    fit_mask = np.zeros(len(wav_opt), dtype=bool)
    
    for r in fit_region:
         fit_mask[(wav_opt > r[0]) & (wav_opt < r[1])] = True
    
    tmp = ma.array(flux_opt)
    tmp[~fit_mask] = ma.masked 

    for item in ma.extras.flatnotmasked_contiguous(tmp):
        axs[0].plot(wav_opt[item], flux_opt[item]*1e15, color=cs[1])
    
    # ax.plot(wav_opt[fit_mask], flux_opt[fit_mask], color=cs[0])

    plslp1 = 0.46
    plslp2 = 0.03
    plbrk = 2822.0
    bbt = 1216.0
    bbflxnrm = 0.24
    elscal = 0.71
    scahal = 0.86
    galfra = 0.31
    ebv = 0.0
    imod = 18.0
    
    with open('/home/lc585/Dropbox/IoA/QSOSED/Model/qsofit/input.yml', 'r') as f:
        parfile = yaml.load(f)
    
    fittingobj = load(parfile)
    
    lin = fittingobj.get_lin()
    galspc = fittingobj.get_galspc()
    ext = fittingobj.get_ext()
    galcnt = fittingobj.get_galcnt()
    ignmin = fittingobj.get_ignmin()
    ignmax = fittingobj.get_ignmax()
    wavlen_rest = fittingobj.get_wavlen()
    ztran = fittingobj.get_ztran()
    lyatmp = fittingobj.get_lyatmp()
    lybtmp = fittingobj.get_lybtmp()
    lyctmp = fittingobj.get_lyctmp()
    whmin = fittingobj.get_whmin()
    whmax = fittingobj.get_whmax()
    cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':0.7}
    cosmo = cd.set_omega_k_0(cosmo)
    flxcorr = np.array( [1.0] * len(wavlen_rest) )
    
    params = Parameters()
    params.add('plslp1', value = plslp1, vary=False)
    params.add('plslp2', value = plslp2, vary=False)
    params.add('plbrk', value = plbrk, vary=False)
    params.add('bbt', value = bbt, vary=False)
    params.add('bbflxnrm', value = bbflxnrm, vary=False)
    params.add('elscal', value = elscal, vary=False)
    params.add('scahal', value = scahal, vary=False)
    params.add('galfra', value = galfra, vary=False)
    params.add('ebv', value = ebv, vary=True)
    params.add('imod', value = imod, vary=False)
    params.add('norm', value = 1e-17, vary=True)
    
    def resid(params,
              wav_opt,
              flux_opt):
    
        wav_sed, flux_sed = qsrmod(params,
                                   parfile,
                                   wavlen_rest,
                                   row.z_IR,
                                   lin,
                                   galspc,
                                   ext,
                                   galcnt,
                                   ignmin,
                                   ignmax,
                                   ztran,
                                   lyatmp,
                                   lybtmp,
                                   lyctmp,
                                   whmin,
                                   whmax,
                                   cosmo,
                                   flxcorr)
    
        wav_sed = wav_sed / (1.0 + row.z_IR) 
    
        spc = interp1d(wav_sed, flux_sed, bounds_error=True, fill_value=0.0)
        flux_sed_fit = spc(wav_opt)
    
        return flux_opt - params['norm'].value * flux_sed_fit
      
    
    resid_p = partial(resid,
                      wav_opt = wav_opt[fit_mask],
                      flux_opt = flux_opt[fit_mask])
    
    
    result = minimize(resid_p, params, method='leastsq')
    
    
    # ---------------------------------------------------------------------------------------
    
    xs = np.arange(np.nanmin(wav_opt), np.nanmax(wav_nir), 1)


    wav_sed, flux_sed = qsrmod(result.params,
                               parfile,
                               wavlen_rest,
                               row.z_IR,
                               lin,
                               galspc,
                               ext,
                               galcnt,
                               ignmin,
                               ignmax,
                               ztran,
                               lyatmp,
                               lybtmp,
                               lyctmp,
                               whmin,
                               whmax,
                               cosmo,
                               flxcorr)
    
    wav_sed = wav_sed / (1.0 + row.z_IR) 
    spc = interp1d(wav_sed, flux_sed * result.params['norm'].value, bounds_error=True, fill_value=0.0)
    
    # do error weighted fit of spectra to SED model
    # Hewett et al. 1985 

    # mask out regions between bandpasses 

    wav_nir_obs = wav_nir * (1.0 + row.z_IR)
    goodinds = ((wav_nir_obs > 11800.0) & (wav_nir_obs < 13100.0))\
               | ((wav_nir_obs > 15000.0) & (wav_nir_obs < 17500.0))\
               | ((wav_nir_obs > 19500.0) & (wav_nir_obs < 23500.0))

    wav_nir = wav_nir[goodinds]
    flux_nir = flux_nir[goodinds]
    err_nir = err_nir[goodinds]

    goodinds = err_nir > 0.0 

    wav_nir = wav_nir[goodinds]
    flux_nir = flux_nir[goodinds]
    err_nir = err_nir[goodinds]

    k = np.nansum((flux_nir * spc(wav_nir)) / err_nir**2) / np.nansum((spc(wav_nir) / err_nir)**2)
    

    inds = np.argsort(np.diff(wav_nir))[-2:]
    wav_nir[inds] = np.nan
    flux_nir[inds] = np.nan

    axs[0].plot(wav_nir, flux_nir*1e15 / k, color=cs[0], alpha=1.0)


    axs[0].plot(xs, spc(xs)*1e15, color='black', lw=1, label='Model')

    axs[0].legend(loc='upper right')
        
    axs[0].set_xlim(1300,7300)
    axs[0].set_ylim(0, 1)

    # axs[0].set_xlabel(r'Rest-frame wavelength [${\mathrm \AA}$]')
    axs[0].set_ylabel(r'F$_{\lambda}$ [Arbitrary units]')

    # --------------------------------------------------------------------------------

    df = pd.read_csv('/home/lc585/Dropbox/IoA/nirspec/tables/masterlist_liam.csv', index_col=0)  
    row = df.ix['QSO010']

    wav_nir, dw_nir, flux_nir, err_nir = get_nir_spec(row.NIR_PATH, row.INSTR)      
    
    wav_nir = wav_nir / (1.0 + row.z_IR)

    
    

    ftrlst, maglst, errlst, lameff = [], [], [], []  

    # 1250 condition so we don't go near the lyman break
    if (~np.isnan(row.psfMag_u)) & ((3546.0 / (1.0 + row.z_IR)) > 1250.0) & (~np.isnan(row.psfMagErr_u)):
        ftrlst.append('u.response')
        maglst.append(row.psfMag_u - 0.91) 
        errlst.append(row.psfMagErr_u)
        lameff.append(3546.0) 
    if (~np.isnan(row.psfMag_g)) & ((4670.0 / (1.0 + row.z_IR)) > 1250.0) & (~np.isnan(row.psfMagErr_g)):
        ftrlst.append('g.response')
        maglst.append(row.psfMag_g + 0.08)
        errlst.append(row.psfMagErr_g)
        lameff.append(4670.0) 
    if (~np.isnan(row.psfMag_r)) & ((6156.0 / (1.0 + row.z_IR)) > 1250.0) & (~np.isnan(row.psfMagErr_r)):
        ftrlst.append('r.response')
        maglst.append(row.psfMag_r - 0.16)
        errlst.append(row.psfMagErr_r)
        lameff.append(6156.0) 
    if (~np.isnan(row.psfMag_i)) & ((7471.0 / (1.0 + row.z_IR)) > 1250.0) & (~np.isnan(row.psfMagErr_i)):
        ftrlst.append('i.response')
        maglst.append(row.psfMag_i - 0.37)
        errlst.append(row.psfMagErr_i)
        lameff.append(7471.0) 
    if (~np.isnan(row.psfMag_z)) & ((8918.0 / (1.0 + row.z_IR)) > 1250.0) & (~np.isnan(row.psfMagErr_z)):
        ftrlst.append('z.response')
        maglst.append(row.psfMag_z - 0.54)
        errlst.append(row.psfMagErr_z)
        lameff.append(8918.0) 

    if (~np.isnan(row.VHS_YAperMag3)) & (~np.isnan(row.VHS_YAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_Y2.txt')
        maglst.append(row.VHS_YAperMag3)
        errlst.append(row.VHS_YAperMag3Err)
        lameff.append(10210.0) 
    elif (~np.isnan(row.Viking_YAperMag3)) & (~np.isnan(row.Viking_YAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_Y2.txt')
        maglst.append(row.Viking_YAperMag3)
        errlst.append(row.Viking_YAperMag3Err)
        lameff.append(10210.0) 
    elif (~np.isnan(row.UKIDSS_YAperMag3)) & (~np.isnan(row.UKIDSS_YAperMag3Err)): 
        ftrlst.append('Y.response')  
        maglst.append(row.UKIDSS_YAperMag3)
        errlst.append(row.UKIDSS_YAperMag3Err)
        lameff.append(10305.0) 

    if (~np.isnan(row.VHS_JAperMag3)) & (~np.isnan(row.VHS_JAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_J2.txt')
        maglst.append(row.VHS_JAperMag3)
        errlst.append(row.VHS_JAperMag3Err)
        lameff.append(12540.0) 
    elif (~np.isnan(row.Viking_JAperMag3)) & (~np.isnan(row.Viking_JAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_J2.txt')
        maglst.append(row.Viking_JAperMag3)
        errlst.append(row.Viking_JAperMag3Err)
        lameff.append(12540.0) 
    elif (~np.isnan(row.UKIDSS_J_1AperMag3)) & (~np.isnan(row.UKIDSS_J_1AperMag3Err)):  
        ftrlst.append('J.response')  
        maglst.append(row.UKIDSS_J_1AperMag3)
        errlst.append(row.UKIDSS_J_1AperMag3Err)
        lameff.append(12483.0) 
    elif (~np.isnan(row['2massMag_j'])) & (~np.isnan(row['2massMagErr_j'])): 
        ftrlst.append('J2MASS.response')  
        maglst.append(row['2massMag_j'])
        errlst.append(row['2massMagErr_j'])
        lameff.append(12350.0) 

    if (~np.isnan(row.VHS_HAperMag3)) & (~np.isnan(row.VHS_HAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_H2.txt') 
        maglst.append(row.VHS_HAperMag3)
        errlst.append(row.VHS_HAperMag3Err)
        lameff.append(16460.0) 
    elif (~np.isnan(row.Viking_HAperMag3)) & (~np.isnan(row.Viking_HAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_H2.txt') 
        maglst.append(row.Viking_HAperMag3)
        errlst.append(row.Viking_HAperMag3Err)
        lameff.append(16460.0) 
    elif (~np.isnan(row.UKIDSS_HAperMag3)) & (~np.isnan(row.UKIDSS_HAperMag3Err)):    
        ftrlst.append('H.response')     
        maglst.append(row.UKIDSS_HAperMag3)
        errlst.append(row.UKIDSS_HAperMag3Err)
        lameff.append(16313.0) 
    elif (~np.isnan(row['2massMag_h'])) & (~np.isnan(row['2massMagErr_h'])): 
        ftrlst.append('H2MASS.response')         
        maglst.append(row['2massMag_h'])
        errlst.append(row['2massMagErr_h'])
        lameff.append(16620.0) 

    if (~np.isnan(row.VHS_KAperMag3)) & (~np.isnan(row.VHS_KAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_Ks2.txt')
        maglst.append(row.VHS_KAperMag3)
        errlst.append(row.VHS_KAperMag3Err)
        lameff.append(21490.0) 
    elif (~np.isnan(row.Viking_KsAperMag3)) & (~np.isnan(row.Viking_KsAperMag3Err)):
        ftrlst.append('VISTA_Filters_at80K_forETC_Ks2.txt')
        maglst.append(row.Viking_KsAperMag3)
        errlst.append(row.Viking_KsAperMag3Err)
        lameff.append(21490.0) 
    elif (~np.isnan(row.UKIDSS_KAperMag3)) & (~np.isnan(row.UKIDSS_KAperMag3Err)):     
        ftrlst.append('K.response')
        maglst.append(row.UKIDSS_KAperMag3)
        errlst.append(row.UKIDSS_KAperMag3Err)
        lameff.append(22010.0) 
    elif (~np.isnan(row['2massMag_k'])) & (~np.isnan(row['2massMagErr_k'])): 
        ftrlst.append('K2MASS.response') 
        maglst.append(row['2massMag_k'])  
        errlst.append(row['2massMagErr_k'])
        lameff.append(21590.0) 

    ftrlst, maglst, errlst, lameff = np.array(ftrlst), np.array(maglst), np.array(errlst), np.array(lameff)


    
       
    #-------Filters---------------------------------------------
    nftr = len(ftrlst)
    bp = np.empty(nftr,dtype='object')
    dlam = np.zeros(nftr)
    
    for nf in range(nftr):
        with open(os.path.join('/home/lc585/Dropbox/IoA/QSOSED/Model/Filter_Response/', ftrlst[nf]), 'r') as f:
            wavtmp, rsptmp = np.loadtxt(f,unpack=True)
        dlam[nf] = (wavtmp[1] - wavtmp[0])
        bptmp = np.ndarray(shape=(2,len(wavtmp)), dtype=float)
        bptmp[0,:], bptmp[1,:] = wavtmp, rsptmp
        bp[nf] = bptmp
    
    #--------------------------------------------------------------------------------
    
    f_0 = np.zeros(nftr) # flux zero points
    fvega = '/data/vault/phewett/vista_work/vega_2007.lis' 
    vspec = np.loadtxt(fvega) 
    vf = interp1d(vspec[:,0], vspec[:,1])
    
    for nf in range(nftr):
        sum1 = np.sum( bp[nf][1] * vf(bp[nf][0]) * bp[nf][0] * dlam[nf])
        sum2 = np.sum( bp[nf][1] * bp[nf][0] * dlam[nf])
        f_0[nf] = sum1 / sum2

    flxlst = f_0 * 10.0**(-0.4 * maglst) # data fluxes in erg/cm^2/s/A
    flxerrlst = flxlst * (-0.4) * np.log(10) * errlst 

    axs[1].scatter(lameff / (1.0 + row.z_IR), flxlst*1e16, s=50, facecolor=cs[5], edgecolor='black', zorder=10, label='Photometry')
   

    plslp1 = 0.46
    plslp2 = 0.03
    plbrk = 2822.0
    bbt = 1216.0
    bbflxnrm = 0.24
    elscal = 0.71
    scahal = 0.86
    galfra = 0.31
    ebv = 0.0
    imod = 18.0
    
    with open('/home/lc585/Dropbox/IoA/QSOSED/Model/qsofit/input.yml', 'r') as f:
        parfile = yaml.load(f)
    
    fittingobj = load(parfile)
    
    lin = fittingobj.get_lin()
    galspc = fittingobj.get_galspc()
    ext = fittingobj.get_ext()
    galcnt = fittingobj.get_galcnt()
    ignmin = fittingobj.get_ignmin()
    ignmax = fittingobj.get_ignmax()
    wavlen_rest = fittingobj.get_wavlen()
    ztran = fittingobj.get_ztran()
    lyatmp = fittingobj.get_lyatmp()
    lybtmp = fittingobj.get_lybtmp()
    lyctmp = fittingobj.get_lyctmp()
    whmin = fittingobj.get_whmin()
    whmax = fittingobj.get_whmax()
    cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':0.7}
    cosmo = cd.set_omega_k_0(cosmo)
    flxcorr = np.array( [1.0] * len(wavlen_rest) )
    
    params = Parameters()
    params.add('plslp1', value = plslp1, vary=False)
    params.add('plslp2', value = plslp2, vary=False)
    params.add('plbrk', value = plbrk, vary=False)
    params.add('bbt', value = bbt, vary=False)
    params.add('bbflxnrm', value = bbflxnrm, vary=False)
    params.add('elscal', value = elscal, vary=False)
    params.add('scahal', value = scahal, vary=False)
    params.add('galfra', value = galfra, vary=False)
    params.add('ebv', value = ebv, vary=True)
    params.add('imod', value = imod, vary=False)
    params.add('norm', value = 1e-17, vary=True)

    resid_p = partial(resid_mag_fit,
                      flx=flxlst,
                      err=flxerrlst,
                      parfile=parfile,
                      wavlen_rest=wavlen_rest,
                      z=row.z_IR,
                      lin=lin,
                      galspc=galspc,
                      ext=ext,
                      galcnt=galcnt,
                      ignmin=ignmin,
                      ignmax=ignmax,
                      ztran=ztran,
                      lyatmp=lyatmp,
                      lybtmp=lybtmp,
                      lyctmp=lyctmp,
                      whmin=whmin,
                      whmax=whmax,
                      cosmo=cosmo,
                      flxcorr=flxcorr,
                      bp=bp,
                      dlam=dlam) 
    
    
    result = minimize(resid_p, params, method='leastsq')

    
    # ---------------------------------------------------------------------------------------
    
    wav_sed, flux_sed = qsrmod(result.params,
                               parfile,
                               wavlen_rest,
                               row.z_IR,
                               lin,
                               galspc,
                               ext,
                               galcnt,
                               ignmin,
                               ignmax,
                               ztran,
                               lyatmp,
                               lybtmp,
                               lyctmp,
                               whmin,
                               whmax,
                               cosmo,
                               flxcorr)

    spc = interp1d(wavlen_rest, flux_sed * result.params['norm'].value, bounds_error=True, fill_value=0.0)
    
    xs = np.arange(1000, 10000, 10)

    
    axs[1].plot(xs, spc(xs)*1e16, color='black', lw=1, label='Model', zorder=2)

    

    # do error weighted fit of spectra to SED model
    # Hewett et al. 1985 

    # mask out regions between bandpasses 

    wav_nir_obs = wav_nir * (1.0 + row.z_IR)
    goodinds = ((wav_nir_obs > 11800.0) & (wav_nir_obs < 13100.0))\
               | ((wav_nir_obs > 15000.0) & (wav_nir_obs < 17500.0))\
               | ((wav_nir_obs > 19500.0) & (wav_nir_obs < 23500.0))

    wav_nir = wav_nir[goodinds]
    flux_nir = flux_nir[goodinds]
    err_nir = err_nir[goodinds]

    goodinds = err_nir > 0.0 

    wav_nir = wav_nir[goodinds]
    flux_nir = flux_nir[goodinds]
    err_nir = err_nir[goodinds]

    k = np.nansum((flux_nir * spc(wav_nir)) / err_nir**2) / np.nansum((spc(wav_nir) / err_nir)**2)
    
    inds = np.argsort(np.diff(wav_nir))[-2:]
    wav_nir[inds] = np.nan
    flux_nir[inds] = np.nan

    axs[1].plot(wav_nir, flux_nir*1e16 / k, color=cs[0], zorder=1)
    
 
    # modified this so need to reload
    wav_nir, dw_nir, flux_nir, err_nir = get_nir_spec(row.NIR_PATH, row.INSTR)      
    
    wav_nir = wav_nir / (1.0 + row.z_IR)


    axs[1].plot(wav_nir[wav_nir > 2800.0], flux_nir[wav_nir > 2800.0]*1e16 / k, color=cs_light[0], label='Near-IR', zorder=0)

    axs[1].set_xlim(1250, 9000)
    axs[1].set_ylim(0, 5)

    axs[1].set_xlabel(r'Rest-frame wavelength [${\mathrm \AA}$]')
    axs[1].set_ylabel(r'F$_{\lambda}$ [Arbitrary units]')

    # -------------------------------------------------

    axs[1].legend(scatterpoints=1)

    axs[0].text(0.1, 0.93, '(a) J092952+355450',
                horizontalalignment='left',
                verticalalignment='center',
                transform = axs[0].transAxes)

    axs[1].text(0.1, 0.93, '(b) J100247+002104',
                horizontalalignment='left',
                verticalalignment='center',
                transform = axs[1].transAxes)



    fig.tight_layout()

    fig.savefig('/home/lc585/thesis/figures/chapter02/normalise_to_sdss.pdf')

    plt.show()

    return None 
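The error-weighted normalisation used twice above (after Hewett et al. 1985) is just the least-squares scale factor k that minimises sum(((flux - k * model) / err)**2); a self-contained sketch:

import numpy as np

def weighted_scale(flux, model, err):
    """Scale factor k minimising sum(((flux - k * model) / err)**2)."""
    return np.nansum(flux * model / err**2) / np.nansum((model / err)**2)

# Toy example: recover a known scale factor from noisy data.
rng = np.random.default_rng(0)
model = np.linspace(1., 2., 50)
flux = 3.0 * model + rng.normal(0., 0.01, 50)
err = np.full(50, 0.01)
print(weighted_scale(flux, model, err))   # ~3.0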
Example #38
import numpy as np
# cosmolopy.distance provides the set_omega_k_0 and angular_diameter_distance
# calls used in the __main__ block below.
import cosmolopy.distance as cosmo


def mass(theta, dl, ds, dls, c=3 * (10**8), G=4.3 * (10**-3)):
    """
    theta in units of rad
    dl, ds, dls in units of Mpc
    c in units of m/s
    G in units of m^2 Mpc Msun^-1 s^-2
    Returns the lens mass in Msun.
    """
    return theta**2 * ((c**2) / (4 * G)) * (dl * ds / dls)


if __name__ == '__main__':

    theta = [np.radians(v / (60. * 60.)) for v in [1.20, 1.50]]
    zlens = 0.222
    zring = 0.609  # of the inner ring
    params = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 0.72}
    params = cosmo.set_omega_k_0(params)

    dl = cosmo.angular_diameter_distance(zlens, **params)
    ds = cosmo.angular_diameter_distance(zring, **params)
    dls = cosmo.angular_diameter_distance(zring, zlens, **params)[0]

    print('DL = {0}\nDS = {1}\nDLS = {2}'.format(dl, ds, dls))

    for th in theta:
        m = mass(th, dl, ds, dls)
        print('Mlens = {0}'.format(m))
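A round-trip check (a sketch): inverting the same relation, theta_E = sqrt(4 G M / c^2 * Dls / (Dl * Ds)), should recover each input angle.

    # Invert M = theta^2 * c^2 / (4 G) * Dl * Ds / Dls back to theta,
    # using the same default constants as mass().
    c, G = 3 * (10**8), 4.3 * (10**-3)
    for th in theta:
        m = mass(th, dl, ds, dls)
        th_back = np.sqrt(4 * G * m / c**2 * dls / (dl * ds))
        print('theta = {0:.3e} rad, recovered = {1:.3e} rad'.format(th, th_back))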
Example #39
def ensamble(name,
             dir1,
             dir3,
             dir4,
             fo,
             fi=0,
             fii=0,
             pdf=1,
             rx=[0, 0.5, 1.0, 1.5],
             fits_f=0):
    rad = 1.0
    names = name.split("-")
    dir3 = dir3  #+"/"+names[1]+"/"+names[2]
    dir_map = dir3 + "/" + names[1] + "/" + names[2]
    DIRS = dir3.split("/")
    DRT = ""
    for DR in DIRS:
        DRT = DRT + DR + "/"
        call = "mkdir -p " + DRT
        sycall(call)
    DIRS = dir_map.split("/")
    DRT = ""
    for DR in DIRS:
        DRT = DRT + DR + "/"
        call = "mkdir -p " + DRT
        sycall(call)
    speed_of_light = 299792.458
    dir1 = dir1 + "/" + names[1] + "/" + names[2]
    #    dir2=dir2+"/"+names[1]+"-"+names[2]
    file = dir1 + "/" + name + ".SFH.cube.fits.gz"
    file2 = dir1 + "/" + name + ".p_e.pdl_r.fits"
    #file2=dir2+"/"+name+".photo.r_Lc_rad.fits"
    #file3=dir1+"/"+'mask.'+name+'.V.fits.gz'
    file3 = dir1 + "/" + 'DMASK.' + name + '.fits.gz'
    [pdl_cube, hdr] = gdata(file, 0, header=True)
    [pdl_rad, hdr2] = gdata(file2, 0, header=True)
    [pdl_mask, hdr3] = gdata(file3, 0, header=True)
    pdl_mask = 1.0 - pdl_mask
    if np.sum(pdl_mask) == 0:
        pdl_mask[:, :] = 1.0
    ind = []
    inda = []
    nr = len(rx)
    for ii in range(0, nr - 1):
        nt = np.where(pdl_rad < rx[ii + 1] * rad)
        nta = np.where(pdl_rad[nt] >= rx[ii] * rad)
        ind.extend([nt])
        inda.extend([nta])
#    n2=np.where(pdl_rad< r2*rad)
#    n2a=np.where(pdl_rad[n2]>= r1*rad)
#    n3=np.where(pdl_rad< r3*rad)
#    n3a=np.where(pdl_rad[n3]>= r2*rad)
#    n4=np.where(pdl_rad< r4*rad)
#    n4a=np.where(pdl_rad[n4]>= r3*rad)
    SN_file = "norm_SN_" + name + ".CS.fits.gz"
    if ptt.exists(dir1 + "/" + SN_file) == True:
        [pdl_SN, hdr000] = gdata(dir1 + "/" + SN_file, 0, header=True)
    Ha_file = "map.CS." + name + "_flux_6562.fits.gz"
    if ptt.exists(dir1 + "/" + Ha_file) == True:
        [pdl_ha, hdr001] = gdata(dir1 + "/" + Ha_file, 0, header=True)
        pdl_ha = pdl_ha * pdl_mask  #[0,:,:]#-z_r*speed_of_light
        pdl_ha[np.isnan(pdl_ha)] = 0
    Av_file = "map.CS." + name + "_Av_ssp.fits.gz"
    [pdl_Av, hdr002] = gdata(dir1 + "/" + Av_file, 0, header=True)
    Av_file_e = "map.CS." + name + "_e_Av_ssp.fits.gz"
    if ptt.exists(dir1 + "/" + Av_file_e) == True:
        [pdl_Av_e, hdr002e] = gdata(dir1 + "/" + Av_file_e, 0, header=True)
        pdl_Av_e[np.isnan(pdl_Av_e)] = 0
    else:
        pdl_Av_e = np.zeros(pdl_Av.shape)
    nt = hdr['NAXIS3'] - 5  #5#4#n_met
    flux_file = "map.CS." + name + "_flux_ssp.fits.gz"
    [pdl_flux, hdr0] = gdata(dir1 + "/" + flux_file, 0, header=True)
    flux_file_e = "map.CS." + name + "_e_flux_ssp.fits.gz"
    if ptt.exists(dir1 + "/" + flux_file_e) == True:
        [pdl_flux_e, hdr0e] = gdata(dir1 + "/" + flux_file_e, 0, header=True)
        pdl_flux_e[np.isnan(pdl_flux_e)] = 0
    else:
        pdl_flux_e = np.zeros(pdl_flux.shape)
    mass_file = "map.CS." + name + "_Mass_dust_cor_ssp.fits.gz"  #dust_cor_
    [pdl_mass, hdr00] = gdata(dir1 + "/" + mass_file, 0, header=True)
    pdl_mass[np.isnan(pdl_mass)] = 1
    MassT2 = np.log10(np.sum(10.0**pdl_mass))
    #print MassT2, name

    f = open(dir4 + "/BASE.gsd01", "r")
    #f=open(dir4+"/BASE.bc17_salp_Agelin_Metlin_330","r")
    yunk = f.readline()
    age_t = []
    met_t = []
    cor_t = []
    for line in f:
        if not "#" in line:
            data = line.split(" ")
            data = filter(None, data)
            age_t.extend([float_(data[1])])
            met_t.extend([float_(data[2])])
            cor_t.extend([float_(data[4])])
    n_t = len(age_t)
    age_t = np.array(age_t)
    met_t = np.array(met_t)
    cor_t = np.array(cor_t)
    age_t = np.around(age_t / 1e9, decimals=4)
    met_t = np.around(met_t, decimals=4)
    f.close()
    a_redshift = []
    filet = "auto_ssp.CS." + name + ".rss.out"
    f = open(dir1 + "/" + filet, "r")
    for line in f:
        if not "#" in line:
            data = line.split(",")
            data = filter(None, data)
            #print data
            a_redshift.extend([float_(data[7])])
    f.close()
    a_redshift = np.array(a_redshift)
    redshift = np.median(a_redshift)
    cosmo = {'omega_M_0': 0.27, 'omega_lambda_0': 0.73, 'h': 0.71}
    cosmo = cd.set_omega_k_0(cosmo)
    DL1 = cd.luminosity_distance(redshift, **cosmo)
    #print DL, redshift
    ratio = 3.08567758e24
    modz = 5.0 * np.log10(DL1) + 25.0
    DL = DL1 * ratio
    DA = DL1 / (1 + redshift)**2.0 * 1e6 * np.pi / 180. / 3600.
    L = 4.0 * np.pi * (DL**2.0)  #/(1+$redshift);
    Factor = (L * 1e-16) / 3.826e33
    filed = "coeffs_auto_ssp.CS." + name + ".rss.out"
    f2 = open(dir1 + "/" + filed, "r")
    n = 0
    n_ini = 0
    n_ssp = 156  #330
    ML = np.zeros(n_ssp)
    a_age = []
    a_met = []
    n_age = 0
    n_met = 0
    AGE = np.zeros(n_ssp)
    MET = np.zeros(n_ssp)
    COR = np.zeros(n_ssp)
    for line in f2:
        if n_ini < n_ssp:
            if not "#" in line:
                data = line.split(" ")
                data = filter(None, data)
                n = int(data[0])
                AGE[n] = float_(data[1])
                MET[n] = float_(data[2])
                ML[n] = float_(data[5])
                diff_age = 1
                for i in range(0, n):
                    if AGE[n] == AGE[i]:
                        diff_age = 0
                if diff_age == 1:
                    a_age.extend([AGE[n]])
                    n_age = n_age + 1
                diff_met = 1
                for i in range(0, n):
                    if MET[n] == MET[i]:
                        diff_met = 0
                if diff_met == 1:
                    a_met.extend([MET[n]])
                    n_met = n_met + 1
                n_ini = n_ini + 1
                for jt in range(0, n_t):
                    if age_t[jt] == 0.02:
                        age_t[jt] = 0.0199
                    if AGE[n] == age_t[jt]:
                        if MET[n] == met_t[jt]:
                            COR[n] = cor_t[jt]
    f2.close()
    n = n + 1
    MassT = 0
    LighT = 0
    massN = np.zeros(nr)
    massN_e = np.zeros(nr)
    lightN = np.zeros(nr)
    lightN_e = np.zeros(nr)
    #mas1=0
    #mas2=0
    #mas3=0
    #mas4=0
    mass = np.zeros([n_age, nr])
    mass_e = np.zeros([n_age, nr])
    light = np.zeros([n_age, nr])
    light_e = np.zeros([n_age, nr])
    ages = np.zeros([n_age])
    sfrt = np.zeros([n_age, nr])
    sfdt = np.zeros([n_age])
    [nz, nx, ny] = pdl_cube.shape
    temp_a = np.zeros([nx, ny])
    mass_age = np.zeros([nx, ny])
    pdl_cube[np.isnan(pdl_cube)] = 1

    pdl_cube_e = np.zeros([n, nx, ny])
    #print name
    for i in range(0, n):
        norm_file = dir1 + "/" + "map.CS." + name + "_eNORM_" + str(
            i) + "_ssp.fits.gz"
        if ptt.exists(norm_file) == True:
            pdl_cube_e[i, :, :] = gdata(norm_file)
        else:
            pdl_cube_e[i, :, :] = np.zeros([nx, ny])
    pdl_cube_e[np.isnan(pdl_cube_e)] = 0
    #print AGE
    #sys.exit()
    for i in range(nt - 1, n_ssp - 1, -1):
        label = hdr['FILE_' + str(i)]
        #        print label,n_age,n_met,n_age-i+n_ssp-1,-i+n_ssp,i
        time = label.replace('_NORM_age.fits.gz', '')
        time = float_(time.replace('map.CS.' + name + '_', ''))
        ages[n_age - i + n_ssp - 1] = np.log10(time) + 9
    #print np.log10(time)+9, 38-i+156
    #temp=pdl_cube[i,:,:]
    #temp=temp*10.0**(ML[i-156])*pdl_flux*Factor
    #temp_a=temp+temp_a
#    MassT=MassT+np.sum(temp)
#    mas1=np.sum(temp[n1])+mas1
#    mas2=np.sum(temp[n2][n2a])+mas2
#    mas3=np.sum(temp[n3][n3a])+mas3
#    mas4=np.sum(temp[n4][n4a])+mas4
#    mass[38-i+156,0]=np.log10(mas1)
#    mass[38-i+156,1]=np.log10(mas2)
#    mass[38-i+156,2]=np.log10(mas3)
#    mass[38-i+156,3]=np.log10(mas4)
    f2 = open(dir3 + "/" + name + "_Ensemble.csv", "w")
    f2.write(
        "#  LOG_AGE  N_MASSR_1  N_MASSR_2  N_MASSR_3  N_MASSR_4  LOG_MASSR_1  LOG_MASSR_2  LOG_MASSR_3  LOG_MASSR_4 \n"
    )
    fL2 = open(dir3 + "/" + name + "_Ensemble_L.csv", "w")
    fL2.write(
        "#  LOG_AGE  N_LIGHTR_1  N_LIGHTR_2  N_LIGHTR_3  N_LIGHTR_4  LOG_LIGHTR_1  LOG_LIGHTR_2  LOG_LIGHTR_3  LOG_LIGHTR_4 \n"
    )
    mass_age_t = np.zeros([n_age, nx, ny])
    mass_age_t_2 = np.zeros([n_age, nx, ny])
    mass_age_t_e = np.zeros([n_age, nx, ny])
    light_age_t = np.zeros([n_age, nx, ny])
    light_age_t_2 = np.zeros([n_age, nx, ny])
    light_age_t_e = np.zeros([n_age, nx, ny])
    for i in range(0, n_age):
        age_now = a_age[i]
        pdl_age = np.zeros([nx, ny])
        pdl_age_2 = np.zeros([nx, ny])
        pdl_age_e = np.zeros([nx, ny])
        pdl_ageL = np.zeros([nx, ny])
        pdl_age_2L = np.zeros([nx, ny])
        pdl_age_eL = np.zeros([nx, ny])
        for j in range(0, n):
            if age_now == AGE[j]:
                #if AGE[j] <= 2:
                pdl_age = pdl_age + pdl_cube[j, :, :] * 10.0**(
                    ML[j]) * pdl_flux * Factor * 10.0**(
                        0.4 * pdl_Av) * pdl_mask  #*0.25/np.pi#/1.47
                pdl_age_e = pdl_age_e + (
                    (pdl_cube_e[j, :, :] / pdl_cube[j, :, :])**2.0 +
                    (pdl_flux_e / pdl_flux)**2.0 +
                    (np.log(10.0) * 0.4 * pdl_Av_e)**2.0) * (
                        pdl_cube[j, :, :] * 10.0**(ML[j]) * pdl_flux * Factor *
                        pdl_mask * 10.0**(0.4 * pdl_Av))**2.0
                pdl_age_2 = pdl_age_2 + pdl_cube[j, :, :] * 10.0**(
                    ML[j]) * pdl_flux * Factor * pdl_mask * 10.0**(
                        0.4 * pdl_Av) / COR[j]
                pdl_age_2L = pdl_age_2L + pdl_cube[
                    j, :, :] * pdl_flux * Factor * pdl_mask * 10.0**(
                        0.4 * pdl_Av) / COR[j]
                pdl_ageL = pdl_ageL + pdl_cube[
                    j, :, :] * pdl_flux * Factor * 10.0**(0.4 *
                                                          pdl_Av) * pdl_mask
                pdl_age_eL = pdl_age_eL + (
                    (pdl_cube_e[j, :, :] / pdl_cube[j, :, :])**2.0 +
                    (pdl_flux_e / pdl_flux)**2.0 +
                    (np.log(10.0) * 0.4 * pdl_Av_e)**2.0
                ) * (pdl_cube[j, :, :] * pdl_flux * Factor * pdl_mask * 10.0**
                     (0.4 * pdl_Av))**2.0
                #else:
                #    pdl_age=pdl_age+pdl_cube[j,:,:]*10.0**(ML[j])*pdl_flux*Factor*pdl_mask*COR[j]
        pdl_age[np.where(np.isfinite(pdl_age) == False)] = 0
        pdl_age_2[np.where(np.isfinite(pdl_age_2) == False)] = 0
        pdl_age_e[np.where(np.isfinite(pdl_age_e) == False)] = 0
        pdl_ageL[np.where(np.isfinite(pdl_ageL) == False)] = 0
        pdl_age_2L[np.where(np.isfinite(pdl_age_2L) == False)] = 0
        pdl_age_eL[np.where(np.isfinite(pdl_age_eL) == False)] = 0
        #pdl_age_e[np.isnan(pdl_age_e)]=0
        for k in range(0, n_age):
            if np.log10(age_now) + 9 == ages[k]:
                mass_age_t[k, :, :] = pdl_age
                mass_age_t_2[k, :, :] = pdl_age_2
                mass_age_t_e[k, :, :] = pdl_age_e
                light_age_t[k, :, :] = pdl_ageL
                light_age_t_2[k, :, :] = pdl_age_2L
                light_age_t_e[k, :, :] = pdl_age_eL
    temp5 = np.sum(mass_age_t, axis=0) + 0.01
    #    temp6=np.log10(np.sum(mass_age_t,axis=0)+1.0)#QUITAR
    #    wfits(dir3+"/"+name+"mass_tot.fits",temp6,hdr001)#QUITAR
    upvalue = math.ceil(np.log10(np.amax(temp5)) / .05) * .05
    if np.isinf(upvalue):
        upvalue = 8
    if upvalue - 1 <= 6.5:
        lovalue = math.ceil(np.log10(np.amin(temp5)) / .05) * .05
        if upvalue - 2 > lovalue:
            lovalue = upvalue - 2
    else:
        lovalue = 6.5
    mass_temp_total = 0
    light_temp_total = 0
    for i in range(0, n_age):
        if i == 0:
            age_s = 10.0**((ages[i] + ages[i + 1]) / 2.0)
            age_i = 0.0
        elif i == n_age - 1:
            age_i = 10.0**((ages[i] + ages[i - 1]) / 2.0)
            age_s = 2.0 * 10.0**(ages[i]) - age_i
        else:
            age_i = 10.0**((ages[i] + ages[i - 1]) / 2.0)
            age_s = 10.0**((ages[i] + ages[i + 1]) / 2.0)
        Dt_age = np.abs(age_s - age_i)
        sfdt[i] = Dt_age / 1e6
        temp = mass_age_t[i, :, :]
        temp_2 = mass_age_t_2[i, :, :]
        temp_e = mass_age_t_e[i, :, :]
        tempL = light_age_t[i, :, :]
        temp_2L = light_age_t_2[i, :, :]
        temp_eL = light_age_t_e[i, :, :]
        #temp[np.where(np.isfinite(temp) == False)]=0
        #temp_e[np.where(np.isfinite(temp_e) == False)]=1
        #temp_2[np.where(np.isfinite(temp_2) == False)]=0
        if i == 0:
            if fits_f == 1:
                [nx, ny] = temp.shape
                MASS_map_cube = np.zeros([n_age, nx, ny])
                MGH_map_cube = np.zeros([n_age, nx, ny])
                SFH_map_cube = np.zeros([n_age, nx, ny])
                LIGHT_map_cube = np.zeros([n_age, nx, ny])
                LGH_map_cube = np.zeros([n_age, nx, ny])
            temp1 = temp
            temp1L = tempL
        else:
            temp1 = temp1 + temp
            temp1L = temp1L + tempL
        if fits_f == 1:
            MASS_map_cube[i, :, :] = temp
            MGH_map_cube[i, :, :] = temp1
            SFH_map_cube[i, :, :] = temp_2 / Dt_age
            LIGHT_map_cube[i, :, :] = tempL
            LGH_map_cube[i, :, :] = temp1L
        #if pdf==1:
        #map_plot(temp1,ages[i],pdl_rad,dir=dir_map+"/",pdf=1,title=name,form='pdf',fname=name+'_smap_'+str(i),minval=lovalue,maxval=upvalue)
        MassT = MassT + np.sum(temp)
        LighT = LighT + np.sum(tempL)
        for ii in range(0, nr - 1):
            # print ind[ii],inda[ii],ii
            #            print temp[ind[ii]]
            #            print len(temp[ind[ii]]),np.amax(inda[ii])
            Dt_mass = np.sum(temp[ind[ii]][inda[ii]])
            Dt_mass_e = np.sum(temp_e[ind[ii]][inda[ii]])
            Dt_mass_2 = np.sum(temp_2[ind[ii]][inda[ii]])
            Dt_light = np.sum(tempL[ind[ii]][inda[ii]])
            Dt_light_e = np.sum(temp_eL[ind[ii]][inda[ii]])
            Dt_light_2 = np.sum(temp_2L[ind[ii]][inda[ii]])
            massN[ii] = Dt_mass + massN[ii]
            massN_e[ii] = Dt_mass_e + massN_e[ii]
            lightN[ii] = Dt_light + lightN[ii]
            lightN_e[ii] = Dt_light_e + lightN_e[ii]
            #mas2=np.sum(temp[n2][n2a])+mas2
            #mas3=np.sum(temp[n3][n3a])+mas3
            #mas4=np.sum(temp[n4][n4a])+mas4
            #            print temp[ind[ii]][inda[ii]]
            mass[i, ii] = np.log10(massN[ii])
            mass_e[i, ii] = massN_e[ii]
            light[i, ii] = np.log10(lightN[ii])
            light_e[i, ii] = lightN_e[ii]
            sfrt[
                i,
                ii] = Dt_mass_2 / Dt_age  #/massN[ii]#/(np.pi*(rx[ii+1]**2.0-rx[ii]**2.0)*rad**2.0)#/(float_(len(temp[ind[ii]][inda[ii]]))*(0.5*DA)**2.0)
        #mass[i,1]=np.log10(mas2)
        #mass[i,2]=np.log10(mas3)
        #mass[i,3]=np.log10(mas4)
    mass_temp_total = np.log10(np.sum(10**mass[nt - n_ssp - 1, :]))
    light_temp_total = np.log10(np.sum(10**light[nt - n_ssp - 1, :]))
    MassT = np.log10(MassT)
    MassT = mass_temp_total
    LighT = np.log10(LighT)
    LighT = light_temp_total
    if fits_f == 1:
        #if ptt.exists() == True:
        h1 = pyf.PrimaryHDU(MASS_map_cube)  #.header
        h2 = pyf.PrimaryHDU(SFH_map_cube)  #.header
        h3 = pyf.PrimaryHDU(MGH_map_cube)
        h4 = pyf.PrimaryHDU(LIGHT_map_cube)
        h5 = pyf.PrimaryHDU(LGH_map_cube)
        h = h1.header
        h["NAXIS"] = 3
        h["NAXIS3"] = n_age
        h["NAXIS1"] = nx
        h["NAXIS2"] = ny
        hlist = pyf.HDUList([h1])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "MASS_maps_" + name + ".fits", hlist)
        #if ptt.exists() == True:
        h = h2.header
        h["NAXIS"] = 3
        h["NAXIS3"] = n_age
        h["NAXIS1"] = nx
        h["NAXIS2"] = ny
        hlist = pyf.HDUList([h2])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "SFH_maps_" + name + ".fits", hlist)
        #if ptt.exists() == True:
        h = h3.header
        h["NAXIS"] = 3
        h["NAXIS3"] = n_age
        h["NAXIS1"] = nx
        h["NAXIS2"] = ny
        hlist = pyf.HDUList([h3])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "MGH_maps_" + name + ".fits", hlist)
        h = h4.header
        h["NAXIS"] = 3
        h["NAXIS3"] = n_age
        h["NAXIS1"] = nx
        h["NAXIS2"] = ny
        hlist = pyf.HDUList([h4])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "LIGHT_maps_" + name + ".fits", hlist)
        h = h5.header
        h["NAXIS"] = 3
        h["NAXIS3"] = n_age
        h["NAXIS1"] = nx
        h["NAXIS2"] = ny
        hlist = pyf.HDUList([h5])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "LGH_maps_" + name + ".fits", hlist)
    #print MassT,name
    if ptt.exists(dir1 + "/" + SN_file) == True:
        SN = np.zeros(nr - 1)
        sn_l = ''
        for ii in range(0, nr - 1):
            SN[ii] = np.average(pdl_SN[ind[ii]][inda[ii]])
            sn_l = sn_l + ' , ' + str(SN[ii])
        if fi != 0:
            fi.write(name + sn_l + ' \n')
    if ptt.exists(dir1 + "/" + Ha_file) == True:
        Ha = np.zeros(nr - 1)
        ha_l = ''
        for ii in range(0, nr - 1):
            Ha[ii] = np.sum(pdl_ha[ind[ii]][inda[ii]] * 10.0**
                            (0.4 * pdl_Av[ind[ii]][inda[ii]])) * (L * 1e-16)
            ha_l = ha_l + ' , ' + str(Ha[ii])
        if fii != 0:
            fii.write(name + ha_l + ' \n')
    else:
        ha_l = ''
        for ii in range(0, nr - 1):
            ha_l = ha_l + ' , ' + str(-100)
        if fii != 0:
            fii.write(name + ha_l + ' \n')
    #print Ha,(L*1e-16)
    #sys.exit(0)
    mass_n = 10**(10**(mass - mass[nt - n_ssp - 1, :]))
    mass_n_e = np.sqrt(
        (10**(mass - mass[nt - n_ssp - 1, :]))**2.0 *
        ((mass_e / 10**(2.0 * mass)) +
         (mass_e[nt - n_ssp - 1, :] / 10**(2.0 * mass[nt - n_ssp - 1, :]))))
    light_n = 10**(10**(light - light[nt - n_ssp - 1, :]))
    light_n_e = np.sqrt(
        (10**(light - light[nt - n_ssp - 1, :]))**2.0 *
        ((light_e / 10**(2.0 * light)) +
         (light_e[nt - n_ssp - 1, :] / 10**(2.0 * light[nt - n_ssp - 1, :]))))
    #mass_n=10**(mass-mass[nt-156-1,:])
    #mass_n=(mass-mass[nt-156-1,:])
    for i in range(0, n_age):
        #print ages[i],a_age[i],"test_ages"
        line = ''
        line = line + str(ages[i])
        for ii in range(0, nr - 1):
            line = line + ';' + str(mass_n[i, ii])
        for ii in range(0, nr - 1):
            line = line + ';' + str(mass[i, ii])
        for ii in range(0, nr - 1):
            line = line + ';' + str(sfrt[i, ii])
        for ii in range(0, nr - 1):
            line = line + ';' + str(mass_n_e[i, ii])
        line = line + ';' + str(sfdt[i])
        line = line + ' \n'
        f2.write(line)
        lineL = ''
        lineL = lineL + str(ages[i])
        for ii in range(0, nr - 1):
            lineL = lineL + ';' + str(light_n[i, ii])
        for ii in range(0, nr - 1):
            lineL = lineL + ';' + str(light[i, ii])
        for ii in range(0, nr - 1):
            lineL = lineL + ';' + str(sfrt[i, ii])
        for ii in range(0, nr - 1):
            lineL = lineL + ';' + str(light_n_e[i, ii])
        lineL = lineL + ';' + str(sfdt[i])
        lineL = lineL + ' \n'
        fL2.write(lineL)
    #if not pdf == 0:
    #dev=dir3+"/"+name+"_Relative_Mass.pdf"
    ##if pdf == 1:
    #    #matplotlib.use('Agg')
    #import matplotlib.pyplot as plt
    ##plt.axis([8, 10.5, 0, 10])
    #plt.xlabel("$log_{10}(time/yr)$",fontsize=14)
    #plt.ylabel("$10^{M(t)/M_{0}}$",fontsize=14)
    #plt.title(name+' $\log M_{tot}='+('%7.2f' % MassT)+'$',fontsize=15)
    ##plt.semilogx('log')
    #for ii in range(0, nr-1):
    #    plt.plot(ages,mass_n[:,ii],label='$'+('%6.1f' % rx[ii])+'R_e<R<'+('%6.1f' % rx[ii+1])+'R_e$')
    ##plt.plot(ages,mass_n[:,1],label='$'+('%6.1f' % r1)+'<R<'+('%6.1f' % r2)+'R_e$')
    ##plt.plot(ages,mass_n[:,2],label='$'+('%6.1f' % r2)+'<R<'+('%6.1f' % r3)+'R_e$')
    ##plt.plot(ages,mass_n[:,3],label='$'+('%6.1f' % r3)+'<R<'+('%6.1f' % r4)+'R_e$')
    #plt.legend(loc=3)
    #if pdf == 1:
    #    plt.savefig(dev,dpi = 1000)
    #else:
    #    plt.show()
    #plt.close()
    if not pdf == 0:
        dev = dir3 + '/' + name + "_Relative_Mass2.pdf"
        #if pdf == 1:
        #    matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=(6, 5.5))
        ax.set_xlabel("$log_{10}(time/yr)$", fontsize=14)
        ax.set_ylabel("$M(t)/M_{0}$", fontsize=14)
        #MassT=10.32
        ax.set_title(name + ' $\log M_{tot}=' + ('%7.2f' % MassT) + '$',
                     fontsize=15)
        ax.set_xlim(8.6, 10.1)
        #ax.set_ylim(0,12)
        ax.set_ylim(func_plot(np.log10(1.78), ftype=1),
                    func_plot(np.log10(12), ftype=1))
        for ii in range(0, nr - 1):
            plt.plot(ages,
                     func_plot(np.log10(mass_n[:, ii]), ftype=1),
                     label='$' + ('%6.1f' % rx[ii]) + 'R_e<R<' +
                     ('%6.1f' % rx[ii + 1]) + 'R_e$')
        plt.legend(loc=3)
        plt.plot(np.arange(0, 20, .1),
                 np.ones(200) * func_plot(0.95, ftype=1),
                 '--',
                 color='black')
        plt.plot(np.arange(0, 20, .1),
                 np.ones(200) * func_plot(0.50, ftype=1),
                 '--',
                 color='green')
        fig.canvas.draw()
        labels = [item.get_text() for item in ax.get_yticklabels()]
        for i in range(0, len(labels)):
            labels[i] = labels[i].replace(u'\u2212', '-')
        for i in range(0, len(labels)):
            if labels[i] != u'':
                if float_(labels[i]) == 0:
                    labels[i] = u'%3.2f' % 10**(0)
                else:
                    labels[i] = u'%3.2f' % 10**(float_(labels[i]))
        ax.set_yticklabels(labels)
        if pdf == 1:
            fig.tight_layout()
            plt.savefig(dev)  #,dpi = 1000)
        else:
            plt.show()
        plt.close()
    f2.close()
    fL2.close()
    fo.write(name + " " + str(MassT) + " " + str(redshift) + " ")
Example #40
0
def int_ensamble(name, dir1, dir3, dir4, fo, fi=0, pdf=1, fits_f=0, m_t=""):
    names = name.split("-")
    dir3 = dir3  #+"/"+names[1]+"/"+names[2]
    dir_map = dir3 + "/" + names[1] + "/" + names[2] + m_t
    DIRS = dir3.split("/")
    DRT = ""
    for DR in DIRS:
        DRT = DRT + DR + "/"
        call = "mkdir -p " + DRT
        sycall(call)
    DIRS = dir_map.split("/")
    DRT = ""
    for DR in DIRS:
        DRT = DRT + DR + "/"
        call = "mkdir -p " + DRT
        #sycall(call)
    call = "mkdir -p " + dir3 + '/Plots'
    sycall(call)
    speed_of_light = 299792.458
    #dir1=dir1+"/"+names[1]+"/stadi/"+names[2]+m_t
    dir1 = dir1 + "/" + names[1] + "/" + names[2] + m_t
    dir2 = dir1
    file1 = dir1 + "/coeffs_auto_ssp." + name + ".int.out"
    file2 = dir2 + "/auto_ssp." + name + ".int.out"
    pdl_cube = []
    pdl_cube_e = []
    ages = []
    cont = 0
    f1 = open(file1, "r")
    for line in f1:
        if not "#" in line:
            line = line.replace("\n", "")
            data = line.split(" ")
            data = filter(None, data)
            if (cont % 4) == 0:
                ages.extend([np.log10(float_(data[1])) + 9.0])
            pdl_cube.extend([float_(data[3])])
            pdl_cube_e.extend([float_(data[8])])
            cont = cont + 1
    f1.close()
    f2 = open(file2, "r")
    for line in f2:
        if not "#" in line:
            line = line.replace("\n", "")
            data = line.split(",")
            data = filter(None, data)
            pdl_flux = float_(data[13])
            pdl_flux_e = float_(data[14])
            pdl_Av = float_(data[5])
            pdl_Av_e = float_(data[6])
            redshift = float_(data[7])
    f2.close()
    ages = np.array(ages)
    ages = sorted(ages, reverse=True)
    #np.sor
    #sys.exit()
    pdl_cube = np.array(pdl_cube)
    pdl_cube_e = np.array(pdl_cube_e)

    #    f=open(dir4+"/BASE.gsd01","r")
    f = open(dir4 + "/BASE.bc17_salp_Agelin_Metlin_330", "r")
    yunk = f.readline()
    age_t = []
    met_t = []
    cor_t = []
    for line in f:
        if not "#" in line:
            data = line.split(" ")
            data = filter(None, data)
            age_t.extend([float_(data[1])])
            met_t.extend([float_(data[2])])
            cor_t.extend([float_(data[4])])
    n_t = len(age_t)
    age_t = np.array(age_t)
    met_t = np.array(met_t)
    cor_t = np.array(cor_t)
    age_t = np.around(age_t / 1e9, decimals=4)
    met_t = np.around(met_t, decimals=4)
    f.close()

    cosmo = {'omega_M_0': 0.27, 'omega_lambda_0': 0.73, 'h': 0.71}
    cosmo = cd.set_omega_k_0(cosmo)
    DL1 = cd.luminosity_distance(redshift, **cosmo)
    #print DL, redshift
    ratio = 3.08567758e24
    modz = 5.0 * np.log10(DL1) + 25.0
    DL = DL1 * ratio
    DA = DL1 / (1 + redshift)**2.0 * 1e6 * np.pi / 180. / 3600.
    L = 4.0 * np.pi * (DL**2.0)  #/(1+$redshift);
    Factor = (L * 1e-16) / 3.826e33
    filed = file1
    f2 = open(filed, "r")
    n = 0
    n_ini = 0
    ML = np.zeros(156)
    a_age = []
    a_met = []
    n_age = 0
    n_met = 0
    AGE = np.zeros(156)
    MET = np.zeros(156)
    COR = np.zeros(156)
    for line in f2:
        if n_ini < 156:
            if not "#" in line:
                data = line.split(" ")
                data = filter(None, data)
                n = int(data[0])
                AGE[n] = float_(data[1])
                MET[n] = float_(data[2])
                ML[n] = float_(data[5])
                diff_age = 1
                for i in range(0, n):
                    if AGE[n] == AGE[i]:
                        diff_age = 0
                if diff_age == 1:
                    a_age.extend([AGE[n]])
                    n_age = n_age + 1
                diff_met = 1
                for i in range(0, n):
                    if MET[n] == MET[i]:
                        diff_met = 0
                if diff_met == 1:
                    a_met.extend([MET[n]])
                    n_met = n_met + 1
                n_ini = n_ini + 1
                for jt in range(0, n_t):
                    if age_t[jt] == 0.02:
                        age_t[jt] = 0.0199
                    if AGE[n] == age_t[jt]:
                        if MET[n] == met_t[jt]:
                            COR[n] = cor_t[jt]
    f2.close()
    n = n + 1
    MassT = 0
    LighT = 0
    massN = 0
    massN_e = 0
    lightN = 0
    lightN_e = 0
    #mas1=0
    #mas2=0
    #mas3=0
    #mas4=0
    mass = np.zeros([n_age])
    mass_e = np.zeros([n_age])
    light = np.zeros([n_age])
    light_e = np.zeros([n_age])
    #ages=np.zeros([n_age])
    sfrt = np.zeros([n_age])
    sfdt = np.zeros([n_age])
    nz = len(pdl_cube)
    temp_a = 0
    mass_age = 0
    #    pdl_cube[np.isnan(pdl_cube)]=1

    #sys.exit()
    #for i in range(nt-1, 155, -1):
    #    label=hdr['FILE_'+str(i)]
    #    time=label.replace('_NORM_age.fits.gz','')
    #    time=float_(time.replace('map.CS.'+name+'_',''))
    #    ages[38-i+156]=np.log10(time)+9
    #print np.log10(time)+9, 38-i+156
    #temp=pdl_cube[i,:,:]
    #temp=temp*10.0**(ML[i-156])*pdl_flux*Factor
    #temp_a=temp+temp_a
    #    MassT=MassT+np.sum(temp)
    #    mas1=np.sum(temp[n1])+mas1
    #    mas2=np.sum(temp[n2][n2a])+mas2
    #    mas3=np.sum(temp[n3][n3a])+mas3
    #    mas4=np.sum(temp[n4][n4a])+mas4
    #    mass[38-i+156,0]=np.log10(mas1)
    #    mass[38-i+156,1]=np.log10(mas2)
    #    mass[38-i+156,2]=np.log10(mas3)
    #    mass[38-i+156,3]=np.log10(mas4)
    f2 = open(dir3 + "/" + name + m_t + "_Ensemble_int.csv", "w")
    f2.write(
        "#  LOG_AGE  N_MASSR_1  N_MASSR_2  N_MASSR_3  N_MASSR_4  LOG_MASSR_1  LOG_MASSR_2  LOG_MASSR_3  LOG_MASSR_4 \n"
    )
    fL2 = open(dir3 + "/" + name + m_t + "_Ensemble_int_L.csv", "w")
    fL2.write(
        "#  LOG_AGE  N_LIGHTR_1  N_LIGHTR_2  N_LIGHTR_3  N_LIGHTR_4  LOG_LIGHTR_1  LOG_LIGHTR_2  LOG_LIGHTR_3  LOG_LIGHTR_4 \n"
    )
    mass_age_t = np.zeros([n_age])
    mass_age_t_2 = np.zeros([n_age])
    mass_age_t_e = np.zeros([n_age])
    light_age_t = np.zeros([n_age])
    light_age_t_2 = np.zeros([n_age])
    light_age_t_e = np.zeros([n_age])
    for i in range(0, n_age):
        age_now = a_age[i]
        pdl_age = 0
        pdl_age_2 = 0
        pdl_age_e = 0
        pdl_ageL = 0
        pdl_age_2L = 0
        pdl_age_eL = 0
        for j in range(0, n):
            if age_now == AGE[j]:
                #if AGE[j] <= 2:
                pdl_age = pdl_age + pdl_cube[j] * 10.0**(
                    ML[j]) * pdl_flux * Factor * 10.0**(0.4 * pdl_Av
                                                        )  #*0.25/np.pi#/1.47
                pdl_age_e = pdl_age_e + (
                    (pdl_cube_e[j] / pdl_cube[j])**2.0 +
                    (pdl_flux_e / pdl_flux)**2.0 +
                    (np.log(10.0) * 0.4 * pdl_Av_e)**2.0
                ) * (pdl_cube[j] * 10.0**(ML[j]) * pdl_flux * Factor * 10.0**
                     (0.4 * pdl_Av))**2.0
                pdl_age_2 = pdl_age_2 + pdl_cube[j] * 10.0**(
                    ML[j]) * pdl_flux * Factor * 10.0**(0.4 * pdl_Av) / COR[j]
                pdl_age_2L = pdl_age_2L + pdl_cube[
                    j] * pdl_flux * Factor * 10.0**(0.4 * pdl_Av) / COR[j]
                pdl_ageL = pdl_ageL + pdl_cube[j] * pdl_flux * Factor * 10.0**(
                    0.4 * pdl_Av)
                pdl_age_eL = pdl_age_eL + (
                    (pdl_cube_e[j] / pdl_cube[j])**2.0 +
                    (pdl_flux_e / pdl_flux)**2.0 +
                    (np.log(10.0) * 0.4 * pdl_Av_e)**2.0) * (
                        pdl_cube[j] * pdl_flux * Factor * 10.0**
                        (0.4 * pdl_Av))**2.0
                #print age_now

        if np.isfinite(pdl_age) == False:
            pdl_age = 0
        if np.isfinite(pdl_age_2) == False:
            pdl_age_2 = 0
        if np.isfinite(pdl_age_e) == False:
            pdl_age_e = 0
        if np.isfinite(pdl_ageL) == False:
            pdl_ageL = 0
        if np.isfinite(pdl_age_2L) == False:
            pdl_age_2L = 0
        if np.isfinite(pdl_age_eL) == False:
            pdl_age_eL = 0
        #  pdl_age_e[np.isnan(pdl_age_e)]=0
        for k in range(0, n_age):
            if np.log10(age_now) + 9 == ages[k]:
                #print ages[k],np.log10(age_now)+9,age_now
                mass_age_t[k] = pdl_age
                mass_age_t_2[k] = pdl_age_2
                mass_age_t_e[k] = pdl_age_e
                light_age_t[k] = pdl_ageL
                light_age_t_2[k] = pdl_age_2L
                light_age_t_e[k] = pdl_age_eL
    #print mass_age_t

    mass_temp_total = 0
    light_temp_total = 0
    #sys.exit()
    for i in range(0, n_age):
        if i == 0:
            age_s = 10.0**((ages[i] + ages[i + 1]) / 2.0)
            age_i = 0.0
        elif i == n_age - 1:
            age_i = 10.0**((ages[i] + ages[i - 1]) / 2.0)
            age_s = 2.0 * 10.0**(ages[i]) - age_i
        else:
            age_i = 10.0**((ages[i] + ages[i - 1]) / 2.0)
            age_s = 10.0**((ages[i] + ages[i + 1]) / 2.0)
        Dt_age = np.abs(age_s - age_i)
        sfdt[i] = Dt_age / 1e6
        temp = mass_age_t[i]
        temp_2 = mass_age_t_2[i]
        temp_e = mass_age_t_e[i]
        tempL = light_age_t[i]
        temp_2L = light_age_t_2[i]
        temp_eL = light_age_t_e[i]
        #temp[np.where(np.isfinite(temp) == False)]=0
        #temp_e[np.where(np.isfinite(temp_e) == False)]=1
        #temp_2[np.where(np.isfinite(temp_2) == False)]=0
        if i == 0:
            if fits_f == 1:
                MASS_map_cube = np.zeros([n_age])
                MGH_map_cube = np.zeros([n_age])
                SFH_map_cube = np.zeros([n_age])
                LIGHT_map_cube = np.zeros([n_age])
                LGH_map_cube = np.zeros([n_age])
            temp1 = temp
            temp1L = tempL
        else:
            temp1 = temp1 + temp
            temp1L = temp1L + tempL
        if fits_f == 1:
            MASS_map_cube[i] = temp
            MGH_map_cube[i] = temp1
            SFH_map_cube[i] = temp_2 / Dt_age
            LIGHT_map_cube[i] = tempL
            LGH_map_cube[i] = temp1L
        #if pdf==1:
        #map_plot(temp1,ages[i],pdl_rad,dir=dir_map+"/",pdf=1,title=name,form='pdf',fname=name+'_smap_'+str(i),minval=lovalue,maxval=upvalue)
        MassT = MassT + np.sum(temp)
        LighT = LighT + np.sum(tempL)
        #            print temp[ind[ii]]
        #            print len(temp[ind[ii]]),np.amax(inda[ii])
        Dt_mass = temp
        Dt_mass_e = temp_e
        Dt_mass_2 = temp_2
        Dt_light = tempL
        Dt_light_e = temp_eL
        Dt_light_2 = temp_2L
        massN = Dt_mass + massN
        massN_e = Dt_mass_e + massN_e
        lightN = Dt_light + lightN
        lightN_e = Dt_light_e + lightN_e
        #mas2=np.sum(temp[n2][n2a])+mas2
        #mas3=np.sum(temp[n3][n3a])+mas3
        #mas4=np.sum(temp[n4][n4a])+mas4
        #            print temp[ind[ii]][inda[ii]]
        mass[i] = np.log10(massN)
        mass_e[i] = massN_e
        light[i] = np.log10(lightN)
        light_e[i] = lightN_e
        sfrt[
            i] = Dt_mass_2 / Dt_age  #/massN[ii]#/(np.pi*(rx[ii+1]**2.0-rx[ii]**2.0)*rad**2.0)#/(float_(len(temp[ind[ii]][inda[ii]]))*(0.5*DA)**2.0)
        #mass[i,1]=np.log10(mas2)
        #mass[i,2]=np.log10(mas3)
        #mass[i,3]=np.log10(mas4)
    # print mass[i],massN
    mass_temp_total = np.log10(np.sum(10**mass[n_age - 1]))
    light_temp_total = np.log10(np.sum(10**light[n_age - 1]))
    MassT = np.log10(MassT)
    MassT = mass_temp_total
    LighT = np.log10(LighT)
    LighT = light_temp_total
    if fits_f == 1:
        #if ptt.exists() == True:
        h1 = pyf.PrimaryHDU(MASS_map_cube)  #.header
        h2 = pyf.PrimaryHDU(SFH_map_cube)  #.header
        h3 = pyf.PrimaryHDU(MGH_map_cube)
        h4 = pyf.PrimaryHDU(LIGHT_map_cube)
        h5 = pyf.PrimaryHDU(LGH_map_cube)
        h = h1.header
        h["NAXIS"] = 2
        h["NAXIS1"] = n_age
        h["NAXIS2"] = 1
        hlist = pyf.HDUList([h1])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "MASS_maps_" + name + ".fits", hlist)
        #if ptt.exists() == True:
        h = h2.header
        h["NAXIS"] = 2
        h["NAXIS1"] = n_age
        h["NAXIS2"] = 1
        hlist = pyf.HDUList([h2])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "SFH_maps_" + name + ".fits", hlist)
        #if ptt.exists() == True:
        h = h3.header
        h["NAXIS"] = 2
        h["NAXIS1"] = n_age
        h["NAXIS2"] = 1
        hlist = pyf.HDUList([h3])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "MGH_maps_" + name + ".fits", hlist)
        h = h4.header
        h["NAXIS"] = 2
        h["NAXIS1"] = n_age
        h["NAXIS2"] = 1
        hlist = pyf.HDUList([h4])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "LIGHT_maps_" + name + ".fits", hlist)
        h = h5.header
        h["NAXIS"] = 2
        h["NAXIS1"] = n_age
        h["NAXIS2"] = 1
        hlist = pyf.HDUList([h5])
        hlist.update_extend()
        wfits_ext(dir_map + "/" + "LGH_maps_" + name + ".fits", hlist)
    #print MassT,name

    #print Ha,(L*1e-16)
    #sys.exit(0)
    mass_n = 10**(10**(mass - mass[n_age - 1]))
    mass_n_e = np.sqrt((10**(mass - mass[n_age - 1]))**2.0 *
                       ((mass_e / 10**(2.0 * mass)) +
                        (mass_e[n_age - 1] / 10**(2.0 * mass[n_age - 1]))))
    light_n = 10**(10**(light - light[n_age - 1]))
    light_n_e = np.sqrt((10**(light - light[n_age - 1]))**2.0 *
                        ((light_e / 10**(2.0 * light)) +
                         (light_e[n_age - 1] / 10**(2.0 * light[n_age - 1]))))
    #print mass_n
    #mass_n=10**(mass-mass[nt-156-1,:])
    #mass_n=(mass-mass[nt-156-1,:])
    for i in range(0, n_age):
        #print ages[i],a_age[i],"test_ages"
        line = ''
        line = line + str(ages[i])
        line = line + ';' + str(mass_n[i])
        line = line + ';' + str(mass[i])
        line = line + ';' + str(sfrt[i])
        line = line + ';' + str(mass_n_e[i])
        line = line + ';' + str(sfdt[i])
        #print line
        line = line + ' \n'
        f2.write(line)
        lineL = ''
        lineL = lineL + str(ages[i])
        lineL = lineL + ';' + str(light_n[i])
        lineL = lineL + ';' + str(light[i])
        lineL = lineL + ';' + str(sfrt[i])
        lineL = lineL + ';' + str(light_n_e[i])
        lineL = lineL + ';' + str(sfdt[i])
        lineL = lineL + ' \n'
        fL2.write(lineL)

# print mass_n.shape,ages.shape
    if not pdf == 0:
        dev = dir3 + '/Plots/' + name + m_t + "_int_Relative_Mass2.pdf"
        #if pdf == 1:
        #    matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=(6, 5.5))
        ax.set_xlabel("$log_{10}(time/yr)$", fontsize=14)
        ax.set_ylabel("$M(t)/M_{0}$", fontsize=14)
        #MassT=10.32
        ax.set_title(name + ' $\log M_{tot}=' + ('%7.2f' % MassT) + '$',
                     fontsize=15)
        ax.set_xlim(8.6, 10.1)
        #ax.set_ylim(0,12)
        ax.set_ylim(func_plot(np.log10(1.78), ftype=1),
                    func_plot(np.log10(12), ftype=1))
        plt.plot(
            ages, func_plot(np.log10(mass_n), ftype=1)
        )  #,label='$'+('%6.1f' % rx[ii])+'R_e<R<'+('%6.1f' % rx[ii+1])+'R_e$'
        plt.legend(loc=3)
        plt.plot(np.arange(0, 20, .1),
                 np.ones(200) * func_plot(0.95, ftype=1),
                 '--',
                 color='black')
        plt.plot(np.arange(0, 20, .1),
                 np.ones(200) * func_plot(0.50, ftype=1),
                 '--',
                 color='green')
        fig.canvas.draw()
        labels = [item.get_text() for item in ax.get_yticklabels()]
        for i in range(0, len(labels)):
            labels[i] = labels[i].replace(u'\u2212', '-')
        for i in range(0, len(labels)):
            if labels[i] != u'':
                if float_(labels[i]) == 0:
                    labels[i] = u'%3.2f' % 10**(0)
                else:
                    labels[i] = u'%3.2f' % 10**(float_(labels[i]))
        ax.set_yticklabels(labels)
        if pdf == 1:
            fig.tight_layout()
            plt.savefig(dev)  #,dpi = 1000)
        else:
            plt.show()
        plt.close()
    f2.close()
    fL2.close()
    fo.write(name + " " + str(MassT) + " " + str(redshift) + " \n")
Example #41
0
print("Comoving distance to z=6 is %.lf Mpc"%(d_co))

from cosmolopy import*
d_a=cd.angular_diameter_distance(6,**fidcosmo)
print("Angular-diameter distance to z=6 is %.lf Mpc"%(d_a))
d_light=cd.light_travel_distance(6,**fidcosmo)
print"Light-travel distance to z=6 is %.lf Mpc"%(d_light)"""

import cosmolopy.perturbation as cp
import numpy as np
import math
log_k=np.arange(np.log10(2*math.pi/10000) ,np.log10(2*math.pi/40),0.05) #k Mpc^-1
print(log_k.shape)
k=10**log_k
print("k=",k)
cosmology={'omega_M_0' : 0.308,'omega_b_0' : 0.022,'omega_n_0' : 0.0,'N_nu' : 3,'omega_lambda_0' : 0.692,
'h' : 0.72,'n' : 0.95,'sigma_8' : 0.8}
spectrum=cp.power_spectrum(10**log_k,1.,**cosmology)
print("spectrum" ,spectrum)
import matplotlib.pyplot as mpl
mpl.plot(log_k,spectrum,color="r",lw=2, alpha=0.8)
mpl.ylabel('Power spectrum')
mpl.xlabel('wave number log10_k (Mpc^-1)')
mpl.show()

cosmology=cd.set_omega_k_0(cosmology)
print(cosmology['omega_k_0'])

from ed_functions_memo2 import sigma_v_function
from ed_functions_memo2 import sigma_v_nonlinear
Example #42
0
	def gal_stack_3D(self,bin_range,gal_num,run_num,code_num):	

		'''This function is the program 3D_gal_stack.py'''

		# last update: 1/24/13

		# This program takes the 3D galaxy data for a 100-halo sample (from Gerard Lemson / the MDB)
		# and stacks it by mass bin using the M_Phi technique. Note: each bin is an ensemble cluster.

		import cosmolopy.distance as cd

		## DEFINE CONSTANTS ##

		h = 0.72 		# Hubble Constant / 100.0
		r_limit = 2		# Radius Limit of data in terms of R_crit200
		H0 = h*100.0		# Hubble constant
		q = 10.0
		c = 300000.0
		cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':H0/100.0}
		cosmo = cd.set_omega_k_0(cosmo)
		halo_num = 100		# Total number of halos
		gal_num = gal_num	# Number of galaxies stacked per halo for en. clusters
		bin_range = bin_range	# Number of halos per ensemble cluster
		run_num = run_num	# Number of ensemble halos to run caustic mass est. over

		## DEFINE FLAGS ##
	
		use_mems = False
		use_vdisp = True
	
		## INITIALIZATION ##
	
		G = galaxies()
		C = caustic()
		U = universal()

		### PROGRAM ###

		print '...loading halos'

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.load_halos(h)

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.sort_halos(HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z)

		print '...loading galaxies'

		R, V, MAGS, GPX, GPY, GPZ = G.configure_galaxies(HaloID,h,HPX,HPY,HPZ,HVX,HVY,HVZ,Z,r_limit,R_crit200,HVD,halo_num)

		print '...binning data'	
		# All variables beginning with 'ENC_' stand for ensemble cluster variable, similar to *bin variables

		ENC_R,ENC_V,ENC_MAG,ENC_M200,ENC_R200,ENC_HVD,ENC_SRAD,ENC_ESRAD,ENC_GPX,ENC_GPY,ENC_GPZ = G.bin_data_mag(HaloID,R,V,MAGS,SRAD,ESRAD,M_crit200,R_crit200,HVD,halo_num,bin_range,gal_num,GPX,GPY,GPZ)

		print '...running caustic'

		x_range,ENC_INF_NFWMASS,ENC_DIA_NFWMASS,ENC_INF_CAUMASS,ENC_DIA_CAUMASS,ENC_INF_MPROF,ENC_INF_NFW,ENC_INF_CAU,ENC_DIA_MPROF,ENC_DIA_NFW,ENC_DIA_CAU = G.kernel_caustic_masscalc(ENC_R,ENC_V,ENC_M200,ENC_R200,ENC_SRAD,ENC_ESRAD,ENC_HVD,halo_num,bin_range,gal_num,H0,q,r_limit,run_num,use_mems)

		print ''
		bias1 = mean( (ENC_M200[run_num[0]:run_num[1]]-ENC_INF_NFWMASS) / ENC_M200[run_num[0]:run_num[1]] )
		bias2 = mean( abs(ENC_M200[run_num[0]:run_num[1]]-ENC_INF_NFWMASS) / ENC_M200[run_num[0]:run_num[1]] )
		bias3 = mean( log(ENC_INF_NFWMASS/ENC_M200[run_num[0]:run_num[1]]) )	
		
		if code_num == 1:	
			return bias1, bias2, -1*bias3
		if code_num == 2:
			return x_range,ENC_INF_NFWMASS,ENC_DIA_NFWMASS,ENC_INF_CAUMASS,ENC_DIA_CAUMASS,ENC_INF_MPROF,ENC_INF_NFW,ENC_INF_CAU,ENC_DIA_MPROF,ENC_DIA_NFW,ENC_DIA_CAU,ENC_M200,ENC_R200,ENC_R,ENC_V,ENC_MAG
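
For reference, the three bias statistics computed at the end of gal_stack_3D are, in equation form (M_est denotes ENC_INF_NFWMASS, the inferred NFW caustic mass; log and mean are assumed to be numpy's, so the logarithm is natural):

    b_1 = \langle (M_{200} - M_{\rm est})/M_{200} \rangle ,\qquad
    b_2 = \langle |M_{200} - M_{\rm est}|/M_{200} \rangle ,\qquad
    b_3 = \langle \ln(M_{\rm est}/M_{200}) \rangle ,

and the function returns b_1, b_2 and -b_3 when code_num == 1.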
Example #43
0
## DEFINE FLAGS ##

use_mems = False
use_vdisp = True

use_gals = True			# Use galaxies, or else particles 

## DEFINE CONSTANTS ##

h = 0.72 			# Hubble Constant / 100.0
r_limit = 2			# Radius Limit of data in Mpc
H0 = h*100.0			# Hubble constant
q = 10.0
c = 300000.0
cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':H0/100.0}
cosmo = cd.set_omega_k_0(cosmo)
bin_range = 1			# Needed b/c technically it is working on ensemble code
halo_num = 100			# Number of halos in sample
run_num = [0,1]			# Number of halos to run program over, particles

if use_gals==True:
	gal_num = 10		# Number of galaxies per mass calculation
else:
	gal_num = 10000		# Number of particles per mass calculation

## DEFINE FUNCTIONS ##

def load_gals(h,gal_num,HaloID,HPX,HPY,HPZ,HVX,HVY,HVZ,Z,r_crit200):
	R = []
	V = []
	MAGS = []
def mono_lum(mag=18.0,
             magsys='AB',
             mono_wav=5100.0,
             z=1.0,
             ftrwav=np.ones(100),
             ftrtrans=np.ones(100)):

    plslp1 = 0.46
    plslp2 = 0.03
    plbrk = 2822.0
    bbt = 1216.0
    bbflxnrm = 0.24
    elscal = 0.71
    scahal = 0.86
    galfra = 0.31
    ebv = 0.0
    imod = 18.0

    with open('/home/lc585/Dropbox/IoA/QSOSED/Model/qsofit/input.yml',
              'r') as f:
        parfile = yaml.load(f)

    fittingobj = qsrload(parfile)

    lin = fittingobj.get_lin()
    galspc = fittingobj.get_galspc()
    ext = fittingobj.get_ext()
    galcnt = fittingobj.get_galcnt()
    ignmin = fittingobj.get_ignmin()
    ignmax = fittingobj.get_ignmax()
    wavlen_rest = fittingobj.get_wavlen()
    ztran = fittingobj.get_ztran()
    lyatmp = fittingobj.get_lyatmp()
    lybtmp = fittingobj.get_lybtmp()
    lyctmp = fittingobj.get_lyctmp()
    whmin = fittingobj.get_whmin()
    whmax = fittingobj.get_whmax()
    cosmo = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7, 'h': 0.7}
    cosmo = cd.set_omega_k_0(cosmo)
    flxcorr = np.array([1.0] * len(wavlen_rest))

    params = Parameters()
    params.add('plslp1', value=plslp1)
    params.add('plslp2', value=plslp2)
    params.add('plbrk', value=plbrk)
    params.add('bbt', value=bbt)
    params.add('bbflxnrm', value=bbflxnrm)
    params.add('elscal', value=elscal)
    params.add('scahal', value=scahal)
    params.add('galfra', value=galfra)
    params.add('ebv', value=ebv)
    params.add('imod', value=imod)

    wavlen, flux = qsrmod(params, parfile, wavlen_rest, z, lin, galspc, ext,
                          galcnt, ignmin, ignmax, ztran, lyatmp, lybtmp,
                          lyctmp, whmin, whmax, cosmo, flxcorr)

    if magsys == 'AB':

        # Calculate AB zero point
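        # 0.10893 / lambda^2 is the AB reference spectrum (f_nu = 3631 Jy)
        # expressed as f_lambda in erg/s/cm^2/A, with lambda in Angstroms.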
        sum1 = np.sum(ftrtrans[:-1] * (0.10893 / (ftrwav[:-1]**2)) *
                      ftrwav[:-1] * np.diff(ftrwav))
        sum2 = np.sum(ftrtrans[:-1] * ftrwav[:-1] * np.diff(ftrwav))
        zromag = -2.5 * np.log10(sum1 / sum2)

    if magsys == 'VEGA':

        # Calculate vega zero point
        fvega = '/data/vault/phewett/vista_work/vega_2007.lis'
        vspec = np.loadtxt(fvega)
        vf = interp1d(vspec[:, 0], vspec[:, 1])
        sum1 = np.sum(ftrtrans[:-1] * vf(ftrwav[:-1]) * ftrwav[:-1] *
                      np.diff(ftrwav))
        sum2 = np.sum(ftrtrans[:-1] * ftrwav[:-1] * np.diff(ftrwav))
        zromag = -2.5 * np.log10(sum1 / sum2)

    def resid(p, mag, flux, wavlen, zromag, ftrwav, ftrtrans):

        newflux = p['norm'].value * flux
        spc = interp1d(wavlen, newflux, bounds_error=False, fill_value=0.0)

        sum1 = np.sum(ftrtrans[:-1] * spc(ftrwav[:-1]) * ftrwav[:-1] *
                      np.diff(ftrwav))
        sum2 = np.sum(ftrtrans[:-1] * ftrwav[:-1] * np.diff(ftrwav))
        ftrmag = (-2.5 * np.log10(sum1 / sum2)) - zromag

        return [mag - ftrmag]

    resid_p = partial(resid,
                      mag=mag,
                      flux=flux,
                      wavlen=wavlen,
                      zromag=zromag,
                      ftrwav=ftrwav,
                      ftrtrans=ftrtrans)

    p = Parameters()
    p.add('norm', value=1e-17)

    result = minimize(resid_p, p, method='leastsq')

    indmin = np.argmin(np.abs((wavlen / (1.0 + z)) - (mono_wav - 5.0)))
    indmax = np.argmin(np.abs((wavlen / (1.0 + z)) - (mono_wav + 5.0)))

    # Flux density in erg/cm2/s/A
    f5100 = p['norm'].value * np.median(flux[indmin:indmax])

    f5100 = f5100 * (u.erg / u.cm / u.cm / u.s / u.AA)

    lumdist = cosmoWMAP.luminosity_distance(z).to(u.cm)

    # Monochromatic luminosity at 5100A
    l5100 = f5100 * (1 + z) * 4 * math.pi * lumdist**2

    l5100 = l5100 * 5100.0 * (u.AA)

    # print l5100

    return l5100
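
A minimal usage sketch for mono_lum above; the filter curve here is a hypothetical top-hat (in practice ftrwav/ftrtrans would be the instrument's transmission arrays), and the function also needs its input.yml and model files to be present:

import numpy as np

# Hypothetical H-band-like top-hat filter, for illustration only
ftrwav = np.linspace(14500.0, 18500.0, 500)                        # Angstroms
ftrtrans = ((ftrwav > 15000.0) & (ftrwav < 18000.0)).astype(float)

l5100 = mono_lum(mag=18.3, magsys='AB', mono_wav=5100.0, z=1.5,
                 ftrwav=ftrwav, ftrtrans=ftrtrans)
print(l5100)  # astropy Quantity: monochromatic luminosity at 5100 A (erg/s)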
Example #45
0
def calc_pdf(density=1e-7,
             L_nu=1e50,
             sigma=1,
             gamma=2.19,
             logMu_range=[-10, 6],
             N_Mu_bins=200,
             z_limits=[0.04, 10.],
             nzbins=120,
             Lum_limits=[1e45, 1e54],
             nLbins=120,
             flux_to_mu=10763342917.859608):
    """
    Parameters:
        - density in 1/Mpc^3
        - L_nu in erg/yr
        - sigma in dex
        - gamma
        - flux_to_mu

    Integration Parameters
        - logMu_range = [-10,6]      # expected range in log mu,
        - N_Mu_bins = 200            # number of bins for log nu histogram
        - z_limits = [0.04, 10.]     # Redshift limits
        - nzbins = 120               # number of z bins
        - Lum_limits = [1e45,1e54]   # Luminosity limits
        - nLbins = 120               # number of logLuminosity bins
    """
    # Conversion Factors
    Mpc_to_cm = 3.086e+24
    erg_to_GeV = 624.151
    year2sec = 365 * 24 * 3600

    cosmology = {'omega_M_0': 0.308, 'omega_lambda_0': 0.692, 'h': 0.678}
    cosmology = set_omega_k_0(cosmology)  # Flat universe

    ### Define the Redshift and Luminosity Evolution
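    # LF(z, logL) is the differential source density: the Hopkins & Beacom (2006)
    # star-formation history times a log-normal luminosity distribution of width
    # sigma dex whose mean is L_nu (hence the exp(-0.5*(ln10*sigma)^2) in 'scale');
    # the ln(10)*10**logL factor converts a density per unit L into one per unit log10 L.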
    redshift_evolution = lambda z: HopkinsBeacom2006StarFormationRate(z)
    LF = lambda z, logL: redshift_evolution(z)*np.log(10)*10**logL * \
        lognorm.pdf(10**logL, np.log(10)*sigma,
                    scale=L_nu*np.exp(-0.5*(np.log(10)*sigma)**2))

    N_tot, int_norm = tot_num_src(redshift_evolution, cosmology, z_limits[-1],
                                  density)
    print "Total number of sources {:.0f} (All-Sky)".format(N_tot)

    # Setup Arrays
    logMu_array = np.linspace(logMu_range[0], logMu_range[1], N_Mu_bins)
    Flux_from_fixed_z = []

    zs = np.linspace(z_limits[0], z_limits[1], nzbins)
    deltaz = (float(z_limits[1]) - float(z_limits[0])) / nzbins

    Ls = np.linspace(np.log10(Lum_limits[0]), np.log10(Lum_limits[1]), nLbins)
    deltaL = (np.log10(Lum_limits[1]) - np.log10(Lum_limits[0])) / nLbins

    # Integration
    t0 = time.time()
    Count_array = np.zeros(N_Mu_bins)
    muError = []
    tot_bins = nLbins * nzbins
    print('Starting Integration...Going to evaluate {} bins'.format(tot_bins))
    N_sum = 0

    Flux_from_fixed_z.append([])
    print "-" * 20

    # Loop over redshift bins
    for z_count, z in enumerate(zs):
        # Conversion Factor for given z
        bz = calc_conversion_factor(z, gamma)
        dlz = luminosity_distance(z, **cosmology)
        tot_flux_from_z = 0.

        # Loop over Luminosity bins
        for l_count, lum in enumerate(Ls):
            run_id = z_count * nLbins + l_count
            if run_id % (tot_bins / 10) == 0.:
                print "{}%".format(100 * run_id / tot_bins)
            # Number of Sources in
            dN = calc_dN(LF, lum, z, deltaL, deltaz, N_tot, int_norm,
                         cosmology)
            N_sum += dN

            #Flux to Source Strength
            logmu = np.log10(flux_to_mu * erg_to_GeV * 10**lum / year2sec /
                             (4 * np.pi * (Mpc_to_cm * dlz)**2) * bz)

            # Add dN to Histogram
            if logmu < logMu_range[1] and logmu > logMu_range[0]:
                tot_flux_from_z += dN * 10**logmu
                idx = int((logmu - logMu_range[0]) * N_Mu_bins /
                          (logMu_range[1] - logMu_range[0]))
                Count_array[idx] += dN
            else:
                muError.append(logmu)

        Flux_from_fixed_z.append(tot_flux_from_z)

    print "Number of Mu out of Range: {}".format(len(muError))
    print "Num Sou {}".format(N_sum)
    t1 = time.time()

    print "-" * 20
    print "\n Time needed for {}x{} bins: {}s".format(nzbins, nLbins,
                                                      int(t1 - t0))

    return logMu_array, Count_array, zs, Flux_from_fixed_z
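
A usage sketch with the defaults documented above; calc_pdf also relies on helpers defined elsewhere in this module (tot_num_src, calc_dN, calc_conversion_factor, HopkinsBeacom2006StarFormationRate) and on numpy, scipy.stats.lognorm and cosmolopy being imported:

logMu_array, Count_array, zs, Flux_from_fixed_z = calc_pdf(density=1e-7,
                                                           L_nu=1e50,
                                                           sigma=1,
                                                           gamma=2.19)

# Count_array holds the expected number of sources per log10(mu) bin;
# normalising it gives the probability density over source strength mu.
pdf = Count_array / Count_array.sum()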
Example #46
0
def haloStellarMass(filename="vt-stellar-masses.txt",
                    outfile="vt-halos-stellar-masses.txt",
                    verbose=1):

    #ra,dec,z,zerr,id,central,hostid, gr,gre, gi,gie, kri,krie, kii,kiie, \
    #    i,distmod,rabs,iabs, mcMass, taMass, maiss, std = np.genfromtxt(filename, unpack=True)
    #id, gr_o,  grostd,  gi_o, giostd, kri, kristd, kii, kiistd, i,  distmod, r_abs, i_abs, mcMass, taMass, mass, std, bestsp,COADD_OBJECTS_ID_1,hostid,ra,dec,MAG_AUTO_G,MAG_AUTO_R,MAG_AUTO_I,MAG_AUTO_Z,P_RADIAL,P_REDSHIFT,GR_P_COLOR,RI_P_COLOR,IZ_P_COLOR,GR_P_MEMBER,RI_P_MEMBER,IZ_P_MEMBER,DIST_TO_CENTER,GRP_ED,GRP_BLUE,GRP_BACKGROUND,RIP_RED,RIP_BLUE,RIP_BACKGROUND,IZP_RED,IZP_BLUE,IZP_BG,MAGERR_AUTO_G,MAGERR_AUTO_R, MAGERR_AUTO_I,MAGERR_AUTO_Z,z,R200,M200,N200,LAMBDA_CHISQ,GR_SEP_FLAG,RI_SEP_FLAG,IZ_SEP_FLAG = np.genfromtxt(filename, unpack=True)

    #For xmm?
    #id,mass,std,COADD_OBJECTS_ID_1,hostid,ZP,ZPE,MAG_AUTO_G,MAG_AUTO_R,MAG_AUTO_I,MAG_AUTO_Z,P_RADIAL,P_REDSHIFT,GR_P_COLOR,RI_P_COLOR,IZ_P_COLOR,GR_P_MEMBER,RI_P_MEMBER, IZ_P_MEMBER, AMAG_R,DIST_TO_CENTER,GRP_RED,GRP_BLUE,GRP_BG,RIP_RED,RIP_BLUE, RIP_BG,IZP_RED,IZP_BLUE,IZP_BG,RESTP_RED,RESTP_BLUE,RESTP_BG,REST_P_COLOR,REST_P_MEMBER,MAGERR_AUTO_G,MAGERR_AUTO_R,MAGERR_AUTO_I,MAGERR_AUTO_Z,z,R200,M200,N200,GR_SEP_FLAG,RI_SEP_FLAG,IZ_SEP_FLAG = np.genfromtxt(filename, unpack=True)

    #For Chandra
    id, hostid, gr_o, std, gi_o, std, kri, std, kii, std, i, distmod, r_abs, i_abs, mcMass, taMass, mass, std, z, sfr, sfrstd, age, agestd, bestsp, best_zmet, mean_zmet, GR_P_COLOR, RI_P_COLOR, IZ_P_COLOR, GR_P_MEMBER, RI_P_MEMBER, IZ_P_MEMBER, DIST_TO_CENTER, GRP_RED, GRP_BLUE, RIP_RED, RIP_BLUE, IZP_RED, IZP_BLUE, R200, M200 = np.genfromtxt(
        filename, unpack=True, delimiter=",")

    # FOR X-ray cat
    #    id,gr_o, std,gi_o, std,kri,std,kii,std,i,  distmod,r_abs,  i_abs,  mcMass,taMass,mass,std,sfr, sfrstd,age,agestd,bestsp,COADD_OBJECTS_ID,hostid,RA,  DEC,ZP,ZPE,  DERED_G_1,DERED_R_1,DERED_I_1,DERED_Z_1,P_RADIAL,  P_REDSHIFT, GR_P_COLOR,  RI_P_COLOR,IZ_P_COLOR,  GR_P_MEMBER, RI_P_MEMBER, IZ_P_MEMBER, AMAG_R, DIST_TO_CENTER,GRP_RED,GRP_BLUE, GRP_BG, RIP_RED,RIP_BLUE, RIP_BG, IZP_RED,IZP_BLUE, IZP_BG, RESTP_RED, RESTP_BLUE,RESTP_BG,REST_P_COLOR, REST_P_MEMBER,OBJID,  RA_1,DEC_1, ZPHOT,  ZPHOTERR,DERED_U,  DERED_G_2,DERED_R_2,DERED_I_2,DERED_Z_2,ERR_U,ERR_G, ERR_R, ERR_I, ERR_Z, ID,z,R200,  M200,N200,LAMBDA,GR_SLOPE,GR_INTERCEPT,GRMU_R,GRMU_B, GRSIGMA_R, GRSIGMA_B, GRW_R,GRW_B,  RI_SLOPE, RI_INTERCEPT,RIMU_R, RIMU_B, RISIGMA_R, RISIGMA_B,  RIW_R,  RIW_B,  GRMU_BG,  GRSIGMA_BG,GRW_BG,  RIMU_BG,  RISIGMA_BG,RIW_BG, IZ_SLOPE, IZ_INTERCEPT,IZMU_R, IZMU_B,IZSIGMA_R,  IZSIGMA_B, IZW_R,  IZW_B,  IZMU_BG,IZSIGMA_BG,IZW_BG, GR_SEP_FLAG,RI_SEP_FLAG,IZ_SEP_FLAG,REST_SLOPE,  REST_INTERCEPT,RESTMU_R,  RESTMU_B,  RESTMU_BG,RESTSIGMA_R,RESTSIGMA_B,RESTSIGMA_BG,RESTW_R,  RESTW_B,  RESTW_BG,REST_SEP_FLAG = np.genfromtxt(filename, unpack=True)

    #id,mass,std,COADD_OBJECTS_ID_1,hostid,ra,dec,ZP,ZPE,MAG_AUTO_G,MAG_AUTO_R,MAG_AUTO_I,MAG_AUTO_Z,P_RADIAL,P_REDSHIFT,GR_P_COLOR,RI_P_COLOR,IZ_P_COLOR,GR_P_MEMBER,RI_P_MEMBER,IZ_P_MEMBER,AMAG_R,DIST_TO_CENTER,GRP_RED,GRP_BLUE,GRP_BG,RIP_RED,RIP_BLUE,RIP_BG,IZP_RED,IZP_BLUE,IZP_BG,RESTP_RED,RESTP_BLUE,RESTP_BG,REST_P_COLOR,REST_P_MEMBER,MAGERR_AUTO_G,MAGERR_AUTO_R,MAGERR_AUTO_I,MAGERR_AUTO_Z,z,R200,M200,N200,GR_SEP_FLAG,RI_SEP_FLAG,IZ_SEP_FLAG,RESTW_R,RESTW_B,RESTW_BG,REST_SEP_FLAG = np.genfromtxt(filename, unpack=True)

    cosmo = {'omega_M_0': 0.23, 'omega_lambda_0': 0.77, 'h': 1.}
    cosmo = cd.set_omega_k_0(cosmo)

    rmax = 2.9  #maximum test radius in mpc. rmax will always be included as a test radius regardless of rmin,step
    rmin = 0.3  #minimum test radius in mpc. rmin always included as test radius
    step = 0.1  #step size, stepping from rmin to rmax, in mpc

    id = np.array(id).astype(int)
    #central = np.array(central).astype(int)
    hostid = np.array(hostid).astype(int)

    fd = open(outfile, "w")

    halos = np.unique(hostid)
    halos = np.sort(halos)
    if verbose:
        print "#halo,rad_cut, ngals, sum_mass, sum_mass_std,lambda_iz,mass_std_iz,lambda_gr_err_jk"
    #fd.write("# hostid z  median(ra) median(dec) median(z) ngals log(stellar_mass) std lambda_gr std lambda_ri std lambda_iz std M200 izflag lambda_gr_red lambda_ri_red lambda_iz_red std lambda_gr_blue lambda_ri_blue lambda_iz_blue std (h=0.7, Om=0.3, flat)\n")
    fd.write(
        "# halo, ngals, sum_mass, sum_mass_std, lambda_gr,lambda_gr_err_jk, lambda_ri, lambda_ri_err_jk,lambda_iz,lambda_iz_err_jk, lambda_gr_red, lambda_ri_red, lambda_iz_red,lambda_iz_red_err_jk, lambda_gr_blue, lambda_ri_blue, lambda_iz_blue,lambda_iz_blue_err_jk\n"
    )

    for halo in halos:
        ix_cl = np.nonzero(hostid == halo)
        z_cl = z[ix_cl][0]
        r200_cl = R200[ix_cl][0]
        #print DIST_TO_CENTER[ix_cl]
        #ang_diam_dist=cd.angular_diameter_distance(z_cl,z0=0,**cosmo)
        #distmpc = ang_diam_dist*DIST_TO_CENTER[ix_cl]*pi/180.
        distmpc = DIST_TO_CENTER[ix_cl]

        mass_cl = mass[ix_cl]
        GR_P_MEMBER_cl = GR_P_MEMBER[ix_cl]
        RI_P_MEMBER_cl = RI_P_MEMBER[ix_cl]
        IZ_P_MEMBER_cl = IZ_P_MEMBER[ix_cl]
        std_cl = std[ix_cl]
        GR_P_COLOR_cl = GR_P_COLOR[ix_cl]
        RI_P_COLOR_cl = RI_P_COLOR[ix_cl]
        IZ_P_COLOR_cl = IZ_P_COLOR[ix_cl]
        GRP_RED_cl = GRP_RED[ix_cl]
        RIP_RED_cl = RIP_RED[ix_cl]
        IZP_RED_cl = IZP_RED[ix_cl]
        GRP_BLUE_cl = GRP_BLUE[ix_cl]
        RIP_BLUE_cl = RIP_BLUE[ix_cl]
        IZP_BLUE_cl = IZP_BLUE[ix_cl]
        #        P_RADIAL_cl = P_RADIAL[ix_cl]

        radii = np.r_[rmin:rmax:step, rmax]
        radii = np.append(radii, r200_cl)
        #radii = [0.2,r200_cl]

        for rad_cut in radii:
            ix = np.nonzero(distmpc <= rad_cut)

            #zmedian = np.median(z[ix])
            #ramedian = np.median(ra[ix])
            #decmedian = np.median(dec[ix])
            ngals = id[ix].size
            if ngals > 0.:
                linear_mass = 10**mass_cl[ix]
                linear_mass_weight_gr = 10**mass_cl[ix] * GR_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                linear_mass_weight_ri = 10**mass_cl[ix] * RI_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                linear_mass_weight_iz = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                mass_errors = np.log(10.) * linear_mass * std_cl[ix]
                mass_std = np.sqrt((mass_errors**2).sum())

                mass_err_gr = np.log(
                    10.) * linear_mass * std_cl[ix] * GR_P_MEMBER_cl[ix]
                mass_err_ri = np.log(
                    10.) * linear_mass * std_cl[ix] * RI_P_MEMBER_cl[ix]
                mass_err_iz = np.log(
                    10.) * linear_mass * std_cl[ix] * IZ_P_MEMBER_cl[ix]
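                # the 1e-10 factor below puts these weighted-mass uncertainties in
                # the same units of 1e10 used for the lambda sums further down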
                mass_std_gr = 10**(-10) * np.sqrt((mass_err_gr**2).sum())
                mass_std_ri = 10**(-10) * np.sqrt((mass_err_ri**2).sum())
                mass_std_iz = 10**(-10) * np.sqrt((mass_err_iz**2).sum())

                linear_mass_weight_gr_red = 10**mass_cl[ix] * GR_P_MEMBER_cl[
                    ix] / GR_P_COLOR_cl[ix] * GRP_RED_cl[ix]
                linear_mass_weight_ri_red = 10**mass_cl[ix] * RI_P_MEMBER_cl[
                    ix] / RI_P_COLOR_cl[ix] * RIP_RED_cl[ix]
                linear_mass_weight_iz_red = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix] / IZ_P_COLOR_cl[ix] * IZP_RED_cl[ix]  #/P_RADIAL_cl[ix]

                linear_mass_weight_gr_blue = 10**mass_cl[ix] * GR_P_MEMBER_cl[
                    ix] / GR_P_COLOR_cl[ix] * GRP_BLUE_cl[ix]
                linear_mass_weight_ri_blue = 10**mass_cl[ix] * RI_P_MEMBER_cl[
                    ix] / RI_P_COLOR_cl[ix] * RIP_BLUE_cl[ix]
                linear_mass_weight_iz_blue = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix] / IZ_P_COLOR_cl[ix] * IZP_BLUE_cl[ix]

                #jackknife for errors on lambda_star
                weightmass_gr = 10**mass_cl[ix] * GR_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                lambda_gr_err_jk = (jackknife_var(weightmass_gr,
                                                  lambda_star_jk))**0.5
                weightmass_ri = 10**mass_cl[ix] * RI_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                lambda_ri_err_jk = (jackknife_var(weightmass_ri,
                                                  lambda_star_jk))**0.5
                weightmass_iz = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix]  #/P_RADIAL_cl[ix]
                lambda_iz_err_jk = (jackknife_var(weightmass_iz,
                                                  lambda_star_jk))**0.5
                weightmass_iz_red = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix] / IZ_P_COLOR_cl[ix] * IZP_RED_cl[ix]
                lambda_iz_red_err_jk = (jackknife_var(weightmass_iz_red,
                                                      lambda_star_jk))**0.5
                weightmass_iz_blue = 10**mass_cl[ix] * IZ_P_MEMBER_cl[
                    ix] / IZ_P_COLOR_cl[ix] * IZP_BLUE_cl[ix]
                lambda_iz_blue_err_jk = (jackknife_var(weightmass_iz_blue,
                                                       lambda_star_jk))**0.5

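                # lambda-style richness proxies: membership-probability-weighted
                # sums of 10**mass inside rad_cut, divided by 10**10, computed for
                # the g-r, r-i and i-z memberships and for their red/blue components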
                sum_mass = linear_mass.sum()
                lambda_gr = (linear_mass_weight_gr.sum()) / 10.**10.
                lambda_ri = (linear_mass_weight_ri.sum()) / 10.**10.
                lambda_iz = (linear_mass_weight_iz.sum()) / 10.**10.
                sum_mass_std = mass_std / sum_mass / np.log(10.)
                sum_mass = np.log10(sum_mass)

                lambda_gr_red = (linear_mass_weight_gr_red.sum()) / 10.**10.
                lambda_ri_red = (linear_mass_weight_ri_red.sum()) / 10.**10.
                lambda_iz_red = (linear_mass_weight_iz_red.sum()) / 10.**10.

                lambda_gr_blue = (linear_mass_weight_gr_blue.sum()) / 10.**10.
                lambda_ri_blue = (linear_mass_weight_ri_blue.sum()) / 10.**10.
                lambda_iz_blue = (linear_mass_weight_iz_blue.sum()) / 10.**10.

                #sum_mass_gr = np.log10(sum_mass_gr)
                #sum_mass_ri = np.log10(sum_mass_ri)
                #sum_mass_iz = np.log10(sum_mass_iz)
                #lambda_rm = LAMBDA_CHISQ[ix[0][0]]

                #TO UNCOMMENT!!!!!!!
                #M200_GMM = M200[ix[0][0]]
                #zout = z[ix[0][0]]
                #iz_flag = IZ_SEP_FLAG[ix[0][0]]
                if verbose:
                    print "{:10d}  {:6.3f} {:4d}      {:6.3f} {:6.4f} {:6.3f} {:6.4f} {:6.4f}".format(
                        halo, rad_cut, ngals, sum_mass, sum_mass_std,
                        lambda_iz, mass_std_iz, lambda_gr_err_jk)

                fd.write(
                    "{:10d} {:6.3f} {:4d} {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f}  {:6.3f} {:6.3f} {:6.3f} {:6.3f}  {:6.3f} {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f}\n"
                    .format(halo, rad_cut, ngals, sum_mass, sum_mass_std,
                            lambda_gr, lambda_gr_err_jk, lambda_ri,
                            lambda_ri_err_jk, lambda_iz, lambda_iz_err_jk,
                            lambda_gr_red, lambda_ri_red, lambda_iz_red,
                            lambda_iz_red_err_jk, lambda_gr_blue,
                            lambda_ri_blue, lambda_iz_blue,
                            lambda_iz_blue_err_jk))
        #LATEST:
        #fd.write("{:10d} {:6.3f}  {:6.3f}     {:4d}      {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f}  {:6.3f} {:6.3f} {:6.3f} {:6.3f}  {:6.3f} {:6.3f} {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f} {:2.3f}\n".format(halo,zout, zmedian, ngals, sum_mass, sum_mass_std, lambda_gr,lambda_gr_err_jk, lambda_ri, lambda_ri_err_jk,lambda_iz,lambda_iz_err_jk,M200_GMM, lambda_gr_red, lambda_ri_red, lambda_iz_red,lambda_iz_red_err_jk, lambda_gr_blue, lambda_ri_blue, lambda_iz_blue,lambda_iz_blue_err_jk,iz_flag))
        #fd.write("{:10d} {:6.3f}  {:6.3f}     {:4d}      {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f} {:6.3f}  {:6.3f} {:6.3f} {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f} {:10d}\n".format(halo,zout, zmedian, ngals, sum_mass, sum_mass_std, lambda_gr,mass_err_gr, lambda_ri, mass_err_ri,lambda_iz,mass_err_iz,M200_GMM, lambda_gr_red, lambda_ri_red, lambda_iz_red, lambda_gr_blue, lambda_ri_blue, lambda_iz_blue,iz_flag))
        #fd.write("{:10d} {:6.3f}  {:6.3f}     {:4d}      {:6.3f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f} {:3.0f} {:6.4f} {:6.3f} {:6.3f} {:6.3f} {:6.3f} {:6.3f}\n".format(halo,zout, zmedian, ngals, sum_mass, sum_mass_std, lambda_gr, lambda_ri, lambda_iz,mass_err_iz,M200_GMM,iz_flag, lambda_gr_red, lambda_ri_red, lambda_iz_red, lambda_gr_blue, lambda_ri_blue, lambda_iz_blue))

    fd.close()
from math import *
Example #48
0
from scipy import asarray as ar, exp
import numpy as np
import sys
from scipy.integrate import simps

#
import cosmolopy.distance as cd
import cosmolopy.constants as cc

cosmo = {'omega_M_0': 0.27, 'omega_lambda_0': 0.73, 'h': 0.72}
cosmo = cd.set_omega_k_0(cosmo)
c = 3.e18  # speed of light [Angstrom/s]
chimax = 1.
mag0 = 25.0
m0set = mag0
d = 10**(73.6 / 2.5)  # From [ergs/s/cm2/A] to [ergs/s/cm2/Hz]
Mpc_cm = 3.08568025e+24  # cm/Mpc


def madau_igm_abs(xtmp, ytmp, zin):
    #
    # Returns: IGM-attenuated flux (Madau-style prescription).
    #
    # xtmp: rest-frame wavelength
    # ytmp: flux in f_lambda
    # zin:  observed redshift of the source
    #
    tau = np.zeros(len(xtmp), dtype='float32')
    xlya = 1216.  # rest-frame Lyman-alpha wavelength [Angstrom]
    xLL = 912.    # rest-frame Lyman-limit wavelength [Angstrom]
    xLL = 1216.
Example #49
0
zrec_cst = inhodist.x2z_inho_lightcone(lightcone_cst, h)

theseed=None
lightcone = ln.lightcone_1d(xmax, nnx, seed=theseed, pklib=pklib, omegam=(omegac+omegab)/h2, smoothing=None)
clf()
plot(lightcone.xx, lightcone.alldelta[0,:])
np.mean(lightcone.alldelta[0,:])



zrec = inhodist.x2z_inho_lightcone(lightcone, h)


zvals = np.linspace(0,np.max(zrec_cst),1000)
cosmo_open = cd.set_omega_k_0({'omega_M_0' : (omegac+omegab)/h2, 'omega_lambda_0' : 0., 'h' : 0.7})  # open (Lambda = 0) cosmology
d_open = cd.comoving_distance_transverse(zvals, **cosmo_open)
lcdm = cd.set_omega_k_0({'omega_M_0' : (omegac+omegab)/h2, 'omega_lambda_0' : 0.7, 'h' : 0.7})  # LCDM reference
d_lcdm = cd.comoving_distance_transverse(zvals, **lcdm)
empty = cd.set_omega_k_0({'omega_M_0' : 0, 'omega_lambda_0' : 0., 'h' : 0.7})  # empty (Milne) universe
d_empty = cd.comoving_distance_transverse(zvals, **empty)


clf()
plot(zvals, d_open/d_lcdm, lw=2, label=r'Standard: Open $\Omega_m=0.3$')
plot(zvals, d_lcdm/d_lcdm, lw=2, label=r'Standard: $\Lambda$CDM')
plot(zvals, d_empty/d_lcdm, lw=2, label=r'Standard: Empty $\Omega_m=0.$')
plot(zrec_cst, lightcone_cst.xx/np.interp(zrec_cst, zvals, d_lcdm), '--', lw=2, label=r'JC: Uniform Open $\Omega_m=0.3$')
plot(zrec, lightcone.xx/np.interp(zrec, zvals, d_lcdm), lw=2, label=r'JC: Inhomogeneous Open $\Omega_m=0.3$')
legend(loc='upper left', fontsize=10, frameon=False)
Example #50
0
	def halo_gal_3D(self,gal_num,run_num,code_num):		

		'''This function corresponds to the standalone program 3D_halos.py'''

		# 3d galaxy m_phi code for non-stacked halos.

		# last update: 1/29/13
		
		############
		import cosmolopy.distance as cd

		## DEFINE FLAGS ##

		use_mems = False
		use_vdisp = True

		## DEFINE CONSTANTS ##

		h = 0.72 		# Hubble Constant / 100.0
		r_limit = 2		# Radius Limit of data in terms of R_crit200
		H0 = h*100.0		# Hubble constant
		q = 10.0
		c = 300000.0		# speed of light [km/s]
		cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':H0/100.0}
		cosmo = cd.set_omega_k_0(cosmo)
		halo_num = 100		# Number of halos in sample
		run_num = run_num	# Number of halos to run program over, particles
		bin_range = 1		# Needed b/c technically it is working on ensemble code

		## DEFINE FUNCTIONS ##

		def load_gals(h,gal_num,HaloID,HPX,HPY,HPZ,HVX,HVY,HVZ,Z,R_crit200):
			R = []
			V = []
			MAGS = []
			GPX = []
			GPY = []
			GPZ = []	
			ID = loadtxt('/n/Christoq1/nkern/Documents/MDB_milliMil_halodata/Caustic/cmiller.csv',delimiter=',',dtype='str',usecols=(0,),unpack=True)
			for haloid,k in zip(list(HaloID),list(range(halo_num))):
				IDmatch = where(ID==str(haloid))[0][0]
				f = pyfits.open('/n/Christoq1/MILLENNIUM/particles/t_'+str(IDmatch)+'_cmiller_guo.fits')
				data = f[1].data
				z,gpx,gpy,gpz,gvx,gvy,gvz,mags = data.field(13),data.field(17),data.field(18),data.field(19),data.field(20),data.field(21),data.field(22),data.field(63)
				
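				# convert to physical, halo-centric coordinates: divide positions by
				# (1+z) and h, subtract the halo position, and take velocities
				# relative to the halo bulk motion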
				gpx,gpy,gpz = ( gpx/(1+Z[k])/h - HPX[k] ),( gpy/(1+Z[k])/h - HPY[k] ),( gpz/(1+Z[k])/h - HPZ[k] )
				gvx,gvy,gvz = gvx-HVX[k],gvy-HVY[k],gvz-HVZ[k]
			
				r = sqrt( (gpx)**2 + (gpy)**2 + (gpz)**2 ) 
				v = sqrt( (gvx)**2 + (gvy)**2 + (gvz)**2 )

				sort = argsort(mags)		# sort indices by magnitude (ascending, i.e. brightest first)
				mags = array(mags[sort])
				r = array(r[sort])		 
				v = array(v[sort])
				gpx,gpy,gpz,gvx,gvy,gvz = array(gpx[sort]),array(gpy[sort]),array(gpz[sort]),array(gvx[sort]),array(gvy[sort]),array(gvz[sort])
				## LIMIT DATA ##
				cut = where((r<=r_limit*R_crit200[k]) & (v<=5000.0) & (v!=0) )[0][0:gal_num]	# keep the gal_num brightest; v != 0 drops the BCG (no peculiar velocity)
				
				r,v,mags = r[cut],v[cut],mags[cut]
				gpx,gpy,gpz,gvx,gvy,gvz = gpx[cut],gpy[cut],gpz[cut],gvx[cut],gvy[cut],gvz[cut]	

				R.append(r)
				V.append(v)
				MAGS.append(mags)
				GPX.append(gpx)
				GPY.append(gpy)
				GPZ.append(gpz)
				
			R = array(R)
			V = array(V)
			MAGS = array(MAGS)
			GPX = array(GPX)
			GPY = array(GPY)
			GPZ = array(GPZ)
			
			return R,V,MAGS,GPX,GPY,GPZ	

			
		## INITIALIZATION ##

		U = universal()
		P = particles()
		G = galaxies()
		C = caustic()

		### PROGRAM ###

		print('...loading halos')

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.load_halos(h)

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.sort_halos(HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z)

		print('...loading gals')
		R, V, MAGS, GPX, GPY, GPZ = load_gals(h,gal_num,HaloID,HPX,HPY,HPZ,HVX,HVY,HVZ,Z,R_crit200)

		print('...caustic!')
		x_range,INF_NFWMASS,DIA_NFWMASS,INF_CAUMASS,DIA_CAUMASS,INF_MPROF,INF_NFW,INF_CAU,DIA_MPROF,DIA_NFW,DIA_CAU = G.kernel_caustic_masscalc(R,V,M_crit200,R_crit200,SRAD,ESRAD,HVD,halo_num,bin_range,gal_num,H0,q,r_limit,run_num,use_mems)

		return x_range,INF_NFWMASS,DIA_NFWMASS,INF_CAUMASS,DIA_CAUMASS,INF_MPROF,INF_NFW,INF_CAU,DIA_MPROF,DIA_NFW,DIA_CAU,R,V,MAGS,M_crit200,R_crit200
Example #51
0
def ionization_from_luminosity(z,
                               ratedensityfunc,
                               xHe=1.0,
                               rate_is_tfunc=False,
                               ratedensityfunc_args=(),
                               method='romberg',
                               **cosmo):
    """Integrate the ionization history given an ionizing luminosity
    function, ignoring recombinations.

    Parameters
    ----------
    
    ratedensityfunc: callable
        function giving comoving ionizing photon emission rate
        density, or ionizing emissivity (photons s^-1 Mpc^-3) as a
        function of redshift (or time).

    rate_is_tfunc: boolean
        Set to true if ratedensityfunc is a function of time rather than z.

    Notes
    -----

    Ignores recombinations.

    The ionization rate is computed as ratedensity / nn, where nn = nH
    + xHe * nHe. So if xHe is 1.0, we are assuming that helium becomes
    singly ionized at proportionally the same rate as hydrogen. If xHe
    is 2.0, we are assuming helium becomes fully ionized at
    proportionally the same rate as hydrogen.

    The returned x is therefore the ionized fraction of hydrogen, and
    the ionized fraction of helium is xHe * x.

    """

    cosmo = cd.set_omega_k_0(cosmo)
    rhoc, rho0, nHe, nH = cden.baryon_densities(**cosmo)
    nn = (nH + xHe * nHe)
    if rate_is_tfunc:
        t = cd.age(z, **cosmo)[0]

        def dx_dt(t1):
            return numpy.nan_to_num(
                ratedensityfunc(t1, *ratedensityfunc_args) / nn)

        sorti = numpy.argsort(t)
        x = numpy.empty(t.shape)
        x[sorti] = cu.integrate_piecewise(dx_dt, t[sorti], method=method)
        return x
    else:
        dt_dz = lambda z1: cd.lookback_integrand(z1, **cosmo)

        def dx_dz(z1):
            z1 = numpy.abs(z1)
            return numpy.nan_to_num(
                dt_dz(z1) * ratedensityfunc(z1, *ratedensityfunc_args) / nn)

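        # integrate over -z so the integration variable increases as the
        # universe evolves forward in time (i.e. as z decreases)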
        sorti = numpy.argsort(-z)
        x = numpy.empty(z.shape)
        x[sorti] = cu.integrate_piecewise(dx_dz, -z[sorti], method=method)
        return x
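

# A minimal usage sketch for ionization_from_luminosity, under stated
# assumptions: the function sits in a module where numpy and the cosmolopy
# submodules are imported as cd (distance), cden (density) and cu (utils),
# as in cosmolopy.reionization, and the emissivity below is a toy constant
# chosen purely for illustration.
def _example_ionization_history():
    cosmo_example = {'omega_M_0': 0.3, 'omega_lambda_0': 0.7,
                     'omega_b_0': 0.045, 'h': 0.7, 'X_H': 0.76}
    z_example = numpy.linspace(20., 6., 100)   # from high to low redshift

    def emissivity(z1):
        # toy constant comoving emissivity: 1e50 ionizing photons s^-1 Mpc^-3
        return 1e50 * numpy.ones_like(z1)

    x = ionization_from_luminosity(z_example, emissivity, xHe=1.0,
                                   **cosmo_example)
    print("ionized H fraction at z = %.1f: %.3g" % (z_example[-1], x[-1]))
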
Example #52
0
	def halo_part_3D(self,gal_num,run_num,code_num):

		# 3d particle m_phi code for non-stacked halos.

		# last update: 1/29/13
		
		############
		import cosmolopy.distance as cd
		from numpy.random import randint
		## DEFINE FLAGS ##

		use_mems = False
		use_vdisp = True

		## DEFINE CONSTANTS ##

		h = 0.72 		# Hubble Constant / 100.0
		r_limit = 2		# Radius Limit of data in terms of R_crit200
		H0 = h*100.0		# Hubble constant
		q = 10.0
		c = 300000.0		# speed of light [km/s]
		cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'h':H0/100.0}
		cosmo = cd.set_omega_k_0(cosmo)
		halo_num = 100		# Number of halos in sample
		run_num = run_num	# Number of halos to run program over, particles
		bin_range = 1		# Needed b/c technically working on ensemble code

		## DEFINE FUNCTIONS ##

		def load_parts(h,gal_num,r_limit,HaloID,HVX,HVY,HVZ,Z,R_crit200,HVD):
			R = []
			V = []
			PPX = []
			PPY = []
			PPZ = []

			for haloid,k in zip(list(HaloID[run_num[0]:run_num[1]]),list(arange(run_num[0],run_num[1]))):
				id = loadtxt('/n/Christoq1/MILLENNIUM/particles/cmiller.csv', dtype='str', delimiter=',', usecols=(0,), unpack=True)
				id = delete(id,0) 
				index = where(id==str(haloid))

				p = pyfits.open('/n/Christoq1/MILLENNIUM/particles/t'+str(index[0][0])+'_cmiller.dat.fits')
				data = p[1].data
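				# positions scaled to physical units (divide by h and (1+z));
				# velocities scaled by 1/sqrt(1+z), then taken relative to the halo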
				ppx = data.field(1)/h/(1+Z[k])
				ppy = data.field(2)/h/(1+Z[k])
				ppz = data.field(3)/h/(1+Z[k])
				pvx = data.field(4)/sqrt(1+Z[k])
				pvy = data.field(5)/sqrt(1+Z[k])
				pvz = data.field(6)/sqrt(1+Z[k])

				pvx,pvy,pvz = pvx-HVX[k],pvy-HVY[k],pvz-HVZ[k]

				r = sqrt( (ppx**2) + (ppy**2) + (ppz**2) ) 
				v = sqrt( pvx**2 + pvy**2 + pvz**2 )

				r = array(r)
				v = array(v)		

				## LIMIT AND SELECT DATA ##
				cut = where((r<=r_limit*R_crit200[k]) & (v<=5000.0))
				r = r[cut]
				v = v[cut]
				ppx,ppy,ppz = ppx[cut],ppy[cut],ppz[cut]
				pick = randint(0,r.size,gal_num)	## RANDOM NUMBER PARTICLE SELECTION
				r,v,ppx,ppy,ppz = r[pick],v[pick],ppx[pick],ppy[pick],ppz[pick]

				R.append(r)
				V.append(v)
				PPX.append(ppx)
				PPY.append(ppy)
				PPZ.append(ppz)				

				print('done loading halo', k)

			R = array(R)
			V = array(V)
			PPX = array(PPX)
			PPY = array(PPY)
			PPZ = array(PPZ)

			return R, V, PPX, PPY, PPZ	
			
			
		## INITIALIZATION ##

		U = universal()
		P = particles()
		G = galaxies()
		C = caustic()

		### PROGRAM ###

		print('...loading halos')

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.load_halos(h)

		HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z = U.sort_halos(HaloID, R_crit200, M_crit200, HPX, HPY, HPZ, HVX, HVY, HVZ, HVD, SRAD, ESRAD, Z)

		print('...loading particles')
		R, V, PPX, PPY, PPZ = load_parts(h,gal_num,r_limit,HaloID,HVX,HVY,HVZ,Z,R_crit200,HVD)

		print('...caustic!')
		x_range,INF_NFWMASS,DIA_NFWMASS,INF_CAUMASS,DIA_CAUMASS,INF_MPROF,INF_NFW,INF_CAU,DIA_MPROF,DIA_NFW,DIA_CAU = P.kernel_caustic_masscalc(R,V,M_crit200,R_crit200,SRAD,ESRAD,HVD,halo_num,bin_range,gal_num,H0,q,r_limit,run_num,use_mems)

		return x_range,INF_NFWMASS,DIA_NFWMASS,INF_CAUMASS,DIA_CAUMASS,INF_MPROF,INF_NFW,INF_CAU,DIA_MPROF,DIA_NFW,DIA_CAU,R,V,HaloID,R_crit200,M_crit200
def Dv(z):
	'''Differential comoving volume element dV_c/dz/dOmega at redshift z
	(flat cosmology: omega_M_0 = 0.24, omega_lambda_0 = 0.76, h = 0.73).'''
	cosmo = {'omega_M_0' : 0.24, 'omega_lambda_0' : 1.0 - 0.24, 'h' : 0.73}
	cosmo = cd.set_omega_k_0(cosmo)
	Dv = cd.diff_comoving_volume(z, **cosmo)
	return Dv
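

# A brief usage sketch for Dv, assuming cosmolopy.distance is importable as cd
# at module level (the function body requires it). cd.diff_comoving_volume
# returns the differential comoving volume element dV_c/dz/dOmega in
# Mpc**3 sr**-1.
if __name__ == '__main__':
	print("dV_c/dz/dOmega at z=1: %.3e Mpc^3/sr" % Dv(1.0))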