Example #1
def myloglike_H0(cube, ndim, nparams):
        H0 = cube[0]
        Om0 = cube[1]
        Ode0 = cube[2]

        cosmo = LambdaCDM(H0=H0, Om0=Om0, Ode0=Ode0)

        c = 299792458.0*1e-3
        vr_mean, vr_std = redshift*c, redshift_error*c
 
        dc = cosmo.comoving_distance(redshift).value
        prob = np.log(kde_eval_single(kdedir_dist,[dc])[0])

        if np.isnan(prob):
            prob = -np.inf

        return prob
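The cube, ndim, nparams signature above follows the PyMultiNest callback convention. A minimal sketch of how such a likelihood could be sampled, assuming PyMultiNest; the prior ranges and output basename are placeholders, not taken from the original code.

import pymultinest

def myprior_H0(cube, ndim, nparams):
    # Map the MultiNest unit cube onto flat priors (illustrative ranges).
    cube[0] = cube[0] * 150.0  # H0 in [0, 150] km/s/Mpc
    # cube[1] (Om0) and cube[2] (Ode0) stay on [0, 1]

pymultinest.run(myloglike_H0, myprior_H0, 3,
                outputfiles_basename='H0_', resume=False, verbose=True)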
Example #2
def myloglike_H0(cube, ndim, nparams):
    H0 = cube[0]
    Om0 = cube[1]
    Ode0 = cube[2]

    cosmo = LambdaCDM(H0=H0, Om0=Om0, Ode0=Ode0)

    prob = 0
    for name in data_struct.keys():
        dc = cosmo.comoving_distance(data_struct[name]["redshift"]).value
        kdedir_dist = data_struct[name]["kdedir_dist"]
        prob = prob + np.log(kde_eval_single(kdedir_dist, [dc])[0])

        #pvp = (1/np.sqrt(2*np.pi*data_struct[name]["dist_std"]**2))*np.exp((-1/2.0)*((data_struct[name]["dist_median"]-dc)/data_struct[name]["dist_std"])**2)
        #prob = prob + np.log(pvp)

    if np.isnan(prob):
        prob = -np.inf

    return prob
Example #3
class cosmo_LCDM:
    '''LambdaCDM cosmology.'''
    def __init__(self, H0, Om0, Ode0):
        self.H0 = H0
        self.cosmo = LambdaCDM(H0=H0, Om0=Om0, Ode0=Ode0)
        self.h = self.H0 / 100.

    def z2chi(self, z):
        '''Get comoving distance in [Mpc/h] from redshift'''
        return self.cosmo.comoving_distance(z).value * self.h

    def rdz2xyz(self, ra, dec, redshift):
        '''Convert RA, DEC, redshift to X, Y, Z'''
        co_dis = self.z2chi(redshift)
        theta = np.pi * (90. - dec) / 180.
        phi = np.pi * ra / 180.

        ngal = ra.size
        xyz = np.zeros(ngal * 3).reshape(ngal, 3)
        xyz[:, 0] = co_dis * np.sin(theta) * np.cos(phi)
        xyz[:, 1] = co_dis * np.sin(theta) * np.sin(phi)
        xyz[:, 2] = co_dis * np.cos(theta)

        return xyz

    def H_z(self, z):
        '''Get Hubble parameter at redshift z, i.e. H(z)'''
        return self.H0 * self.cosmo.efunc(z)

    def inv_H_z(self, z):
        '''Inverse of H(z), i.e. 1/H(z)'''
        return self.cosmo.inv_efunc(z) / self.H0

    def chi2z(self, chi, z_min, z_max, n_interp=1000):
        '''Get redshift from comoving distance with interpolation,
        make sure that all chi are included in the redshift bin set by z_min, z_max'''
        z_samp = np.linspace(z_min, z_max, n_interp)
        chi_samp = self.z2chi(z_samp)
        interp = interpolate.splrep(chi_samp, z_samp, s=0, k=1)

        return interpolate.splev(chi, interp, der=0)
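A short usage sketch of the cosmo_LCDM class above, assuming the imports it relies on (numpy as np, scipy's interpolate, astropy's LambdaCDM); the coordinates are placeholder values.

import numpy as np
from scipy import interpolate
from astropy.cosmology import LambdaCDM

cosmo = cosmo_LCDM(H0=70., Om0=0.3, Ode0=0.7)
ra = np.array([150.1, 150.2])    # [deg]
dec = np.array([2.2, 2.4])       # [deg]
z = np.array([0.3, 0.5])

chi = cosmo.z2chi(z)             # comoving distance [Mpc/h]
xyz = cosmo.rdz2xyz(ra, dec, z)  # Cartesian positions [Mpc/h]
z_back = cosmo.chi2z(chi, z_min=0.0, z_max=1.0)  # inverse mapping, ~= z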
Example #4
def create_density_maps():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        args["simdir"] = sys.argv[1]
        args["hfdir"] = sys.argv[2]
        args["snapnum"] = int(sys.argv[3])
        args["ncells"] = int(sys.argv[4])
        args["outbase"] = sys.argv[5]
        args["nfileout"] = sys.argv[6]
    args = comm.bcast(args)
    label = args["simdir"].split('/')[-2].split('_')[2]

    # Organize division of Sub-&Halos over Processes on Proc. 0
    if comm_rank == 0:
        # Sort Sub-&Halos over Processes
        df = pd.read_csv(args["hfdir"] + 'halos_%d.dat' % args["snapnum"],
                         sep=r'\s+',
                         skiprows=16,
                         usecols=[0, 2, 4, 9, 10, 11],
                         names=['ID', 'Mvir', 'Vrms', 'X', 'Y', 'Z'])
        df = df[df['Mvir'] > 5e11]
        sh_id = df['ID'].values.astype('float64')
        sh_vrms = df['Vrms'].values.astype('float64')
        sh_x = df['X'].values.astype('float64')
        sh_y = df['Y'].values.astype('float64')
        sh_z = df['Z'].values.astype('float64')
        del df
        hist_edges = histedges_equalN(sh_x, comm_size)
        sh_id, sh_vrms, sh_x, sh_y, sh_z, sh_split_size_1d, sh_split_disp_1d = cluster_subhalos(
            sh_id, sh_vrms, sh_x, sh_y, sh_z, hist_edges, comm_size)

        # Sort Particles over Processes
        s = read_hdf5.snapshot(args["snapnum"], args["simdir"])
        s.read(["Coordinates", "Masses", "GFM_StellarFormationTime"],
               parttype=[0, 1, 4])
        scale = 1e-3 * s.header.hubble
        ## Dark Matter
        dm_mass = (s.data['Masses']['dm']).astype('float64')
        dm_x = (s.data['Coordinates']['dm'][:, 0] * scale).astype('float64')
        dm_y = (s.data['Coordinates']['dm'][:, 1] * scale).astype('float64')
        dm_z = (s.data['Coordinates']['dm'][:, 2] * scale).astype('float64')
        dm_mass, dm_x, dm_y, dm_z, dm_split_size_1d, dm_split_disp_1d = cluster_particles(
            dm_mass, dm_x, dm_y, dm_z, hist_edges, comm_size)
        ## Gas
        gas_mass = (s.data['Masses']['gas']).astype('float64')
        gas_x = (s.data['Coordinates']['gas'][:, 0] * scale).astype('float64')
        gas_y = (s.data['Coordinates']['gas'][:, 1] * scale).astype('float64')
        gas_z = (s.data['Coordinates']['gas'][:, 2] * scale).astype('float64')
        gas_mass, gas_x, gas_y, gas_z, gas_split_size_1d, gas_split_disp_1d = cluster_particles(
            gas_mass, gas_x, gas_y, gas_z, hist_edges, comm_size)
        ## Stars
        star_mass = (s.data['Masses']['stars']).astype('float64')
        star_x = (s.data['Coordinates']['stars'][:, 0] *
                  scale).astype('float64')
        star_y = (s.data['Coordinates']['stars'][:, 1] *
                  scale).astype('float64')
        star_z = (s.data['Coordinates']['stars'][:, 2] *
                  scale).astype('float64')
        star_age = s.data['GFM_StellarFormationTime']['stars']
        star_x = star_x[star_age >= 0]  #[Mpc]
        star_y = star_y[star_age >= 0]  #[Mpc]
        star_z = star_z[star_age >= 0]  #[Mpc]
        star_mass = star_mass[star_age >= 0]
        del star_age
        star_mass, star_x, star_y, star_z, star_split_size_1d, star_split_disp_1d = cluster_particles(
            star_mass, star_x, star_y, star_z, hist_edges, comm_size)

        # Define Cosmology
        cosmo = LambdaCDM(H0=s.header.hubble * 100,
                          Om0=s.header.omega_m,
                          Ode0=s.header.omega_l)
        cosmosim = {
            'omega_M_0': s.header.omega_m,
            'omega_lambda_0': s.header.omega_l,
            'omega_k_0': 0.0,
            'h': s.header.hubble
        }
        redshift = s.header.redshift
        print(': Redshift: %f' % redshift)
    else:
        sh_id = None
        sh_vrms = None
        sh_x = None
        sh_y = None
        sh_z = None
        sh_split_size_1d = None
        sh_split_disp_1d = None
        dm_mass = None
        dm_x = None
        dm_y = None
        dm_z = None
        dm_split_size_1d = None
        dm_split_disp_1d = None
        gas_mass = None
        gas_x = None
        gas_y = None
        gas_z = None
        gas_split_size_1d = None
        gas_split_disp_1d = None
        star_mass = None
        star_x = None
        star_y = None
        star_z = None
        star_split_size_1d = None
        star_split_disp_1d = None
        cosmosim = None
        cosmo = None
        redshift = None

    # Broadcast variables over all processors
    sh_split_size_1d = comm.bcast(sh_split_size_1d, root=0)
    sh_split_disp_1d = comm.bcast(sh_split_disp_1d, root=0)
    dm_split_size_1d = comm.bcast(dm_split_size_1d, root=0)
    dm_split_disp_1d = comm.bcast(dm_split_disp_1d, root=0)
    gas_split_size_1d = comm.bcast(gas_split_size_1d, root=0)
    gas_split_disp_1d = comm.bcast(gas_split_disp_1d, root=0)
    star_split_size_1d = comm.bcast(star_split_size_1d, root=0)
    star_split_disp_1d = comm.bcast(star_split_disp_1d, root=0)
    cosmo = comm.bcast(cosmo, root=0)
    redshift = comm.bcast(redshift, root=0)

    # Initialize variables for each processor
    sh_id_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_vrms_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_x_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_y_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_z_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    dm_mass_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_x_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_y_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_z_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    gas_mass_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_x_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_y_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_z_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    star_mass_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_x_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_y_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_z_local = np.zeros((int(star_split_size_1d[comm_rank])))

    # Divide Data over Processes
    comm.Scatterv([sh_id, sh_split_size_1d, sh_split_disp_1d, MPI.DOUBLE],
                  sh_id_local,
                  root=0)
    comm.Scatterv([sh_vrms, sh_split_size_1d, sh_split_disp_1d, MPI.DOUBLE],
                  sh_vrms_local,
                  root=0)
    comm.Scatterv([sh_x, sh_split_size_1d, sh_split_disp_1d, MPI.DOUBLE],
                  sh_x_local,
                  root=0)
    comm.Scatterv([sh_y, sh_split_size_1d, sh_split_disp_1d, MPI.DOUBLE],
                  sh_y_local,
                  root=0)
    comm.Scatterv([sh_z, sh_split_size_1d, sh_split_disp_1d, MPI.DOUBLE],
                  sh_z_local,
                  root=0)

    comm.Scatterv([dm_x, dm_split_size_1d, dm_split_disp_1d, MPI.DOUBLE],
                  dm_x_local,
                  root=0)
    comm.Scatterv([dm_y, dm_split_size_1d, dm_split_disp_1d, MPI.DOUBLE],
                  dm_y_local,
                  root=0)
    comm.Scatterv([dm_z, dm_split_size_1d, dm_split_disp_1d, MPI.DOUBLE],
                  dm_z_local,
                  root=0)
    comm.Scatterv([dm_mass, dm_split_size_1d, dm_split_disp_1d, MPI.DOUBLE],
                  dm_mass_local,
                  root=0)

    comm.Scatterv([gas_x, gas_split_size_1d, gas_split_disp_1d, MPI.DOUBLE],
                  gas_x_local,
                  root=0)
    comm.Scatterv([gas_y, gas_split_size_1d, gas_split_disp_1d, MPI.DOUBLE],
                  gas_y_local,
                  root=0)
    comm.Scatterv([gas_z, gas_split_size_1d, gas_split_disp_1d, MPI.DOUBLE],
                  gas_z_local,
                  root=0)
    comm.Scatterv([gas_mass, gas_split_size_1d, gas_split_disp_1d, MPI.DOUBLE],
                  gas_mass_local,
                  root=0)

    comm.Scatterv([star_x, star_split_size_1d, star_split_disp_1d, MPI.DOUBLE],
                  star_x_local,
                  root=0)
    comm.Scatterv([star_y, star_split_size_1d, star_split_disp_1d, MPI.DOUBLE],
                  star_y_local,
                  root=0)
    comm.Scatterv([star_z, star_split_size_1d, star_split_disp_1d, MPI.DOUBLE],
                  star_z_local,
                  root=0)
    comm.Scatterv(
        [star_mass, star_split_size_1d, star_split_disp_1d, MPI.DOUBLE],
        star_mass_local,
        root=0)
    print(
        ': Proc. %d got: \n\t %d Sub-&Halos \n\t %d dark matter \n\t %d gas \n\t %d stars \n'
        % (comm_rank, int(sh_split_size_1d[comm_rank]),
           int(dm_split_size_1d[comm_rank]), int(gas_split_size_1d[comm_rank]),
           int(star_split_size_1d[comm_rank])))

    comm.Barrier()

    SH = {
        "ID": sh_id_local,
        "Vrms": sh_vrms_local,
        "Pos": np.transpose([sh_x_local, sh_y_local, sh_z_local])
    }
    DM = {
        "Mass": np.unique(dm_mass_local),
        "Pos": np.transpose([dm_x_local, dm_y_local, dm_z_local])
    }
    Gas = {
        "Mass": gas_mass_local,
        "Pos": np.transpose([gas_x_local, gas_y_local, gas_z_local])
    }
    Star = {
        "Mass": star_mass_local,
        "Pos": np.transpose([star_x_local, star_y_local, star_z_local])
    }

    sigma_tot = []
    subhalo_id = []
    FOV = []
    ## Run over Sub-&Halos
    for ll in range(len(SH['ID'])):
        # Define field-of-view
        c = (const.c).to_value('km/s')
        fov_rad = 4 * np.pi * (SH['Vrms'][ll] / c)**2
        #TODO: for z=0 sh_dist=0!!!
        sh_dist = (cosmo.comoving_distance(redshift)).to_value('Mpc')
        alpha = 6  # multiplied by 4 because of Oguri&Marshall
        fov_Mpc = alpha * fov_rad * sh_dist  # is it the diameter?

        dm_sigma, h = DMf.projected_surface_density_smooth(
            DM['Pos'],  #[Mpc]
            SH['Pos'][ll],
            fov_Mpc,  #[Mpc]
            args["ncells"])
        dm_sigma *= DM['Mass']
        gas_sigma = projected_surface_density(
            Gas['Pos'],  #*a/h,
            Gas['Mass'],
            SH['Pos'][ll],  #*a/h,
            fov_Mpc,
            args["ncells"])
        gas_sigma = gaussian_filter(gas_sigma, sigma=h)
        star_sigma = projected_surface_density(
            Star['Pos'],  #*a/h,
            Star['Mass'],
            SH['Pos'][ll],  #*a/h,
            fov_Mpc,
            args["ncells"])
        star_sigma = gaussian_filter(star_sigma, sigma=h)

        # Check if all density maps are empty
        if ((np.count_nonzero(dm_sigma) == args["ncells"]**2)
                and (np.count_nonzero(gas_sigma) == (args["ncells"])**2)
                and (np.count_nonzero(star_sigma) == (args["ncells"])**2)):
            continue
        sigmatotal = dm_sigma + gas_sigma + star_sigma

        sigma_tot.append(sigmatotal)
        subhalo_id.append(int(SH['ID'][ll]))
        FOV.append(fov_Mpc)

    fname = args["outbase"] + 'z_' + str(
        args["snapnum"]) + '/' + 'DM_' + label + '_' + str(comm_rank) + '.h5'
    hf = h5py.File(fname, 'w')
    hf.create_dataset('density_map', data=sigma_tot)
    hf.create_dataset('subhalo_id', data=np.asarray(subhalo_id))
    hf.create_dataset('fov_width', data=np.asarray(FOV))
    #RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
    hf.close()
Example #5
    slicserror_h = np.array(slicserrors)
    slicserror_l = np.array(slicserrors)

    #"""
    ######

    path_plots = '/%s/Plots/%s' % (path_sheardata, selection)

    # Define the part of the trough profile that contributes to the fit
    if Runit == 'arcmin':
        xmin = theta * 1.2
        xmax = 70.
    if Runit == 'Mpc':
        Rlist = theta * am_to_rad * (
            cosmo.comoving_distance(troughZ).to('Mpc')).value  #2.77797224336

        xmin = Rlist * 1.2
        xmax = 20.

    xmask = (xmin < data_x) & (data_x < xmax)
    xwhere = np.where(xmask)[0]

    # Plotting the ueber matrix
    Nbins = Npercs
    Nrows = Nbins // 5
    Ncolumns = int(Nbins / Nrows)

    fig = plt.figure(figsize=(12, 8))
    canvas = FigureCanvas(fig)
Example #6
def create_density_maps():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(':Registered %d processes' % comm_size)
        args["simdir"] = sys.argv[1]
        args["hfdir"] = sys.argv[2]
        args["snapnum"] = int(sys.argv[3])
        args["zs"] = float(sys.argv[4]) / 10
    args = comm.bcast(args)
    label = args["simdir"].split('/')[-2].split('_')[2]

    # Organize division of Sub-&Halos over Processes on Proc. 0
    if comm_rank == 0:
        # Load simulation
        s = read_hdf5.snapshot(args["snapnum"], args["simdir"])
        s.read(["Coordinates", "Masses", "GFM_StellarFormationTime"],
               parttype=[0, 1, 4])  #[0,1,4]
        scale = 1e-3 * s.header.hubble

        # Define Cosmology
        cosmo = LambdaCDM(H0=s.header.hubble * 100,
                          Om0=s.header.omega_m,
                          Ode0=s.header.omega_l)
        cosmosim = {
            'omega_M_0': s.header.omega_m,
            'omega_lambda_0': s.header.omega_l,
            'omega_k_0': 0.0,
            'h': s.header.hubble
        }
        redshift = s.header.redshift
        print(': Redshift: %f' % redshift)

        # Sort Sub-&Halos over Processes
        df = pd.read_csv(args["hfdir"] + 'halos_%d.dat' % args["snapnum"],
                         sep=r'\s+',
                         skiprows=16,
                         usecols=[0, 2, 4, 9, 10, 11],
                         names=['ID', 'Mvir', 'Vrms', 'X', 'Y', 'Z'])
        df = df[df['Mvir'] > 5e11]
        sh_id = df['ID'].values.astype('float64')
        sh_vrms = df['Vrms'].values.astype('float64')
        sh_x = df['X'].values.astype('float64')
        sh_y = df['Y'].values.astype('float64')
        sh_z = df['Z'].values.astype('float64')
        del df
        hist_edges = procdiv.histedges_equalN(sh_x, comm_size)
        SH = procdiv.cluster_subhalos(sh_id, sh_vrms, sh_x, sh_y, sh_z,
                                      hist_edges, comm_size)

        # Calculate overlap for particle cuboids
        c = (const.c).to_value('km/s')
        fov_rad = 4 * np.pi * (np.percentile(SH['Vrms'], 90) / c)**2
        sh_dist = (cosmo.comoving_distance(redshift)).to_value('Mpc')
        alpha = 6  # multiplied by 4 because of Oguri&Marshall
        overlap = 0.5 * alpha * fov_rad * sh_dist  #[Mpc] half of field-of-view
        print('Cuboids overlap is: %f [Mpc]' % overlap)

        # Sort Particles over Processes
        ## Dark Matter
        DM = {
            'Mass': (s.data['Masses']['dm']).astype('float64'),
            'Pos': (s.data['Coordinates']['dm'] * scale).astype('float64')
        }
        DM = procdiv.cluster_particles(DM, hist_edges, comm_size)
        ## Gas
        Gas = {
            'Mass': (s.data['Masses']['gas']).astype('float64'),
            'Pos': (s.data['Coordinates']['gas'] * scale).astype('float64')
        }
        Gas = procdiv.cluster_particles(Gas, hist_edges, comm_size)
        ## Stars
        age = (s.data['GFM_StellarFormationTime']['stars']).astype('float64')
        Star = {
            'Mass': (s.data['Masses']['stars'][age >= 0]).astype('float64'),
            'Pos': (s.data['Coordinates']['stars'][age >= 0, :] *
                    scale).astype('float64')
        }
        del age
        Star = procdiv.cluster_particles(Star, hist_edges, comm_size)

    else:
        c = None
        alpha = None
        overlap = None
        cosmosim = None
        cosmo = None
        redshift = None
        hist_edges = None
        SH = {
            'ID': None,
            'Vrms': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        DM = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        Gas = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        Star = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }

    # Broadcast variables over all processors
    sh_split_size_1d = comm.bcast(SH['split_size_1d'], root=0)
    dm_split_size_1d = comm.bcast(DM['split_size_1d'], root=0)
    gas_split_size_1d = comm.bcast(Gas['split_size_1d'], root=0)
    star_split_size_1d = comm.bcast(Star['split_size_1d'], root=0)
    c = comm.bcast(c, root=0)
    alpha = comm.bcast(alpha, root=0)
    overlap = comm.bcast(overlap, root=0)
    cosmo = comm.bcast(cosmo, root=0)
    redshift = comm.bcast(redshift, root=0)
    hist_edges = comm.bcast(hist_edges, root=0)

    SH = procdiv.scatter_subhalos(SH,
                                  sh_split_size_1d,
                                  comm_rank,
                                  comm,
                                  root_proc=0)
    DM = procdiv.scatter_particles(DM,
                                   dm_split_size_1d,
                                   comm_rank,
                                   comm,
                                   root_proc=0)
    Gas = procdiv.scatter_particles(Gas,
                                    gas_split_size_1d,
                                    comm_rank,
                                    comm,
                                    root_proc=0)
    Star = procdiv.scatter_particles(Star,
                                     star_split_size_1d,
                                     comm_rank,
                                     comm,
                                     root_proc=0)
    print(
        ': Proc. %d got: \n\t %d Sub-&Halos \n\t %d dark matter \n\t %d gas \n\t %d stars \n'
        % (comm_rank, int(sh_split_size_1d[comm_rank]),
           int(dm_split_size_1d[comm_rank]), int(gas_split_size_1d[comm_rank]),
           int(star_split_size_1d[comm_rank])))

    ## Run over Sub-&Halos
    zl = redshift
    zs = args["zs"]
    ncells = [512, 256, 128]
    nparts = [1, 2, 4, 8]
    M200 = np.ones(len(SH['ID']))
    ID = np.ones(len(SH['ID']))
    Rein = np.ones((len(SH['ID']), len(ncells), len(nparts)))
    for ll in range(len(SH['ID'])):
        # Define field-of-view
        fov_rad = 4 * np.pi * (SH['Vrms'][ll] / c)**2
        sh_dist = (cosmo.comoving_distance(redshift)).to_value('Mpc')
        fov_Mpc = alpha * fov_rad * sh_dist  #[Mpc] is it the diameter?
        fov_arc = (fov_Mpc / cf.Da(zl, cosmo) * u.rad).to_value('arcsec')
        sigma_cr = sigma_crit(zl, zs, cosmo).to_value('Msun Mpc-2')

        # Check cuboid boundary condition,
        # that all surface densities are filled with particles
        if ((SH['Pos'][ll,0]-hist_edges[comm_rank] < overlap) or
                (hist_edges[comm_rank+1]-overlap < \
                 SH['Pos'][ll,0]-hist_edges[comm_rank])):
            if fov_Mpc * 0.45 > overlap:
                print("FOV is bigger than cuboids overlap: %f > %f" % \
                        (fov_Mpc*0.45, overlap))
                continue

        ## Run over different Ncells
        for cc in range(len(ncells)):
            dsx_arc = fov_arc / ncells[cc]  #[arcsec] pixel size

            ## Run over particle reductions
            for mm in range(len(nparts)):
                smlpixel = 20  # maximum smoothing pixel length
                pos, indx = dmap.select_particles(
                    Gas['Pos'],
                    SH['Pos'][ll],  #*a/h,
                    fov_Mpc,
                    'box')
                gas_sigma = dmap.projected_density_pmesh_adaptive(
                    pos[::nparts[mm], :],
                    Gas['Mass'][indx][::nparts[mm]],
                    fov_Mpc,
                    ncells[cc],
                    hmax=smlpixel)
                pos, indx = dmap.select_particles(
                    Star['Pos'],
                    SH['Pos'][ll],  #*a/h,
                    fov_Mpc,
                    'box')
                star_sigma = dmap.projected_density_pmesh_adaptive(
                    pos[::nparts[mm], :],
                    Star['Mass'][indx][::nparts[mm]],
                    fov_Mpc,
                    ncells[cc],
                    hmax=smlpixel)
                pos, indx = dmap.select_particles(
                    DM['Pos'],
                    SH['Pos'][ll],  #*a/h,
                    fov_Mpc,
                    'box')
                dm_sigma = dmap.projected_density_pmesh_adaptive(
                    pos[::nparts[mm], :],
                    DM['Mass'][indx][::nparts[mm]],
                    fov_Mpc,  #[Mpc]
                    ncells[cc],
                    hmax=smlpixel)
                tot_sigma = dm_sigma + gas_sigma + star_sigma

                # Make sure that density-map is filled
                while 0.0 in tot_sigma:
                    smlpixel += 5
                    dm_sigma = dmap.projected_density_pmesh_adaptive(
                        pos[::nparts[mm], :],
                        DM['Mass'][indx][::nparts[mm]],
                        fov_Mpc,  #[Mpc]
                        ncells[cc],
                        hmax=smlpixel)
                    tot_sigma = dm_sigma + gas_sigma + star_sigma
                #tmap.plotting(tot_sigma, ncells[cc], fov_Mpc, zl)

                # initialize the coordinates of grids (light rays on lens plan)
                lpv = np.linspace(-(fov_arc - dsx_arc) / 2,
                                  (fov_arc - dsx_arc) / 2, ncells[cc])
                lp1, lp2 = np.meshgrid(lpv, lpv)  #[arcsec]

                fig = plt.figure()
                ax = fig.add_subplot(111)
                # Calculate convergence map
                kappa = tot_sigma / sigma_cr

                # Calculate Deflection Maps
                alpha1, alpha2, mu_map, phi, detA, lambda_t = cal_lensing_signals(
                    kappa, fov_arc, ncells[cc])
                # Calculate Einstein Radii
                Rein[ll, cc, mm] = einstein_radii(lp1, lp2, detA, lambda_t, zl,
                                                  cosmo, ax, 'med', ll)
                #print('Rein = %f' % Rein[ll, cc, mm])
                ID[ll] = SH['ID'][ll]
                #plt.close(fig)
    output = {}
    for cc in range(len(ncells)):
        for mm in range(len(nparts)):
            output[(str(ncells[cc]), str(nparts[mm]))] = Rein[:, cc, mm]
    df = pd.DataFrame.from_dict(output)
    df['ID'] = ID
    #self.df = pd.concat([self.df, dfp], axis=1)
    fname = 'DMConvTest_' + label + '_' + str(comm_rank) + '_zs150.h5'
    df.to_hdf(fname, key='Rein', mode='w')
    plt.close(fig)
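The recurring factor 4*np.pi*(Vrms/c)**2 in these examples is the singular-isothermal-sphere (SIS) Einstein angle without the lensing distance ratio. A minimal standalone sketch of the full SIS Einstein radius, assuming astropy; the redshifts and velocity dispersion are placeholder values.

import numpy as np
from astropy import constants as const, units as u
from astropy.cosmology import LambdaCDM

cosmo = LambdaCDM(H0=70., Om0=0.3, Ode0=0.7)
zl, zs = 0.3, 1.0
vrms = 250. * u.km / u.s

# SIS Einstein angle: theta_E = 4*pi*(sigma_v/c)**2 * D_ls/D_s
Dls = cosmo.angular_diameter_distance_z1z2(zl, zs)
Ds = cosmo.angular_diameter_distance(zs)
theta_E = 4 * np.pi * (vrms / const.c.to('km/s'))**2 * Dls / Ds * u.rad
print(theta_E.to('arcsec'))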
Example #7
IDang = fits.open(
    '/mnt/clemente/lensing/redMaPPer/redmapper_dr8_public_v6.3_catalog.fits'
)[1].data.ID

IDc = clusters.ID
IDf = clusters_full.ID
mid = np.in1d(IDang, IDc)
sindex = np.argsort(IDc)

# DISTANCIA AL VECINO

RA = clusters.RA
DEC = clusters.DEC
z = clusters.Z_LAMBDA

Dcosmo = cosmo.comoving_distance(z)
catalog = SkyCoord(ra=RA * u.degree, dec=DEC * u.degree, distance=Dcosmo)
idx, d2d, d3d = catalog.match_to_catalog_3d(catalog, nthneighbor=2)

#--------------------------------

ides = np.zeros(len(IDc))
t = np.zeros(len(IDc))
t_wl = np.zeros(len(IDc))
t_wd = np.zeros(len(IDc))
t_p = np.zeros(len(IDc))
t_pwl = np.zeros(len(IDc))
t_pwd = np.zeros(len(IDc))
pcen = np.zeros(len(IDc))

t_BG = clusters.deVPhi_BG
Example #8
gamacat = pyfits.open(path_gamacat, ignore_missing_end=True)[1].data

print('Importing GAMA catalogue:', path_gamacat)

# Importing and correcting log(Mstar)
galIDlist = gamacat['ID']

logmstarlist = gamacat['logmstar']
ranklist = gamacat['RankBCG']

RAlist = gamacat['RA'] * u.degree
DEClist = gamacat['DEC'] * u.degree
zlist = gamacat['Z']

# Calculating galaxy distances
Dcllist = cosmo.comoving_distance(zlist)
Dallist = Dcllist / (1. + zlist)

# Creating gama coordinates
gamacoords = SkyCoord(ra=RAlist, dec=DEClist, distance=Dcllist)

cenmask = (ranklist <= 1)
BCGmask = (ranklist == 1)
isomask = (ranklist == -999)

cencoords = gamacoords[cenmask]
BCGcoords = gamacoords[BCGmask]
isocoords = gamacoords[isomask]

# Define Rmax, the maximum radius around each galaxy to which the signal is measured
Rmax2 = 2. * u.Mpc
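A hedged follow-up (not in the original): one way Rmax2 could be used is to flag centrals whose nearest 3D neighbour lies within that radius, reusing the match_to_catalog_3d call shown in the examples above.

import numpy as np

# nthneighbor=2 skips the self-match, since cencoords is a subset of gamacoords.
idx, d2d, d3d = cencoords.match_to_catalog_3d(gamacoords, nthneighbor=2)
has_close_neighbour = d3d < Rmax2
print(np.sum(has_close_neighbour), 'of', len(cencoords),
      'centrals have a neighbour within', Rmax2)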
Example #9
def create_density_maps():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(':Registered %d processes' % comm_size)
        args["simdir"] = sys.argv[1]
        args["hfname"] = sys.argv[2]
        args["hfdir"] = sys.argv[3]
        args["snapnum"] = int(sys.argv[4])
        args["ncells"] = int(sys.argv[5])
        args["smlpixel"] = int(sys.argv[6])
        args["outbase"] = sys.argv[7]
    args = comm.bcast(args)
    label = args["simdir"].split('/')[-2].split('_')[2]

    # Organize division of Sub-&Halos over Processes on Proc. 0
    if comm_rank == 0:
        # Load simulation
        s = read_hdf5.snapshot(args["snapnum"], args["simdir"])
        s.read(["Coordinates", "Masses", "GFM_StellarFormationTime"],
               parttype=[0, 1, 4, 5])

        unitlength = dmaps.define_unit(s.header.unitlength)
        # Define Cosmology
        cosmo = LambdaCDM(H0=s.header.hubble * 100,
                          Om0=s.header.omega_m,
                          Ode0=s.header.omega_l)
        redshift = s.header.redshift
        print(': Redshift: %f' % redshift)

        # Sort Sub-&Halos over Processes
        SH = subhalo_data(args["hfdir"], args["hfname"], args["snapnum"],
                          s.header.hubble, s.header.unitlength)
        hist_edges = procdiv.histedges_equalN(SH['X'], comm_size)
        SH = procdiv.cluster_subhalos_box(SH, hist_edges, comm_size)

        # Calculate overlap for particle cuboids
        c = (const.c).to_value('km/s')
        fov_rad = 4 * np.pi * (np.percentile(SH['Vrms'], 90) / c)**2
        sh_dist = (cosmo.comoving_distance(redshift)).to_value(unitlength)
        alpha = 2  # multiplied by 4 because of Oguri&Marshall
        overlap = 0.5 * alpha * fov_rad * sh_dist  # half of field-of-view
        print('Cuboids overlap is: %f [%s]' % (overlap, unitlength))

        # Sort Particles over Processes
        DM, Gas, Star, BH = particle_data(s.data, s.header.hubble, unitlength)
        DM = procdiv.cluster_particles(DM, hist_edges, comm_size)
        Gas = procdiv.cluster_particles(Gas, hist_edges, comm_size)
        Star = procdiv.cluster_particles(Star, hist_edges, comm_size)
        BH = procdiv.cluster_particles(BH, hist_edges, comm_size)

    else:
        c = None
        alpha = None
        overlap = None
        unitlength = None
        cosmo = None
        redshift = None
        hist_edges = None
        SH = {
            'ID': None,
            'Vrms': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        DM = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        Gas = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        Star = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }
        BH = {
            'Mass': None,
            'X': None,
            'Y': None,
            'Z': None,
            'split_size_1d': None,
            'split_disp_1d': None
        }

    # Broadcast variables over all processors
    sh_split_size_1d = comm.bcast(SH['split_size_1d'], root=0)
    sh_split_disp_1d = comm.bcast(SH['split_disp_1d'], root=0)
    dm_split_size_1d = comm.bcast(DM['split_size_1d'], root=0)
    dm_split_disp_1d = comm.bcast(DM['split_disp_1d'], root=0)
    gas_split_size_1d = comm.bcast(Gas['split_size_1d'], root=0)
    gas_split_disp_1d = comm.bcast(Gas['split_disp_1d'], root=0)
    star_split_size_1d = comm.bcast(Star['split_size_1d'], root=0)
    star_split_disp_1d = comm.bcast(Star['split_disp_1d'], root=0)
    bh_split_size_1d = comm.bcast(BH['split_size_1d'], root=0)
    bh_split_disp_1d = comm.bcast(BH['split_disp_1d'], root=0)
    c = comm.bcast(c, root=0)
    unitlength = comm.bcast(unitlength, root=0)
    alpha = comm.bcast(alpha, root=0)
    overlap = comm.bcast(overlap, root=0)
    cosmo = comm.bcast(cosmo, root=0)
    redshift = comm.bcast(redshift, root=0)
    hist_edges = comm.bcast(hist_edges, root=0)

    SH = procdiv.scatter_subhalos(SH,
                                  sh_split_size_1d,
                                  comm_rank,
                                  comm,
                                  root_proc=0)
    DM = procdiv.scatter_particles(DM,
                                   dm_split_size_1d,
                                   comm_rank,
                                   comm,
                                   root_proc=0)
    Gas = procdiv.scatter_particles(Gas,
                                    gas_split_size_1d,
                                    comm_rank,
                                    comm,
                                    root_proc=0)
    Star = procdiv.scatter_particles(Star,
                                     star_split_size_1d,
                                     comm_rank,
                                     comm,
                                     root_proc=0)
    BH = procdiv.scatter_particles(BH,
                                   bh_split_size_1d,
                                   comm_rank,
                                   comm,
                                   root_proc=0)

    print(
        ': Proc. %d got: \n\t %d Sub-&Halos \n\t %d dark matter \n\t %d gas \n\t %d stars \n'
        % (comm_rank, int(sh_split_size_1d[comm_rank]),
           int(dm_split_size_1d[comm_rank]), int(gas_split_size_1d[comm_rank]),
           int(star_split_size_1d[comm_rank])))

    sigma_tot = []
    subhalo_id = []
    FOV = []
    ## Run over Sub-&Halos
    for ll in range(len(SH['ID'])):
        # Define field-of-view edge-length
        fov_rad = 4 * np.pi * (SH['Vrms'][ll] / c)**2
        #TODO: for z=0 sh_dist=0!!!
        sh_dist = (cosmo.comoving_distance(redshift)).to_value(unitlength)
        alpha = 1.4
        fov = alpha * fov_rad * sh_dist  #[kpc] edge-length of box

        # Check cuboid boundary condition,
        # that all surface densities are filled with particles
        if ((SH['Pos'][ll][0]-hist_edges[comm_rank] < overlap) or
                (hist_edges[comm_rank+1]-overlap < \
                 SH['Pos'][ll][0]-hist_edges[comm_rank])):
            if fov * 0.45 > overlap:
                print("FOV is bigger than cuboids overlap: %f > %f" % \
                        (fov*0.45, overlap))
                continue

        ## BH
        pos, indx = dmaps.select_particles(
            BH['Pos'],
            SH['Pos'][ll],  #*a/h,
            fov,
            'box')
        bh_sigma = dmaps.projected_density_pmesh(pos, BH['Mass'][indx], fov,
                                                 args["ncells"])

        ## Gas
        pos, indx = dmaps.select_particles(
            Gas['Pos'],
            SH['Pos'][ll],  #*a/h,
            fov,
            'box')
        gas_sigma = dmaps.projected_density_pmesh_adaptive(
            pos, Gas['Mass'][indx], fov, args["ncells"], hmax=args["smlpixel"])
        ## Star
        pos, indx = dmaps.select_particles(
            Star['Pos'],
            SH['Pos'][ll],  #*a/h,
            fov,
            'box')
        star_sigma = dmaps.projected_density_pmesh_adaptive(
            pos,
            Star['Mass'][indx],
            fov,
            args["ncells"],
            hmax=args["smlpixel"])
        ## DM
        pos, indx = dmaps.select_particles(
            DM['Pos'],
            SH['Pos'][ll],  #*a/h,
            fov,
            'box')
        dm_sigma = dmaps.projected_density_pmesh_adaptive(
            pos, DM['Mass'][indx], fov, args["ncells"], hmax=args["smlpixel"])
        sigmatotal = dm_sigma + gas_sigma + star_sigma

        # Make sure that density-map is filled
        extention = 0
        while (0.0 in sigmatotal) and (extention < 60):
            extention += 5
            dm_sigma = dmaps.projected_density_pmesh_adaptive(
                pos,
                DM['Mass'][indx],
                fov,
                args["ncells"],
                hmax=args["smlpixel"] + extention)
            sigmatotal = dm_sigma + gas_sigma + star_sigma

        #tmap.plotting(sigmatotal, args["ncells"], fov, 0.57)
        sigma_tot.append(sigmatotal)
        subhalo_id.append(int(SH['ID'][ll]))
        FOV.append(fov)

    fname = args["outbase"] + 'z_' + str(
        args["snapnum"]) + '/' + 'DM_' + label + '_' + str(comm_rank) + '.h5'
    hf = h5py.File(fname, 'w')
    hf.create_dataset('DMAP',
                      data=sigma_tot)  # density map in unit of simulation
    hf.create_dataset('HFID',
                      data=np.asarray(subhalo_id))  # Rockstar sub-&halo id
    hf.create_dataset(
        'FOV', data=np.asarray(FOV))  # field-of-view in units #[kpc, Mpc]
    #RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
    hf.close()
Example #10
if 'pc' in Runit:

    ### Calculate the ESD profile

    config = {'min_sep': Rarcmin, 'max_sep': Rarcmax, 'nbins': Nbins,\
        'metric': 'Rlens', 'min_rpar': 0, 'verbose': 0}

    kg = treecorr.KGCorrelation(config)
    print('Rbins (pc):', Rarcmin, Rarcmax, Nbins)
    
    # Define the source redshift bins
    nZbins = 20
    
    Zlims = np.linspace(srcZmin, srcZmax, nZbins+1)
    Zbins = Zlims[0:-1] + np.diff(Zlims)/2.
    Dcbins = (cosmo.comoving_distance(Zbins).to('pc')).value
    print('Zbins:', Zlims, 'dZ:', (np.amax(Zlims)-np.amin(Zlims))/nZbins)
    
    Zmasks = [(Zlims[b] < srcZ) & (srcZ <= Zlims[b+1]) for b in np.arange(nZbins)]
    Ngals = [np.sum(Zmasks[b]) for b in np.arange(nZbins)]
    
    print('Ngalaxies:', Ngals, np.sum(Ngals), len(srcZ))
    
    lenscat_list = []
    srccat_list = []

    # For every source redshift bin...
    for b in np.arange(nZbins):

        # Select sources in the redshift bin
        binZ, binDc, binDa = Zbins[b], Dcbins[b], Dcbins[b]/(1+Zbins[b])
from glob import glob

from astropy import constants as const, units as u
from astropy.coordinates import SkyCoord
from collections import Counter
from astropy.cosmology import LambdaCDM, z_at_value

from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import gridspec
from matplotlib import rc, rcParams

import modules_EG as utils

h, O_matter, O_lambda = [0.7, 0.25, 0.75]
cosmo = LambdaCDM(H0=h * 100, Om0=O_matter, Ode0=O_lambda)

# angular_diameter_distance

zlens = 0.2
Dlens = cosmo.comoving_distance(zlens) / (1 + zlens)
Dsat = (Dlens + 3 * u.Mpc)

Dsource = 2 * Dlens

Sigma_crit_lens = Dsource / (Dlens * (Dsource - Dlens))
Sigma_crit_sat = Dsource / (Dsat * (Dsource - Dsat))

print(Sigma_crit_lens)
print(Sigma_crit_sat)
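The snippet above keeps only the distance ratio D_s/(D_l*D_ls); the full critical surface density used elsewhere in these examples (e.g. sigma_crit(zl, zs, cosmo)) is Sigma_cr = c**2/(4*pi*G) * D_s/(D_l*D_ls). A minimal sketch of such a helper, assuming angular diameter distances; the function body and the test redshifts are illustrative, not the original implementation.

import numpy as np
from astropy import constants as const, units as u
from astropy.cosmology import LambdaCDM

def sigma_crit(zl, zs, cosmo):
    # Sigma_cr = c^2 / (4 pi G) * D_s / (D_l * D_ls)
    Dl = cosmo.angular_diameter_distance(zl)
    Ds = cosmo.angular_diameter_distance(zs)
    Dls = cosmo.angular_diameter_distance_z1z2(zl, zs)
    return (const.c**2 / (4. * np.pi * const.G) * Ds / (Dl * Dls)).to(u.Msun / u.Mpc**2)

cosmo_example = LambdaCDM(H0=70., Om0=0.25, Ode0=0.75)
print(sigma_crit(0.2, 0.8, cosmo_example))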
def create_density_maps():
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        print(':Registered %d processes' % comm_size)
        args["simdir"]       = sys.argv[1]
        args["hfdir"]        = sys.argv[2]
        args["lcdir"]        = sys.argv[3]
        args["ncells"]       = int(sys.argv[4])
        args["outbase"]      = sys.argv[5]
    args = comm.bcast(args)
    label = args["simdir"].split('/')[-2].split('_')[2]
   
    # Organize division of Sub-&Halos over Processes on Proc. 0
    if comm_rank == 0:
        # Characteristics
        hflabel = whichhalofinder(args["lcdir"])

        # Load LightCone Contents
        lchdf = h5py.File(args["lcdir"], 'r')
        lcdfhalo = pd.DataFrame(
                {'HF_ID' : lchdf['Halo_Rockstar_ID'].value,
                 'ID' : lchdf['Halo_ID'].value,
                 'Halo_z' : lchdf['Halo_z'].value,
                 'snapnum' : lchdf['snapnum'].value,
                 #'Vrms' : lchdf['VelDisp'].value,
                 #'fov_Mpc' : lchdf['FOV'][:, 1],
                 ('HaloPosBox', 'X') : lchdf['HaloPosBox'][:, 0],
                 ('HaloPosBox', 'Y') : lchdf['HaloPosBox'][:, 1],
                 ('HaloPosBox', 'Z') : lchdf['HaloPosBox'][:, 2],})

        nhalo_per_snapshot = lcdfhalo.groupby('snapnum').count()['HF_ID']

        print('Number of Sub-&Halos in Snapshot:')
        print(nhalo_per_snapshot.values)
        print(np.sum(nhalo_per_snapshot.values))
        print(nhalo_per_snapshot.index.values)
        print(nhalo_per_snapshot.values[0])

        if nhalo_per_snapshot.values[0] > comm_size:
            hist_edges =  procdiv.histedges_equalN(lcdfhalo[('HaloPosBox', 'X')],
                                                   comm_size)
            SH = procdiv.cluster_subhalos(lcdfhalo['ID'].values,
                                          #lcdfhalo['Vrms'],
                                          #lcdfhalo['fov_Mpc'],
                                          lcdfhalo[('HaloPosBox', 'X')].values,
                                          lcdfhalo[('HaloPosBox', 'X')].values,
                                          lcdfhalo[('HaloPosBox', 'Y')].values,
                                          lcdfhalo[('HaloPosBox', 'Z')].values,
                                          hist_edges, comm_size)
            print('dict test', SH.keys())
        elif nhalo_per_snapshot.values[0] < comm_size:
            pass

        # Define Cosmology
        cosmo = LambdaCDM(H0=s.header.hubble*100,
                          Om0=s.header.omega_m,
                          Ode0=s.header.omega_l)
        cosmosim = {'omega_M_0' : s.header.omega_m,
                    'omega_lambda_0' : s.header.omega_l,
                    'omega_k_0' : 0.0,
                    'h' : s.header.hubble}
        redshift = s.header.redshift
        print(': Redshift: %f' % redshift)

        # Sort Sub-&Halos over Processes
        df = pd.read_csv(args["hfdir"]+'halos_%d.dat' % args["snapnum"],
                         sep=r'\s+', skiprows=16,
                         usecols=[0, 2, 4, 9, 10, 11],
                         names=['ID', 'Mvir', 'Vrms', 'X', 'Y', 'Z'])
        df = df[df['Mvir'] > 5e11]
        sh_id = df['ID'].values.astype('float64')
        sh_vrms = df['Vrms'].values.astype('float64')
        sh_x = df['X'].values.astype('float64')
        sh_y = df['Y'].values.astype('float64')
        sh_z = df['Z'].values.astype('float64')
        del df
        hist_edges =  procdiv.histedges_equalN(sh_x, comm_size)
        SH = cluster_subhalos(sh_id, sh_vrms, sh_x, sh_y, sh_z, hist_edges, comm_size)
      
        # Load simulation
        s = read_hdf5.snapshot(45, args["simdir"])
        s.read(["Coordinates", "Masses", "GFM_StellarFormationTime"],
               parttype=[0, 1, 4])
        scale = 1e-3*s.header.hubble
        
        # Calculate overlap for particle cuboids
        c = (const.c).to_value('km/s')
        fov_rad = 4*np.pi*(np.percentile(sh_vrms, 90)/c)**2
        sh_dist = (cosmo.comoving_distance(redshift)).to_value('Mpc')
        alpha = 6  # multiplied by 4 because of Oguri&Marshall
        overlap = 0.5*alpha*fov_rad*sh_dist  #[Mpc] half of field-of-view
        print('Cuboids overlap is: %f [Mpc]' % overlap)

        # Sort Particles over Processes
        ## Dark Matter
        dm_mass = (s.data['Masses']['dm']).astype('float64')
        dm_x = (s.data['Coordinates']['dm'][:, 0]*scale).astype('float64')
        dm_y = (s.data['Coordinates']['dm'][:, 1]*scale).astype('float64')
        dm_z = (s.data['Coordinates']['dm'][:, 2]*scale).astype('float64')
        dm_mass, dm_x, dm_y, dm_z, dm_split_size_1d, dm_split_disp_1d = cluster_particles(
                dm_mass, dm_x, dm_y, dm_z, hist_edges, comm_size)
        ## Gas
        gas_mass = (s.data['Masses']['gas']).astype('float64')
        gas_x = (s.data['Coordinates']['gas'][:, 0]*scale).astype('float64')
        gas_y = (s.data['Coordinates']['gas'][:, 1]*scale).astype('float64')
        gas_z = (s.data['Coordinates']['gas'][:, 2]*scale).astype('float64')
        gas_mass, gas_x, gas_y, gas_z, gas_split_size_1d, gas_split_disp_1d = cluster_particles(gas_mass, gas_x, gas_y, gas_z, hist_edges, comm_size)
        ## Stars
        star_mass = (s.data['Masses']['stars']).astype('float64')
        star_x = (s.data['Coordinates']['stars'][:, 0]*scale).astype('float64')
        star_y = (s.data['Coordinates']['stars'][:, 1]*scale).astype('float64')
        star_z = (s.data['Coordinates']['stars'][:, 2]*scale).astype('float64')
        star_age = s.data['GFM_StellarFormationTime']['stars']
        star_x = star_x[star_age >= 0]  #[Mpc]
        star_y = star_y[star_age >= 0]  #[Mpc]
        star_z = star_z[star_age >= 0]  #[Mpc]
        star_mass = star_mass[star_age >= 0]
        del star_age
        star_mass, star_x, star_y, star_z, star_split_size_1d, star_split_disp_1d = cluster_particles(star_mass, star_x, star_y, star_z, hist_edges, comm_size)

    else:
        c=None; alpha=None; overlap=None
        cosmosim=None; cosmo=None; redshift=None; hist_edges=None;
        SH=None;
        dm_mass=None; dm_x=None; dm_y=None; dm_z=None
        dm_split_size_1d=None; dm_split_disp_1d=None
        gas_mass=None; gas_x=None; gas_y=None; gas_z=None
        gas_split_size_1d=None; gas_split_disp_1d=None
        star_mass=None; star_x=None; star_y=None; star_z=None
        star_split_size_1d=None; star_split_disp_1d=None
      
    # Broadcast variables over all processors
    sh_split_size_1d = comm.bcast(SH['sh_split_size_1d'], root=0)
    sh_split_disp_1d = comm.bcast(SH['sh_split_disp_1d'], root=0)
    dm_split_size_1d = comm.bcast(dm_split_size_1d, root=0)
    dm_split_disp_1d = comm.bcast(dm_split_disp_1d, root=0)
    gas_split_size_1d = comm.bcast(gas_split_size_1d, root=0)
    gas_split_disp_1d = comm.bcast(gas_split_disp_1d, root=0)
    star_split_size_1d = comm.bcast(star_split_size_1d, root=0)
    star_split_disp_1d = comm.bcast(star_split_disp_1d, root=0)
    c = comm.bcast(c, root=0)
    alpha = comm.bcast(alpha, root=0)
    overlap = comm.bcast(overlap, root=0)
    cosmo = comm.bcast(cosmo, root=0)
    redshift = comm.bcast(redshift, root=0)
    hist_edges = comm.bcast(hist_edges, root=0)

    # Initialize variables for each processor
    sh_id_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_vrms_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_x_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_y_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    sh_z_local = np.zeros((int(sh_split_size_1d[comm_rank])))
    dm_mass_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_x_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_y_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    dm_z_local = np.zeros((int(dm_split_size_1d[comm_rank])))
    gas_mass_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_x_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_y_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    gas_z_local = np.zeros((int(gas_split_size_1d[comm_rank])))
    star_mass_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_x_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_y_local = np.zeros((int(star_split_size_1d[comm_rank])))
    star_z_local = np.zeros((int(star_split_size_1d[comm_rank])))
def com_dist(z):
    obj = LambdaCDM(H0=69.6, Om0=0.286, Ode0=1 - 0.286)
    dist = obj.comoving_distance(z).value  # MPC
    return dist
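A small hedged round-trip check for com_dist above, inverting the comoving distance with astropy's z_at_value (imported in one of the examples above); the 1500 Mpc target is an arbitrary placeholder.

from astropy import units as u
from astropy.cosmology import LambdaCDM, z_at_value

obj = LambdaCDM(H0=69.6, Om0=0.286, Ode0=1 - 0.286)
z_guess = z_at_value(obj.comoving_distance, 1500. * u.Mpc)
print(z_guess, com_dist(z_guess))  # should recover ~1500 Mpc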
ax = plt.subplot(111)
for ii, name in enumerate(data_struct.keys()):
    parts = plt.violinplot(data_struct[name]["dist"],[np.log10(data_struct[name]["redshift"])])
    for partname in ('cbars','cmins','cmaxes'):
        vp = parts[partname]
        vp.set_edgecolor(color1)
        vp.set_linewidth(1)
    for pc in parts['bodies']:
        pc.set_facecolor(color1)
        pc.set_edgecolor(color1)

cosmo = LambdaCDM(H0=H0_best, Om0=Om0_best, Ode0=Ode0_best)
print(H0_best, Om0_best, Ode0_best)

redshifts = np.logspace(-3,1,100)
plt.plot(np.log10(redshifts), cosmo.comoving_distance(redshifts).value, '--', color=color2, label=r'$\Lambda_\mathrm{CDM}$')
plt.xlabel(r'$\log_{10}$ Redshift')
plt.ylabel('Distance [Mpc]')
plt.xlim([-2.5,-0.5])
plt.ylim([0,2500])
#plt.xlim([-2.5,1.0])
plt.ylim([0,8000])
plt.grid(True)
plt.show()
plotName = os.path.join(plotDir,'redshift.pdf')
plt.savefig(plotName)
plt.close()

fig = plt.figure(figsize=(9,6))
gs = gridspec.GridSpec(4, 1)
ax1 = fig.add_subplot(gs[0:3, 0])
extinction=table.field('EXTINCTION')
extinction_r=extinction[:,3]
flux=table.field('MODELFLUX')
flux_r=flux[:,3]

M_g=Calculate_Magnitude(flux_r,extinction_r,redshift)

array=np.column_stack((ra,dec,redshift,fibcol,poly,M_g))
array=array[ (array[:,2] > 0.43) & (array[:,2] < 0.55)  ]

nden_target=0.00013
sky_coverage=2.036048 # solid angle in steradians
percent_sky=sky_coverage / ( 4 * np.pi )


big_distance=cosmo.comoving_distance(np.max(array[:,2]))
small_distance=cosmo.comoving_distance(np.min(array[:,2]))



max_volume=4./3. * np.pi * cosmo.comoving_distance(np.max(array[:,2]))**3
min_volume=4./3. * np.pi * cosmo.comoving_distance(np.min(array[:,2]))**3

volume= percent_sky * (max_volume-min_volume)



number_of_galaxies=nden_target*volume

print(number_of_galaxies)
Example #16
from astropy import constants as const, units as u
from astropy.cosmology import LambdaCDM
from astropy.io import ascii, fits as pyfits

O_matter, O_lambda, h = [0.315, 0.685, 0.7]
cosmo = LambdaCDM(H0=h * 100., Om0=O_matter, Ode0=O_lambda)

#path_lenscat = '/data2/brouwer/KidsCatalogues/KV450_Lephare_Masked_trim_ZBlt0p6.fits'
path_lenscat = '/data2/brouwer/MergedCatalogues/GAMACatalogue_2.0.fits'

lenscat = pyfits.open(path_lenscat, ignore_missing_end=True)[1].data
galZlist = np.array(lenscat['Z'])

galZbins = np.sort(
    np.unique(galZlist))  # Find and sort the unique redshift values
Dclbins = np.array((cosmo.comoving_distance(galZbins).to('pc')
                    ).value)  # Calculate the corresponding distances
Dcllist = Dclbins[np.digitize(galZlist, galZbins) -
                  1]  # Assign the appropriate Dcl to all lens redshifts

print('Zbins:', galZbins)
print('Dclbins:', Dclbins)

try:
    # Testing whether new-old distances gives zero
    Dcllist_old = np.array((cosmo.comoving_distance(galZlist).to('pc')).value)
    print('Null test:', np.sum(Dcllist - Dcllist_old))
except Exception:
    print('No null test possible')
Example #17
nt = (n+u)

print(gt)
print(g1)
print(crit)
# plt.hist(crit, 50)
# plt.hist(g1, 50)
plt.hist(gt, 1000)
plt.show()



exit()

cos = LambdaCDM(H0=70, Om0=0.31, Ode0=0.69)
r = cos.comoving_distance(0.4743854)
r1 = 2.99792458*1e5/70*0.4198876
print(r*0.7,r1*0.7)
exit()
def psf(flux, psf_scale, size, ellip, theta):
    my, mx = numpy.mgrid[0:size, 0:size] - size/2.
    r_scale_sq = 9
    m = 3.5

    rot_1 = numpy.cos(theta)
    rot_2 = numpy.sin(theta)
    q = (1+ellip)/(1-ellip)

    # mx_r = mx*r1 + my*r2
    # my_r = -mx*r2 + my*r1
Example #18
from astropy.cosmology import LambdaCDM

h, O_matter, O_lambda = [0.7, 0.29, 0.71]
cosmo = LambdaCDM(H0=h * 100, Om0=O_matter, Ode0=O_lambda)
troughZ = np.array([0.1539674, 0.24719192, 0.33112174, 0.42836386])
am_to_rad = np.pi / (60. * 180.)

data = np.load(
    '/data2/brouwer/shearprofile/trough_results_final/slics_mockZ_nomask/Redshift_bins_covariance.npy'
)
data_x = np.array([np.array(data[0][x + 1][0]) for x in range(len(data[0]))])

#data_R = np.array([ data_x[x]*am_to_rad * (cosmo.angular_diameter_distance(troughZ[x]).to('Mpc')).value \
#    for x in range(len(data_x))])

data_R = np.array([ data_x[x]*am_to_rad * (cosmo.comoving_distance(troughZ[x]).to('Mpc')).value \
    for x in range(len(data_x))])

covariance_tot = np.array(list(data[2][1].values()))

print(covariance_tot[0])
print()
print(data[2][1][0])

#print(data_cov)
#print(data_y)

print(np.shape(data))

# 0: R-bins
# 1: ESD profiles, 1-4: Redshifts, 0-9: Percentiles
Example #19
    print()
    print('Diff. Fraction Z:', np.mean(diff_Z/(1.+Z_gama_matched)))
    print('Stand. Dev. Z:', std_diff_Z)
    print( 'std(z)/(1+z):', std_diff_Z / (1.+np.mean(Z_kids_matched)) )

    # Stellar mass offset and standard deviation between KiDS and GAMA
    diff_logmstar = (logmstar_kids_matched-logmstar_gama_matched)
    std_diff_logmstar = np.std(diff_logmstar)
    print()
    print('Diff. Mstar:', np.mean(diff_logmstar))
    print('Stand. Dev. Mstar:', std_diff_logmstar)
    print('std(M)/M:', np.std( \
        10.**logmstar_kids_matched-10.**logmstar_gama_matched)/np.mean(10.**logmstar_gama_matched))

    # Estimate the mass uncertainty caused by the redshift uncertainty
    dist_kids_matched = (1.+Z_kids_matched) * cosmo.comoving_distance(Z_kids_matched)
    dist_gama_matched = (1.+Z_gama_matched) * cosmo.comoving_distance(Z_gama_matched)
    std_diff_dist = np.std(dist_kids_matched - dist_gama_matched)
    std_diff_lum = np.std(dist_kids_matched**2 - dist_gama_matched**2)
    print()
    print('dD/D:', std_diff_dist / np.mean(dist_gama_matched) )
    print('L+dL/L = %s dex'%(np.log10(std_diff_lum/np.mean(dist_gama_matched**2)+1.)) )

    # Estimate the mass uncertainty caused by the magnitude uncertainty
    diff_rmag = rmag_kids_sersic[Zmask_kids*magmask_kids] - rmag_kids_auto[Zmask_kids*magmask_kids]
    
    std_ratio_flux = 10.**(0.4*np.std(diff_rmag))
    mean_ratio_flux = 10.**(0.4*np.mean(diff_rmag))
    
    print()
    print('Mag offset: mean(dm)=', np.mean(diff_rmag))
def create_density_maps():
    time_start = time.time()
    # Get command line arguments
    args = {}
    if comm_rank == 0:
        args["simdir"] = sys.argv[1]
        args["hfdir"] = sys.argv[2]
        args["lcdir"] = sys.argv[3]
        args["ncells"] = int(sys.argv[4])
        args["walltime"] = int(sys.argv[5])
        args["outbase"] = sys.argv[6]
    args = comm.bcast(args, root=0)
    label = args["simdir"].split('/')[-2].split('_')[2]
    hflabel = whichhalofinder(args["lcdir"])

    # Load LightCone Contents
    if comm_rank == 0:
        lchdf = h5py.File(args["lcdir"], 'r')
        dfhalo = pd.DataFrame({
            'HF_ID': lchdf['Halo_Rockstar_ID'].value,
            'ID': lchdf['Halo_ID'].value,
            'Halo_z': lchdf['Halo_z'].value,
            'snapnum': lchdf['snapnum'].value,
            'Vrms': lchdf['VelDisp'].value,
            'fov_Mpc': lchdf['FOV'][:, 1],
            ('HaloPosBox', 'X'): lchdf['HaloPosBox'][:, 0],
            ('HaloPosBox', 'Y'): lchdf['HaloPosBox'][:, 1],
            ('HaloPosBox', 'Z'): lchdf['HaloPosBox'][:, 2]
        })
        nhalo_per_snapshot = dfhalo.groupby('snapnum').count()['HF_ID']
        snapshots = dfhalo.groupby('snapnum').count().index.values
        dfhalo = dfhalo.sort_values(by=['snapnum'])
    else:
        nhalo_per_snapshot = None
    nhalo_per_snapshot = comm.bcast(nhalo_per_snapshot, root=0)

    sigma_tot = []
    out_hfid = []
    out_lcid = []
    out_redshift = []
    out_snapshot = []
    out_vrms = []
    out_fov = []
    ## Run over Snapshots
    for ss in range(len(nhalo_per_snapshot))[-2:]:
        print('Snapshot %d of %d' % (ss, len(nhalo_per_snapshot)))

        if comm_rank == 0:
            dfhalosnap = dfhalo.loc[dfhalo['snapnum'] == snapshots[ss]]
            # Load simulation
            s = read_hdf5.snapshot(snapshots[ss], args["simdir"])
            s.read(["Coordinates", "Masses", "GFM_StellarFormationTime"],
                   parttype=[0, 1, 4, 5])
            scale = 1e-3 * s.header.hubble
            cosmo = LambdaCDM(H0=s.header.hubble * 100,
                              Om0=s.header.omega_m,
                              Ode0=s.header.omega_l)
            print(': Redshift: %f' % s.header.redshift)

            sh_hfid = dfhalosnap['HF_ID'].values
            sh_id = dfhalosnap['ID'].values
            sh_red = dfhalosnap['Halo_z'].values
            sh_snap = dfhalosnap['snapnum'].values
            sh_vrms = dfhalosnap['Vrms'].values
            sh_fov = dfhalosnap['fov_Mpc'].values
            sh_x = dfhalosnap[('HaloPosBox', 'X')].values
            sh_y = dfhalosnap[('HaloPosBox', 'Y')].values
            sh_z = dfhalosnap[('HaloPosBox', 'Z')].values
            hist_edges = procdiv.histedges_equalN(sh_x, comm_size)
            SH = procdiv.cluster_subhalos_lc(sh_hfid, sh_id, sh_red, sh_snap,
                                             sh_vrms, sh_fov, sh_x, sh_y, sh_z,
                                             hist_edges, comm_size)

            ## Dark Matter
            DM = {
                'Mass': (s.data['Masses']['dm']).astype('float64'),
                'Pos': (s.data['Coordinates']['dm'] * scale).astype('float64')
            }
            ## Gas
            Gas = {
                'Mass': (s.data['Masses']['gas']).astype('float64'),
                'Pos': (s.data['Coordinates']['gas'] * scale).astype('float64')
            }
            ## Stars
            age = (
                s.data['GFM_StellarFormationTime']['stars']).astype('float64')
            Star = {
                'Mass':
                (s.data['Masses']['stars'][age >= 0]).astype('float64'),
                'Pos': (s.data['Coordinates']['stars'][age >= 0, :] *
                        scale).astype('float64')
            }
            ## BH
            BH = {
                'Mass': (s.data['Masses']['bh']).astype('float64'),
                'Pos': (s.data['Coordinates']['bh'] * scale).astype('float64')
            }

            # Calculate overlap for particle cuboids
            c = (const.c).to_value('km/s')
            fov_rad = 4 * np.pi * (np.percentile(SH['Vrms'], 90) / c)**2
            sh_dist = (cosmo.comoving_distance(
                s.header.redshift)).to_value('Mpc')
            alpha = 6  # multiplied by 4 because of Oguri&Marshall
            overlap = 0.5 * alpha * fov_rad * sh_dist  #[Mpc] half of field-of-view
            print('Cuboids overlap is: %f [Mpc]' % overlap)
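            # (annotation, not original code) fov_rad is an angular field of view,
            # 4*pi*(sigma_v/c)^2 [rad] evaluated at the 90th percentile of Vrms
            # (the singular-isothermal-sphere Einstein-angle scale); 'overlap' turns
            # alpha/2 times that angle into a comoving length [Mpc] at the snapshot distance.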

            DM = procdiv.cluster_particles(DM, hist_edges, comm_size)
            Gas = procdiv.cluster_particles(Gas, hist_edges, comm_size)
            Star = procdiv.cluster_particles(Star, hist_edges, comm_size)
            BH = procdiv.cluster_particles(BH, hist_edges, comm_size)
        else:
            overlap = None
            hist_edges = None
            SH = {
                'HF_ID': None,
                'ID': None,
                'redshift': None,
                'snapshot': None,
                'Vrms': None,
                'fov_Mpc': None,
                'X': None,
                'Y': None,
                'Z': None,
                'split_size_1d': None,
                'split_disp_1d': None
            }
            DM = {
                'Mass': None,
                'X': None,
                'Y': None,
                'Z': None,
                'split_size_1d': None,
                'split_disp_1d': None
            }
            Gas = {
                'Mass': None,
                'X': None,
                'Y': None,
                'Z': None,
                'split_size_1d': None,
                'split_disp_1d': None
            }
            Star = {
                'Mass': None,
                'X': None,
                'Y': None,
                'Z': None,
                'split_size_1d': None,
                'split_disp_1d': None
            }
            BH = {
                'Mass': None,
                'X': None,
                'Y': None,
                'Z': None,
                'split_size_1d': None,
                'split_disp_1d': None
            }
        # Broadcast variables over all processors
        overlap = comm.bcast(overlap, root=0)
        hist_edges = comm.bcast(hist_edges, root=0)
        sh_split_size_1d = comm.bcast(SH['split_size_1d'], root=0)
        dm_split_size_1d = comm.bcast(DM['split_size_1d'], root=0)
        gas_split_size_1d = comm.bcast(Gas['split_size_1d'], root=0)
        star_split_size_1d = comm.bcast(Star['split_size_1d'], root=0)
        bh_split_size_1d = comm.bcast(BH['split_size_1d'], root=0)

        SH = procdiv.scatter_subhalos_lc(SH,
                                         sh_split_size_1d,
                                         comm_rank,
                                         comm,
                                         root_proc=0)
        DM = procdiv.scatter_particles(DM,
                                       dm_split_size_1d,
                                       comm_rank,
                                       comm,
                                       root_proc=0)
        Gas = procdiv.scatter_particles(Gas,
                                        gas_split_size_1d,
                                        comm_rank,
                                        comm,
                                        root_proc=0)
        Star = procdiv.scatter_particles(Star,
                                         star_split_size_1d,
                                         comm_rank,
                                         comm,
                                         root_proc=0)
        BH = procdiv.scatter_particles(BH,
                                       bh_split_size_1d,
                                       comm_rank,
                                       comm,
                                       root_proc=0)

        print(
            ': Proc. %d got: \n\t %d Sub-&Halos \n\t %d dark matter \n\t %d gas \n\t %d stars \n'
            % (comm_rank, int(
                sh_split_size_1d[comm_rank]), int(dm_split_size_1d[comm_rank]),
               int(gas_split_size_1d[comm_rank]),
               int(star_split_size_1d[comm_rank])))

        ## Run over Sub-&Halos
        for ll in range(len(SH['ID'])):
            print('Lens %d' % (ll))
            #TODO: for z=0 sh_dist=0!!!

            smlpixel = 20  # maximum smoothing pixel length
            ## BH
            pos, indx = dmaps.select_particles(
                BH['Pos'],
                SH['Pos'][ll],  #*a/h,
                SH['fov_Mpc'][ll],
                'box')
            bh_sigma = dmaps.projected_density_pmesh_adaptive(
                pos,
                BH['Mass'][indx],
                SH['fov_Mpc'][ll],
                args["ncells"],
                hmax=smlpixel)
            ## Star
            pos, indx = dmaps.select_particles(
                Star['Pos'],
                SH['Pos'][ll],  #*a/h,
                SH['fov_Mpc'][ll],
                'box')
            star_sigma = dmaps.projected_density_pmesh_adaptive(
                pos,
                Star['Mass'][indx],
                SH['fov_Mpc'][ll],
                args["ncells"],
                hmax=smlpixel)
            ## Gas
            pos, indx = dmaps.select_particles(
                Gas['Pos'],
                SH['Pos'][ll],  #*a/h
                SH['fov_Mpc'][ll],
                'box')
            gas_sigma = dmaps.projected_density_pmesh_adaptive(
                pos,
                Gas['Mass'][indx],
                SH['fov_Mpc'][ll],
                args["ncells"],
                hmax=smlpixel)
            ## DM
            pos, indx = dmaps.select_particles(
                DM['Pos'],
                SH['Pos'][ll],  #*a/h
                SH['fov_Mpc'][ll],
                'box')
            dm_sigma = dmaps.projected_density_pmesh_adaptive(
                pos,
                DM['Mass'][indx],
                SH['fov_Mpc'][ll],  #[Mpc]
                args["ncells"],
                hmax=smlpixel)
            sigmatotal = dm_sigma + gas_sigma + star_sigma + bh_sigma

            # Make sure that the density map is filled: if any pixel is empty,
            # increase the smoothing length and redo the dark-matter map
            while 0.0 in sigmatotal:
                smlpixel += 5
                dm_sigma = dmaps.projected_density_pmesh_adaptive(
                    pos,
                    DM['Mass'][indx],
                    SH['fov_Mpc'][ll],  #[Mpc]
                    args["ncells"],
                    hmax=smlpixel)
                sigmatotal = dm_sigma + gas_sigma + star_sigma + bh_sigma

            #tmap.plotting(sigmatotal, args["ncells"],
            #              SH['fov_Mpc'][ll], SH['redshift'][ll])
            sigma_tot.append(sigmatotal)
            out_hfid.append(SH['HF_ID'][ll])
            out_lcid.append(SH['ID'][ll])
            out_redshift.append(SH['redshift'][ll])
            out_snapshot.append(SH['snapshot'][ll])
            out_vrms.append(SH['Vrms'][ll])
            out_fov.append(SH['fov_Mpc'][ll])
            if args["walltime"] - (time_start - time.time()) / (60 *
                                                                60) < 0.25:
                fname = args["outbase"] + 'DM_' + label + '_lc.h5'
                hf = h5py.File(fname, 'w')
                hf.create_dataset('density_map', data=sigma_tot)
                hf.create_dataset('HF_ID', data=np.asarray(out_hfid))
                hf.create_dataset('LC_ID', data=np.asarray(out_lcid))
                hf.create_dataset('redshift', data=np.asarray(out_redshift))
                hf.create_dataset('snapshot', data=np.asarray(out_snapshot))
                hf.create_dataset('Vrms', data=np.asarray(out_vrms))
                hf.create_dataset('fov_Mpc', data=np.asarray(out_fov))
                hf.close()

    # Gather the Results
    #comm.Barrier()
    #comm.Gather(out_hfid, [rootout_hfid,split_sizes,displacements,MPI.DOUBLE], root=0)

    fname = args["outbase"] + 'DM_' + label + '_lc.h5'
    hf = h5py.File(fname, 'w')
    hf.create_dataset('density_map', data=sigma_tot)
    hf.create_dataset('HF_ID', data=np.asarray(out_hfid))
    hf.create_dataset('LC_ID', data=np.asarray(out_lcid))
    hf.create_dataset('redshift', data=np.asarray(out_redshift))
    hf.create_dataset('snapshot', data=np.asarray(out_snapshot))
    hf.create_dataset('Vrms', data=np.asarray(out_vrms))
    hf.create_dataset('fov_Mpc', data=np.asarray(out_fov))
    hf.close()
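The final write above runs on every rank with the same file name, while the commented-out comm.Gather hints at first collecting the per-rank results on rank 0. A minimal sketch of that step using mpi4py's lowercase gather (which handles arbitrary Python lists), reusing comm, comm_rank and the output lists from the snippet; this is an illustration, not the snippet's actual I/O:

all_sigma = comm.gather(sigma_tot, root=0)
all_hfid = comm.gather(out_hfid, root=0)
if comm_rank == 0:
    sigma_tot = [m for part in all_sigma for m in part]    # flatten in rank order
    out_hfid = [i for part in all_hfid for i in part]
    # ...repeat for out_lcid, out_redshift, out_snapshot, out_vrms and out_fov,
    # then write a single HDF5 file as above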
Beispiel #21
0
Omega0 = 0.302
OmegaLambda = 0.698
OmegaBaryon = 0.04751
HubbleParam = 0.68

gadget_length_in_cm = Kpc / HubbleParam
gadget_mass_in_g = 1e10 * Msun / HubbleParam
gadget_velocity_in_cm_per_s = 1e5
gadget_time_in_s = gadget_length_in_cm / gadget_velocity_in_cm_per_s
gadget_energy_in_erg = gadget_mass_in_g * gadget_length_in_cm**2 / (
    gadget_time_in_s**2)

cosmology = LambdaCDM(H0=HubbleParam * 100, Om0=Omega0, Ode0=OmegaLambda)

D_c = lambda z: cosmology.comoving_distance(z).cgs.value
D_a = lambda z: cosmology.angular_diameter_distance(z).cgs.value
D_l = lambda z: cosmology.luminosity_distance(z).cgs.value

rho_crit = lambda z: cosmology.critical_density(z).cgs.value
rho_crit0 = cosmology.critical_density0.cgs.value

rho_bar_crit = lambda z: rho_crit(z) * OmegaBaryon
rho_bar_crit0 = rho_crit0 * OmegaBaryon

n_p_crit = lambda z: rho_bar_crit(z) / m_p * Xh
n_p_crit0 = rho_bar_crit0 / m_p * Xh

n_e_crit = lambda z: n_p_crit(z) * elec_frac
n_e_crit0 = n_p_crit0 * elec_frac
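The distance and density lambdas above wrap astropy's LambdaCDM methods in CGS units. As a quick consistency check, the critical density should scale with the squared dimensionless Hubble rate, rho_crit(z) = rho_crit0 * E(z)^2; a minimal sketch, assuming only the cosmology and lambdas defined above:

import numpy as np

z_test = 0.5
# critical density scales as the squared Hubble rate
assert np.isclose(rho_crit(z_test), rho_crit0 * cosmology.efunc(z_test)**2)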
Beispiel #22
0
# Create LCDM cosmology
cosmo = LambdaCDM(H0=h * 100., Om0=O_matter, Ode0=O_lambda)

## Defining the lens and source samples


# Creating a normalized gaussian distribution
def calc_gaussian(x, mu, sigma):
    a = 1 / (sigma * np.sqrt(2 * np.pi))
    gaussian = a * np.exp(-0.5 * ((x - mu) / sigma)**2.)
    return gaussian


# Source redshift/distance bins
zsbins = np.linspace(0.05, 1.2, 70)
Dcsbins = (cosmo.comoving_distance(zsbins).to('pc')).value

# Source redshift PDF
srcZ = 0.6
srcSigma = 0.2
srcPZ = calc_gaussian(zsbins, srcZ, srcSigma)

# Lens redshifts
galZlist = np.linspace(0.2, 0.5, 10)
Dcls = (cosmo.comoving_distance(galZlist).to('pc')).value
Dals = Dcls / (1. + galZlist)

# Redshift barrier
z_epsilon = 0.2
Dc_epsilon = (cosmo.comoving_distance(z_epsilon).to('pc')).value
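srcPZ above is a probability density, not a set of per-bin weights; a minimal sketch (not part of the original snippet) of turning it into normalized weights and using them, reusing zsbins, srcPZ and Dcsbins from above:

import numpy as np

dZs = np.diff(zsbins)[0]            # uniform source bin width
srcWeights = srcPZ * dZs
srcWeights /= np.sum(srcWeights)    # normalize the weights to unity

# e.g. the PDF-weighted mean comoving distance to the sources [pc]
Dcs_mean = np.sum(srcWeights * Dcsbins)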
Beispiel #23
0
cosmo = LambdaCDM(H0=h * 100., Om0=O_matter, Ode0=O_lambda)


# Creating a normalized gaussian distribution
def calc_gaussian(x, mu, sigma):
    a = 1 / (sigma * np.sqrt(2 * np.pi))
    gaussian = a * np.exp(-0.5 * ((x - mu) / sigma)**2.)
    return gaussian


## Defining the lens and source samples

# Source redshift/distance bins
zsbins = np.linspace(0.05, 1.2, 70)
dZs = np.diff(zsbins)[0]
Dcsbins = (cosmo.comoving_distance(zsbins).to('pc')).value

# Source redshift PDF
srcZ = 0.6
srcSigma = 0.2
srcPZ = calc_gaussian(zsbins, srcZ, srcSigma)

# Lens redshift/distance bins
zlensbins = np.linspace(0.05, 0.7, 100)
dZl = np.diff(zlensbins)[0]
Dclbins = (cosmo.comoving_distance(zlensbins).to('pc')).value
Dalbins = Dclbins / (1 + zlensbins)

# Lens redshift PDF's
galZlist = np.linspace(0.2, 0.5, 10)
galSigma = [0.026] * len(galZlist)  # alternative: 0.021 * (1 + galZlist)
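The snippet breaks off before the per-lens redshift PDFs are built; a purely illustrative sketch of one plausible continuation (not the original code), giving each lens a Gaussian P(z) of width galSigma on the zlensbins grid:

import numpy as np

galPZ = np.array([calc_gaussian(zlensbins, galZlist[i], galSigma[i])
                  for i in range(len(galZlist))])
galPZ = galPZ / np.trapz(galPZ, zlensbins, axis=1)[:, None]  # normalize each lens PDF on the grid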
Beispiel #24
0
def apply_rsd(mock_catalog):
    """
    Applies redshift-space distortions

    Parameters
    ----------
    mock_catalog: Pandas dataframe
        Galaxy catalog

    Returns
    ---------
    mock_catalog: Pandas dataframe
        Mock catalog with redshift-space distortions now applied and
        ra,dec,rsd positions and velocity information added
    """

    ngal = len(mock_catalog)
    speed_c = 3 * 10**5  #km/s
    z_min = 0
    z_max = 0.5
    dz = 10**-3
    H0 = 100
    omega_m = 0.25
    omega_b = 0.04
    Tcmb0 = 2.7255

    redshift_arr = np.arange(z_min, z_max, dz)
    # LambdaCDM takes (H0, Om0, Ode0, ...): assume flatness and pass the baryon density via Ob0
    cosmo = LambdaCDM(H0=H0, Om0=omega_m, Ode0=1. - omega_m, Tcmb0=Tcmb0, Ob0=omega_b)
    como_dist = cosmo.comoving_distance(redshift_arr)
    comodist_z_interp = interp1d(como_dist, redshift_arr)

    cart_gals = mock_catalog[['x', 'y', 'z']].values  #Mpc/h
    vel_gals = mock_catalog[['vx', 'vy', 'vz']].values  #km/s

    dist_from_obs_arr = np.zeros(ngal)
    ra_arr = np.zeros(ngal)
    dec_arr = np.zeros(ngal)
    cz_arr = np.zeros(ngal)
    cz_nodist_arr = np.zeros(ngal)
    vel_tan_arr = np.zeros(ngal)
    vel_tot_arr = np.zeros(ngal)
    vel_pec_arr = np.zeros(ngal)
    for x in tqdm(range(ngal)):
        dist_from_obs = (np.sum(cart_gals[x]**2))**.5
        z_cosm = comodist_z_interp(dist_from_obs)
        cz_cosm = speed_c * z_cosm
        cz_val = cz_cosm
        ra, dec = cart_to_spherical_coords(cart_gals[x], dist_from_obs)
        vr = np.dot(cart_gals[x], vel_gals[x]) / dist_from_obs
        #this cz includes hubble flow and peculiar motion
        cz_val += vr * (1 + z_cosm)
        vel_tot = (np.sum(vel_gals[x]**2))**.5
        vel_tan = (vel_tot**2 - vr**2)**.5
        vel_pec = (cz_val - cz_cosm) / (1 + z_cosm)
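        # (annotation, not original code) cz_val and vel_pec above implement
        # cz_obs = cz_cosm + v_r * (1 + z_cosm) and its inversion
        # v_pec = (cz_obs - cz_cosm) / (1 + z_cosm) = v_r, the low-velocity mapping
        # between cosmological and observed redshift.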
        dist_from_obs_arr[x] = dist_from_obs
        ra_arr[x] = ra
        dec_arr[x] = dec
        cz_arr[x] = cz_val
        cz_nodist_arr[x] = cz_cosm
        vel_tot_arr[x] = vel_tot
        vel_tan_arr[x] = vel_tan
        vel_pec_arr[x] = vel_pec

    mock_catalog['r_dist'] = dist_from_obs_arr
    mock_catalog['ra'] = ra_arr
    mock_catalog['dec'] = dec_arr
    mock_catalog['cz'] = cz_arr
    mock_catalog['cz_nodist'] = cz_nodist_arr
    mock_catalog['vel_tot'] = vel_tot_arr
    mock_catalog['vel_tan'] = vel_tan_arr
    mock_catalog['vel_pec'] = vel_pec_arr

    return mock_catalog
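A minimal usage sketch for apply_rsd (not part of the original snippet), with an invented five-galaxy mock; it assumes the snippet's module already provides pandas, numpy, tqdm, interp1d, LambdaCDM and the cart_to_spherical_coords helper:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
mock = pd.DataFrame({
    'x': rng.uniform(50., 200., 5),   # positions in Mpc/h
    'y': rng.uniform(50., 200., 5),
    'z': rng.uniform(50., 200., 5),
    'vx': rng.normal(0., 300., 5),    # velocities in km/s
    'vy': rng.normal(0., 300., 5),
    'vz': rng.normal(0., 300., 5),
})

mock = apply_rsd(mock)
print(mock[['r_dist', 'ra', 'dec', 'cz', 'cz_nodist', 'vel_pec']])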
Beispiel #25
0
                           widths=0.02)
    for partname in ('cbars', 'cmins', 'cmaxes'):
        vp = parts[partname]
        vp.set_edgecolor(color2)
        vp.set_linewidth(1)
    for pc in parts['bodies']:
        pc.set_facecolor(color2)
        pc.set_edgecolor(color2)

    if ii == 0:
        labels.append((matplotlib.patches.Patch(color=color2), "Bulla"))

redshifts = np.logspace(-3, 1, 100)
line = plt.plot(
    redshifts,
    5.0 * (np.log10(cosmo.luminosity_distance(redshifts).value * 1e6) - 1.0),  # distance modulus uses D_L
    '--',
    color=color3,
    label=r'$\Lambda_\mathrm{CDM}$')
#labels.append(line)
labels.append(
    (matplotlib.lines.Line2D([-100, 100], [200, 200],
                             color=color3,
                             linestyle="--"), r'$\Lambda_\mathrm{CDM}$'))

#plt.xlabel(r'Redshift')
plt.ylabel('Distance Mod. [mag]')
plt.xlim([-0.01, 0.2])
plt.legend(*zip(*labels), loc=2)
#plt.ylim([0,2500])
#plt.xlim([-2.5,1.0])
Beispiel #26
0
    z = (1.0 - scale_factor) / scale_factor

    remove = np.where(z > 10.0)

    z = np.delete(z, remove)
    masses = np.delete(masses, remove, axis=1)

    remove1 = np.where(masses < 1e3)

    masses = np.delete(masses, remove1, axis=1)
    z = np.delete(z, remove1)

    cosmo = LambdaCDM(WMAP9.H(0.0).value, WMAP9.Om(0.0), 1.0 - WMAP9.Om(0.0))

    comoving_distance = cosmo.comoving_distance(z).value * 1e6 * 3.086e16
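    # (annotation) the factor 1e6 * 3.086e16 converts Mpc -> pc -> m,
    # so comoving_distance is in metres here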

    f.close()

    #perform kernel density estimate
    #values = np.vstack([masses[0],masses[1],comoving_distance])
    kernel_generate = stats.gaussian_kde(comoving_distance)
    kernel = stats.gaussian_kde(
        np.vstack([comoving_distance, masses[0], masses[1]]))

    boundary = cosmo.comoving_distance(10.0)

    comoving_volume_cube = 106.5**3.0  # [Mpc^3]
    comoving_volume_boundary = 4.0 / 3.0 * pi * boundary.value**3.0

    generate_counts = int(
Beispiel #27
0
galRA, galDEC, galZ, rmag, rmag_abs = utils.import_gamacat(
    path_gamacat, gamacatname)

galmask = (rmag_abs < -21.) & (galZ < zmax)
galRA, galDEC, galZ, rmag, rmag_abs = (galRA[galmask], galDEC[galmask],
                                       galZ[galmask], rmag[galmask],
                                       rmag_abs[galmask])

# Calculating the number of galaxies...
Ngals = np.array([np.sum(galZ < zbins[z])
                  for z in range(len(zbins))])  # below each redshift bin
Ngals_high = len(galZ) - Ngals  # above each redshift bin

# Calculating the volume of the cone at each redshift bin
cosmo = LambdaCDM(H0=70., Om0=0.315, Ode0=0.685)

Dcbins = cosmo.comoving_distance(zbins).to('Mpc').value  # Comoving distance to each bin limit
Mpc_am = cosmo.kpc_comoving_per_arcmin(zbins).to('Mpc/arcmin').value  # Comoving Mpc per arcmin at each bin limit
areabins = np.pi * (theta * Mpc_am)**2.  # Comoving area of the circle at each bin limit
#covolbins = cosmo.comoving_volume(zbins).to('kpc3').value

covolbins = 1. / 3. * areabins * Dcbins  # Comoving cone volume below each bin limit
covolbins_high = covolbins[-1] - covolbins  # Comoving cone volume above each bin limit

density = Ngals / covolbins  # Density below the redshift limit
density_high = Ngals_high / covolbins_high  # Density above the redshift limit
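# (sketch, not original code) equivalent route hinted at by the commented-out
# cosmo.comoving_volume line above: scale astropy's full-sky comoving volume by the
# cone's solid-angle fraction; assumes theta is the cone radius in arcmin, as in the snippet
omega_cone = np.pi * np.radians(theta / 60.)**2  # cone solid angle [sr], small-angle approx.
covolbins_alt = cosmo.comoving_volume(zbins).value * omega_cone / (4. * np.pi)  # [Mpc^3]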

densmask = (0.2 < zbins) & (zbins < 0.25)
Beispiel #28
0
        masktype = 'nomask-%g' % ijnum
        i, j = ijlist[ij]
        thetanum = 0
    else:
        masktype = 'nomask'
        thetanum = ij
    if 'miceZ' in selection:
        masktype = 'nomask-Z'

    # Import trough catalog
    path_troughcat = '/data2/brouwer/MergedCatalogues/trough_catalogs'
    troughcatname = 'trough_catalog_%s_%s_%s.fits' % (cat, selection, masktype)
    troughRA, troughDEC, troughZ, paramlists = utils.import_troughcat(
        path_troughcat, troughcatname, [])
    troughZ = troughZ[0]
    troughDc = (cosmo.comoving_distance(troughZ).to('pc')).value
    troughDa = troughDc / (1 + troughZ)

    print('Troughs:', len(troughRA))

    # Weights of the troughs
    troughweights = np.ones(len(troughRA))

    # Redshift samples
    if 'lowZ' in selection:
        thetalist = np.array([10.])
    if 'highZ' in selection:
        thetalist = np.array([6.303])

    if 'miceZ' in selection:
        thetalist = np.array([20., 12.85, 9.45, 7.44, 6.14])  # Dc
Beispiel #29
0
mi_h = halos.i_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)
mz_h = halos.z_des_true - 0.8 * (np.arctan(1.5 * z_h) - 0.1489)

lMH = np.log10(10**(halos.log_m) * 0.7)

Dl_h = np.array(cosmo.luminosity_distance(z_h).value) * 1.e6

Mg_h = mg_h + 5.0 - 5.0 * np.log10(Dl_h)
Mr_h = mr_h + 5.0 - 5.0 * np.log10(Dl_h)
Mi_h = mi_h + 5.0 - 5.0 * np.log10(Dl_h)
Mz_h = mz_h + 5.0 - 5.0 * np.log10(Dl_h)
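# (annotation, not original code) absolute magnitudes via the distance modulus,
# M = m - 5*log10(D_L / 10 pc) = m + 5 - 5*log10(D_L[pc]), with Dl_h in parsec above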

mh = (z_h > 0.2) * (z_h < 0.65) * (lMH > 13.77)  # SELECT HALOS TO MIMIC redMaPPer

Dcosmo = cosmo.comoving_distance(z_h[mh])
catalog = SkyCoord(ra=halos.ra[mh] * u.degree,
                   dec=halos.dec[mh] * u.degree,
                   distance=Dcosmo)
idx, Rprox_h2, Rprox_h = catalog.match_to_catalog_3d(catalog, nthneighbor=2)
Rprox_h = np.array(Rprox_h.value)
Rprox_h2 = np.array(Rprox_h2.value)

# ----------------------
# redMaPPer PARAMETERS
# ----------------------

IDc = clusters.ID
z_c = clusters.Z_LAMBDA
mg_c = clusters.MAG_AUTO_G
mr_c = clusters.MAG_AUTO_R
zmin = 0.1
zgama = 0.5

# Path to the KiDS fields
if cat == 'kids':

    # Path to the KiDS fields
    path_kidscat = '/data2/brouwer/KidsCatalogues'
    kidscatname = '/KiDS_DR3_GAMA-like_Maciek_revised_1905.fits'

    # Importing the KiDS galaxies
    galRA, galDEC, galZB, galZ, galTB, mag_auto, ODDS, umag, gmag, rmag, imag = \
    utils.import_kidscat(path_kidscat, kidscatname)
    rmag_abs = utils.calc_absmag(rmag, galZ, gmag, imag, h, O_matter, O_lambda)

    galDc = (cosmo.comoving_distance(galZ).to('Mpc')).value
    gama_rlim = 20.2

# Path to the GAMA fields
if cat == 'gama':

    path_gamacat = '/data2/brouwer/MergedCatalogues/'
    gamacatname = 'ShearMergedCatalogueAll_sv0.8.fits'

    # Importing the GAMA coordinates
    galRA, galDEC, galZ, rmag, rmag_abs = \
    utils.import_gamacat(path_gamacat, gamacatname)

    galDc = (cosmo.comoving_distance(galZ).to('Mpc')).value
    gama_rlim = 19.8