Example #1
def load_observations(filenames: Union[str, Iterable[str]],
                      redshift_bracket: List[float] = [0.0, 1000.0]):
    """
    Parameters
    ----------

    filenames: Union[str, Iterable[str]]
        Filename(s) of the observational dataset that you wish to load.
        Should probably end in .hdf5. See the documentation for
        :class:`velociraptor.observations.objects.ObservationalData`
        and :class:`velociraptor.observations.objects.MultiRedshiftObservationalData`
        for more information.

    redshift_bracket: List[float]
        Redshift bracket to overlap with. If any of the observations in the
        file overlap with this bracket, they are returned. By default, this
        bracket is 0.0 to 1000.0, so will encompass all reasonable
        observations present in the file.
    

    Returns
    -------

    List[velociraptor.observations.objects.ObservationalData]:
        Observational data instances read from file that overlap with your
        specified redshift bracket.
    """

    returned_data = []

    if not isinstance(filenames, list):
        filenames = [filenames]

    for filename in filenames:
        try:
            multi_z = MultiRedshiftObservationalData()
            multi_z.load(filename)

            returned_data += multi_z.get_datasets_overlapping_with(
                redshifts=redshift_bracket)
        except ObservationalDataError:
            data = ObservationalData()
            data.load(filename)

            # Lower and upper bounds of the requested redshift bracket to
            # return datasets between
            lower, upper = redshift_bracket

            if ((data.redshift_lower <= lower and lower <= data.redshift_upper)
                    or
                (data.redshift_lower <= upper and upper <= data.redshift_upper)
                    or (lower <= data.redshift_lower
                        and data.redshift_upper <= upper)):
                returned_data.append(data)

    return returned_data
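
A minimal usage sketch of load_observations; the redshift bracket below is an illustrative choice, and the file name is one of the outputs written in the later examples:

# Hypothetical usage: collect every dataset in the file that overlaps 0 <= z <= 0.5
observations = load_observations(
    "Behroozi2019_centrals.hdf5", redshift_bracket=[0.0, 0.5]
)

for obs in observations:
    # Each ObservationalData instance carries the metadata attached through the
    # associate_* calls (citation, redshift, x/y arrays, ...)
    print(obs.citation, obs.redshift)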
Example #2
citation = "Chartab et al. (2021)"
bibcode = "2021arXiv210101706C"
name = "MOSDEF Survey: gas-phase metallicity of galaxies at 1.4 <= z <= 2.6"
plot_as = "line"
h = h_sim

name = f"Fit to the stellar mass - gas metallicity at z=[{redshift_header_info:s}]"
comment = (
    "The data is taken from Chartab+21 "
    "Median fit to galaxy stacks from MOSDEF survey. "
    "Stellar masses obtained assuming a Chabrier IMF. "
    "The metallicity is expressed as 12 + log10(O/H), in these units the solar metallicity is 8.69."
)

# Store metadata at the top level
multi_z = MultiRedshiftObservationalData()
multi_z.associate_citation(citation, bibcode)
multi_z.associate_name(name)
multi_z.associate_comment(comment)
multi_z.associate_cosmology(cosmology)
multi_z.associate_maximum_number_of_returns(1)

output_filename = "Chartab2021.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)

for z, dz_lower, dz_upper in zip(redshifts, redshifts_lower, redshifts_upper):

    # Create a single observational-data instance at redshift z
def passive_fractions_centrals():

    # Meta-data
    name = ("Fit to the passive fraction - stellar mass (centrals) "
            f"at z=[{redshift_header_info:s}]")
    comment = (
        "The data is taken from https://www.peterbehroozi.com/data.html. "
        "The quenched fractions are defined using the standard criterion where "
        "specific star formation rate < 1e-11 yr^-1. "
        "The stellar mass is the observed stellar mass as defined in Behroozi et al. "
        "(2019) eq. 25. "
        "Uses the Chabrier initial mass function. "
        "The passive fractions are given by the 50th percentile of the posterior "
        "distribution of the fitting model. "
        "Cosmology: Omega_m=0.307, Omega_lambda=0.693, h=0.678, sigma_8=0.823, "
        "n_s=0.96. "
        "Shows the passive fraction of centrals versus galaxy stellar mass.")

    # Store metadata at the top level
    multi_z = MultiRedshiftObservationalData()
    multi_z.associate_citation(citation, bibcode)
    multi_z.associate_name(name)
    multi_z.associate_comment(comment)
    multi_z.associate_cosmology(cosmology)
    multi_z.associate_maximum_number_of_returns(1)

    output_filename = "Behroozi2019_centrals.hdf5"
    output_directory = "../"

    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    for z, dz_lower, dz_upper, a_str in zip(redshifts, redshifts_lower,
                                            redshifts_upper,
                                            scale_factors_str):
        # Create a single observational-data instance at redshift z
        processed = ObservationalData()

        # Load raw Behroozi2019 data
        data = np.loadtxt(f"../raw/Behroozi2019_qf_groupstats_a{a_str}.dat")

        # Fetch the fields we need
        log_M_star, QF, QF_plus, QF_minus = (
            data[:, 0],
            data[:, 1],
            data[:, 2],
            data[:, 3],
        )

        # We don't want to plot zeros
        mask = np.where(QF > 0.0)

        # Transform stellar mass
        M_star = (10.0**log_M_star) * unyt.Solar_Mass

        # Define scatter with respect to the best-fit value (16 and 84 percentiles)
        QF_scatter = unyt.unyt_array((QF_minus[mask], QF_plus[mask]),
                                     units="dimensionless")

        # Compute \Delta z
        redshift_lower, redshift_upper = [z - dz_lower, z + dz_upper]

        processed.associate_x(
            M_star[mask],
            scatter=None,
            comoving=False,
            description="Galaxy Stellar Mass",
        )
        processed.associate_y(
            QF[mask] * unyt.dimensionless,
            scatter=QF_scatter,
            comoving=False,
            description="Passive Fraction (centrals)",
        )

        processed.associate_redshift(z, redshift_lower, redshift_upper)
        processed.associate_plot_as(plot_as)

        multi_z.associate_dataset(processed)

    output_path = f"{output_directory}/{output_filename}"

    if os.path.exists(output_path):
        os.remove(output_path)

    multi_z.write(filename=output_path)
Example #4
input_filename = "../raw/Ilbert2013.txt"

output_filename = "Ilbert2013.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)
comment = (
    "Assuming Chabrier IMF and Vmax selection, quoted redshift is lower bound of range. "
    f"h-corrected for SWIFT using Cosmology: {cosmology.name}.")
citation = "Ilbert et al. (2013)"
bibcode = "2013A&A...556A..55I"
name = "GSMF from UltraVISTA"

multi_z = MultiRedshiftObservationalData()
multi_z.associate_comment(comment)
multi_z.associate_name(name)
multi_z.associate_citation(citation, bibcode)
multi_z.associate_cosmology(cosmology)

# z_bins is a 1-D ndarray containing the lower edges of the redshift bins
# gsmf_and_Mstar is a list of 2D ndarrays, one per redshift
# Each contains five columns as follows:
# log(Mstar) bins, Mstar errors, log(GSMF), GSMF +- errors
z_bins, gsmf_and_Mstar = load_file_and_split_by_z(input_filename)

for z, gsmf_and_Mstar_at_z in zip(z_bins, gsmf_and_Mstar):
    multi_z.associate_dataset(process_for_redshift(z, gsmf_and_Mstar_at_z))

output_path = f"{output_directory}/{output_filename}"

if os.path.exists(output_path):
    os.remove(output_path)

multi_z.write(filename=output_path)
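
process_for_redshift is defined elsewhere in the conversion script. The sketch below is only a plausible reconstruction, assuming the five-column layout described in the comment above, linear (not logarithmic) GSMF errors, and the associate_* pattern used in the other examples; the h-correction mentioned in the comment string and the redshift bin widths are omitted.

def process_for_redshift(z, gsmf_and_Mstar_at_z):
    # Hypothetical sketch, not the script's actual implementation.
    processed = ObservationalData()

    # Assumed columns: log10(M*), M* error, log10(GSMF), GSMF error (-), GSMF error (+)
    log_M_star = gsmf_and_Mstar_at_z[:, 0]
    log_Phi = gsmf_and_Mstar_at_z[:, 2]
    Phi_err_minus = gsmf_and_Mstar_at_z[:, 3]
    Phi_err_plus = gsmf_and_Mstar_at_z[:, 4]

    M_star = 10.0**log_M_star * unyt.Solar_Mass
    Phi = 10.0**log_Phi * unyt.Mpc**(-3)
    Phi_scatter = unyt.unyt_array((Phi_err_minus, Phi_err_plus),
                                  units=unyt.Mpc**(-3))

    processed.associate_x(M_star, scatter=None, comoving=True,
                          description="Galaxy Stellar Mass")
    processed.associate_y(Phi, scatter=Phi_scatter, comoving=True,
                          description="Phi (GSMF)")

    # The quoted z is the lower edge of the bin (see the comment string above);
    # the upper edge is not available in this excerpt, so z is reused here.
    processed.associate_redshift(z, z, z)
    processed.associate_plot_as("points")

    return processed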
def Phi_passive_galaxies():

    # Meta-data
    name = f"Fit to the quenched galaxy stellar mass function at z=[{redshift_header_info:s}]"
    comment = (
        "The data is taken from https://www.peterbehroozi.com/data.html. "
        "The stellar mass is the observed stellar mass as defined in Behroozi et al. "
        "(2019) eq. 25. "
        "The quenched fractions are defined using the standard criterion where specific"
        " star formation rate < 1e-11 yr^-1. "
        "Uses the Chabrier initial mass function. "
        "GSMF is incomplete below 10**7.0 Msun at z=0 and 10**8.5 Msum at z=8. "
        "Cosmology: Omega_m=0.307, Omega_lambda=0.693, h=0.678, sigma_8=0.823, "
        "n_s=0.96. "
        "Shows the quenched galaxy stellar mass function (number densities in comoving"
        " Mpc^-3 dex^-1 vs. stellar mass).")

    # Store metadata at the top level
    multi_z = MultiRedshiftObservationalData()
    multi_z.associate_citation(citation, bibcode)
    multi_z.associate_name(name)
    multi_z.associate_comment(comment)
    multi_z.associate_cosmology(cosmology)
    multi_z.associate_maximum_number_of_returns(1)

    output_filename = "Behroozi2019_passive.hdf5"
    output_directory = "../"

    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    for z, dz_lower, dz_upper, a_str in zip(redshifts, redshifts_lower,
                                            redshifts_upper,
                                            scale_factors_str):
        # Create a single observational-data instance at redshift z
        processed = ObservationalData()

        # Load raw Behroozi2019 data
        data = np.loadtxt(f"../raw/Behroozi2019_smf_a{a_str}.dat")

        # Fetch the fields we need
        log_M_star, Phi, Phi_plus, Phi_minus = (
            data[:, 0],
            data[:, 7],
            data[:, 8],
            data[:, 9],
        )

        # We don't want to plot zeros
        mask = np.where(Phi > 0.0)

        # Transform stellar mass
        M_star = (10.0**log_M_star) * unyt.Solar_Mass

        # Define scatter with respect to the best-fit value (16 and 84 percentiles)
        Phi_scatter = unyt.unyt_array((Phi_minus[mask], Phi_plus[mask]),
                                      units=unyt.Mpc**(-3))

        # Compute \Delta z
        redshift_lower, redshift_upper = [z - dz_lower, z + dz_upper]

        processed.associate_x(
            M_star[mask],
            scatter=None,
            comoving=False,
            description="Galaxy Stellar Mass",
        )
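        # Number densities in comoving Mpc^-3 scale as h^3, so the GSMF and its
        # scatter are rescaled from the paper's Hubble parameter (ORIGINAL_H)
        # to the simulation's h_sim below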
        processed.associate_y(
            Phi[mask] * (h_sim / ORIGINAL_H)**3 * unyt.Mpc**(-3),
            scatter=Phi_scatter * (h_sim / ORIGINAL_H)**3,
            comoving=True,
            description="Phi (GSMF)",
        )

        processed.associate_redshift(z, redshift_lower, redshift_upper)
        processed.associate_plot_as(plot_as)

        multi_z.associate_dataset(processed)

    output_path = f"{output_directory}/{output_filename}"

    if os.path.exists(output_path):
        os.remove(output_path)

    multi_z.write(filename=output_path)
Example #6
output_filename = "Pillepich2018_TNG{box}.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)

comment = ("Assuming Chabrier IMF, values calculated using 30 pkpc apertures. "
           "Obtained from TNG data portal. "
           f"h-corrected for SWIFT using Cosmology: {cosmology.name}.")
citation = "Pillepich et al. (2018)"
bibcode = "2018MNRAS.475..648P"
name = "GSMF from Illustris-TNG{box}"

for box_sz in box_sizes:
    multi_z = MultiRedshiftObservationalData()
    multi_z.associate_comment(comment)
    multi_z.associate_name(name.format(box=box_sz))
    multi_z.associate_citation(citation, bibcode)
    multi_z.associate_cosmology(cosmology)
    multi_z.associate_maximum_number_of_returns(1)

    z_bins, Mstar_bins, gsmf = load_file(input_filename.format(box=box_sz))

    for z, gsmf_at_z in zip(z_bins, gsmf):
        multi_z.associate_dataset(
            process_for_redshift(z, Mstar_bins, gsmf_at_z))

    output_path = f"{output_directory}/{output_filename.format(box=box_sz)}"

    if os.path.exists(output_path):
        os.remove(output_path)

    multi_z.write(filename=output_path)
Example #7
input_filename = "../raw/Duncan2014.txt"

output_filename = "Duncan2014.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)

comment = ("Assuming Chabrier IMF and Vmax selection."
           f"h-corrected for SWIFT using Cosmology: {cosmology.name}.")
citation = "Duncan et al. (2014)"
bibcode = "2014MNRAS.444.2960D"
name = "GSMF from CANDELS/GOODS-S"

multi_z = MultiRedshiftObservationalData()
multi_z.associate_comment(comment)
multi_z.associate_name(name)
multi_z.associate_citation(citation, bibcode)
multi_z.associate_cosmology(cosmology)
multi_z.associate_maximum_number_of_returns(1)

# z_bins is a 1-D ndarray containing the redshift bins
# gsmf_and_Mstar is a list of 2D ndarrays, one per redshift
# Each contains five columns as follows:
# log(Mstar) bins, log(Mstar) error, log(GSMF), GSMF -+ errors

z_bins, gsmf_and_Mstar = load_file_and_split_by_z(input_filename)

for z, gsmf_and_Mstar_at_z in zip(z_bins, gsmf_and_Mstar):
    multi_z.associate_dataset(process_for_redshift(z, gsmf_and_Mstar_at_z))
Example #8
bibcode = "2020ApJ...905..170M"
name = "Galaxy Stellar Mass-Galaxy Size"
plot_as = "points"
h_obs = 0.7
h = cosmology.h

stellar_mass_bin_range = unyt.unyt_array([10**9.6, 10**11.6], "Solar_Mass")
number_of_bins = 10

z = raw.T[1]
M = raw.T[2] * unyt.Solar_Mass
R = raw.T[3] * unyt.kpc
e_R = raw.T[4] * unyt.kpc
sf = raw.T[5].astype(bool)

multi_z_sf = MultiRedshiftObservationalData()
multi_z_sf.associate_comment(f"{comment} Includes SFing galaxies only.")
multi_z_sf.associate_name(f"{name} (SF)")
multi_z_sf.associate_citation(f"{citation} (SF)", bibcode)
multi_z_sf.associate_cosmology(cosmology)
multi_z_sf.associate_maximum_number_of_returns(1)

multi_z_nsf = MultiRedshiftObservationalData()
multi_z_nsf.associate_comment(f"{comment} Includes quiescent galaxies only.")
multi_z_nsf.associate_name(f"{name} (Q)")
multi_z_nsf.associate_citation(f"{citation} (Q)", bibcode)
multi_z_nsf.associate_cosmology(cosmology)
multi_z_nsf.associate_maximum_number_of_returns(1)

redshift_bins = [[0.3, 0.7], [0.7, 1.0], [1.0, 1.3], [1.3, 2.0]]
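
The loop that fills these bins is not included in this excerpt. Below is a minimal sketch, under the assumption that the stellar-mass bins are logarithmically spaced over stellar_mass_bin_range, of how the bin edges and a per-redshift selection could be set up (numpy and unyt imported as in the surrounding examples):

# Hypothetical sketch: logarithmically spaced stellar-mass bin edges spanning
# stellar_mass_bin_range; the spacing choice is an assumption.
mass_bin_edges = np.logspace(
    np.log10(stellar_mass_bin_range[0].value),
    np.log10(stellar_mass_bin_range[1].value),
    number_of_bins + 1,
) * unyt.Solar_Mass

# Example selection: star-forming galaxies falling in the first redshift bin
z_lo, z_hi = redshift_bins[0]
in_bin = sf & (z >= z_lo) & (z < z_hi)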
def StellarMassHaloMassRatios_vs_StellarMass():

    name = f"Fit to the stellar mass / halo mass - stellar mass relation at z=[{redshift_header_info:s}]"
    comment = (
        "The data is taken from https://www.peterbehroozi.com/data.html. "
        "Median fit to the raw data for centrals (i.e. excluding satellites). "
        "The stellar mass is the true stellar mass (i.e. w/o observational "
        "corrections). "
        "The halo mass is the peak halo mass that follows the Bryan & Norman (1998) "
        "spherical overdensity definition. "
        "The fitting function does not include the intrahalo light contribution to the "
        "stellar mass. "
        "Cosmology: Omega_m=0.307, Omega_lambda=0.693, h=0.678, sigma_8=0.823, "
        "n_s=0.96. "
        "Shows the ratio between stellar mass and halo mass as a function of stellar "
        "mass. ")

    # Store metadata at the top level
    multi_z = MultiRedshiftObservationalData()
    multi_z.associate_citation(citation, bibcode)
    multi_z.associate_name(name)
    multi_z.associate_comment(comment)
    multi_z.associate_cosmology(cosmology)
    multi_z.associate_maximum_number_of_returns(1)

    output_filename = "Behroozi2019RatioStellar.hdf5"
    output_directory = "../"

    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    for z, dz_lower, dz_upper in zip(redshifts, redshifts_lower,
                                     redshifts_upper):
        # Create a single observational-data instance at redshift z
        processed = ObservationalData()

        # Stellar masses (for the given halo masses, at redshift z)
        M_star, M_84, M_16 = behroozi_2019_raw_with_uncertainties(
            z, M_BN98,
            "../raw/Behroozi_2019_fitting_params_smhm_true_med_cen.txt")

        # Compute \Delta z
        redshift_lower, redshift_upper = [z - dz_lower, z + dz_upper]

        # Define scatter
        y_scatter = unyt.unyt_array(
            ((M_star - M_16) / M_BN98, (M_84 - M_star) / M_BN98))

        processed.associate_x(
            M_star * unyt.Solar_Mass,
            scatter=None,
            comoving=True,
            description="Galaxy Stellar Mass",
        )
        processed.associate_y(
            (M_star / M_BN98) * unyt.dimensionless,
            scatter=y_scatter * unyt.dimensionless,
            comoving=True,
            description=
            "Galaxy Stellar Mass / Halo Mass ($M_* / M_{\\rm BN98}$)",
        )

        processed.associate_redshift(z, redshift_lower, redshift_upper)
        processed.associate_plot_as(plot_as)

        multi_z.associate_dataset(processed)

    output_path = f"{output_directory}/{output_filename}"

    if os.path.exists(output_path):
        os.remove(output_path)

    multi_z.write(filename=output_path)