Example #1
processed = ObservationalData()

# Read the data (only those columns we need here)
raw = np.loadtxt(input_filename, delimiter=delimiter, usecols=(2, 3, 4, 5))

M_BH = 10**raw[:, 0] * unyt.Solar_Mass
M_BH_low = 10**(raw[:, 0] - raw[:, 1]) * unyt.Solar_Mass
M_BH_high = 10**(raw[:, 0] + raw[:, 1]) * unyt.Solar_Mass

M_halo = 10**raw[:, 2] * unyt.Solar_Mass
M_halo_low = 10**(raw[:, 2] - raw[:, 3]) * unyt.Solar_Mass
M_halo_high = 10**(raw[:, 2] + raw[:, 3]) * unyt.Solar_Mass

# Define the scatter as offset from the mean value
x_scatter = unyt.unyt_array((M_halo - M_halo_low, M_halo_high - M_halo))
y_scatter = unyt.unyt_array((M_BH - M_BH_low, M_BH_high - M_BH))

comment = ("Masses are provided h-free and cosmology-independent, so no "
           "h-correction made. "
           "Masses are (mostly) determined dynamically, with some stallar "
           "masses obtained from K-band luminosities with a fixed conversion "
           "factor. Halo masses are defined as M200_crit.")
citation = "Marasco et al. (2021)"
bibcode = "2021arXiv210510508M"
name = "Halo Mass-Black Hole Mass"
plot_as = "points"
redshift = 0.0
h = cosmology.h

processed.associate_x(M_halo,
                      scatter=x_scatter,
                      comoving=False,
                      description="Halo Mass")
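
The log-space errors above become asymmetric linear error bars; a minimal, self-contained sketch of that conversion (synthetic values, no ObservationalData dependency):

import numpy as np
import unyt

log_m = np.array([12.0, 13.0])  # synthetic log10 masses
dlog_m = np.array([0.2, 0.1])   # symmetric errors in dex
m = 10**log_m * unyt.Solar_Mass
m_lo = 10**(log_m - dlog_m) * unyt.Solar_Mass
m_hi = 10**(log_m + dlog_m) * unyt.Solar_Mass

# A symmetric error in log10 is asymmetric in linear units:
# the upper offset (m_hi - m) exceeds the lower offset (m - m_lo).
scatter = unyt.unyt_array((m - m_lo, m_hi - m))  # shape (2, N): lower, upper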
Example #2
import numpy 
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
pyplot.rcParams.update({'font.size':40})
from matplotlib.colors import LogNorm
from swiftsimio import load
from velociraptor.tools.lines import binned_median_line as bml
import unyt

spread = numpy.loadtxt('/cosma5/data/durham/dc-murr1/gas_spread.txt')
snap = load('/cosma6/data/dp004/dc-borr1/swift-test-data/eagle_0037.hdf5')
temp = snap.gas.temperatures

fig, ax = pyplot.subplots(figsize = (20,20))
x_bins = numpy.logspace(numpy.log10(numpy.amin(temp)), numpy.log10(numpy.amax(temp)), num = 100)
y_bins = numpy.logspace(numpy.log10(numpy.amin(spread)), numpy.log10(numpy.amax(spread)), num = 100)
h = ax.hist2d(temp, spread, bins = [x_bins, y_bins], norm = LogNorm())
pyplot.colorbar(h[3], ax=ax)
ax.loglog()
ax.set_ylabel('Spread metric [Mpc]')
ax.set_xlabel("Temperature [K]")
ax.tick_params(length = 10, width = 3)

x_bins = unyt.unyt_array(x_bins, units = temp.units)
spread = unyt.unyt_array(spread, units = 'Mpc')
centers, med, err = bml(temp, spread, x_bins = x_bins)
ax.plot(centers, med, linestyle = '--', linewidth = 7, color = 'red')

fig.savefig('/cosma5/data/durham/dc-murr1/gas_spread_temperature_metric.png')
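
If velociraptor's binned_median_line is not available, the red median track can be reproduced with plain numpy; a rough sketch under that assumption:

import numpy as np

def binned_median(x, y, bins):
    """Median of y in each x bin; empty bins yield NaN."""
    idx = np.digitize(x, bins) - 1
    centers = 0.5 * (bins[1:] + bins[:-1])
    med = np.full(len(centers), np.nan)
    for i in range(len(centers)):
        sel = idx == i
        if sel.any():
            med[i] = np.median(y[sel])
    return centers, med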
Example #3
    # Double-check all particles for boundaries
    for i in range(3):
        mask = xp[:, i] < 0.0
        xp[mask, i] += 1.0
        mask = xp[:, i] > 1.0
        xp[mask, i] -= 1.0

    # Set up metadata
    unitL = unyt.Mpc
    edgelen = 22 * 1e-3 * unitL  # 22 so we can cut off 1kpc on each edge for image
    edgelen = edgelen.to(unitL)
    boxsize = np.array([1.0, 1.0, 0.0]) * edgelen

    xs = unyt.unyt_array(
        [np.array([xs[0] * edgelen, xs[1] * edgelen, 0.0 * edgelen])], unitL
    )
    xp *= edgelen
    h *= edgelen

    w = Writer(unit_system=cosmo_units, box_size=boxsize, dimension=2)

    # Write particle positions and smoothing lengths
    w.gas.coordinates = xp
    w.stars.coordinates = xs
    w.gas.velocities = np.zeros(xp.shape) * (unitL / unyt.Myr)
    w.stars.velocities = np.zeros(xs.shape) * (unitL / unyt.Myr)
    w.gas.smoothing_length = h
    w.stars.smoothing_length = w.gas.smoothing_length[:1]

    # get gas masses
def test_parse_units_xarray_no_copy():
    in_ = xr.DataArray([4, 3, 2, 1], attrs={'units': 'm'})
    actual = process_unit_input(in_, copy=False)
    desired = unyt.unyt_array([4, 3, 2, 1], 'm')
    assert actual.base is in_.values
    assert_allclose_units(actual, desired)
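
The actual.base is in_.values assertion leans on numpy view semantics: a view's .base attribute points back at the array that owns the data. A numpy-only illustration:

import numpy as np

a = np.array([4, 3, 2, 1])
v = a.view()           # no data copied
assert v.base is a     # views remember their owner
c = a.copy()
assert c.base is None  # copies own their data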
Example #5
def recreate_single_figure(
    plot: VelociraptorPlot,
    line_data: Dict[str, Dict],
    output_directory: str,
    file_type: str,
) -> None:
    """
    Recreates a single figure using the data in ``line_data`` and the metadata in
    ``plot``.

    Parameters
    ----------
    plot: VelociraptorPlot
        Velociraptor plot instance (from AutoPlotter).
    line_data: Dict[str, Dict]
        Global line data, obtained from the ``load_yaml_line_data`` function.
    output_directory: str
        Output directory for the plot
    file_type: str
        Output file type (e.g. ``png``)
    """

    try:
        first_line_metadata = line_data[list(line_data.keys())[0]]["metadata"]
        fake_catalogue = FakeCatalogue(
            z=first_line_metadata["redshift"], a=first_line_metadata["scale_factor"]
        )
    except KeyError:
        fake_catalogue = FakeCatalogue()

    fig, ax = plt.subplots()

    # Add simulation data
    for line_type in valid_line_types:
        line = getattr(plot, f"{line_type}_line", None)
        if line is not None:
            for color, (name, data) in enumerate(line_data.items()):
                color_name = f"C{color}"

                try:
                    this_plot = data[plot.filename]
                    this_line_dict = this_plot["lines"][line_type]
                except KeyError:
                    continue

                if (
                    this_line_dict.get("centers", []) == []
                    and this_line_dict.get("additional_points_x", []) == []
                ):
                    # Don't plot this line, as it contains no information.
                    continue

                centers = unyt.unyt_array(this_line_dict["centers"], units=plot.x_units)
                heights = unyt.unyt_array(this_line_dict["values"], units=plot.y_units)
                errors = unyt.unyt_array(this_line_dict["scatter"], units=plot.y_units)

                ax.set_xlabel(this_plot.get("x_label", ax.get_xlabel()))
                ax.set_ylabel(this_plot.get("y_label", ax.get_ylabel()))

                # Data points from the bins with too few data points
                additional_x = unyt.unyt_array(
                    this_line_dict.get("additional_points_x", []), units=plot.x_units
                )
                additional_y = unyt.unyt_array(
                    this_line_dict.get("additional_points_y", []), units=plot.y_units
                )

                if line.scatter == "errorbar":
                    ax.errorbar(
                        centers, heights, yerr=errors, label=name, color=color_name
                    )
                elif line.scatter == "shaded":
                    ax.plot(centers, heights, label=name, color=color_name)

                    # Deal with different + and -ve errors
                    if errors.shape[0]:
                        if errors.ndim > 1:
                            down, up = errors
                        else:
                            up = errors
                            down = errors
                    else:
                        up = 0
                        down = 0

                    ax.fill_between(
                        centers,
                        heights - down,
                        heights + up,
                        color=color_name,
                        alpha=0.3,
                        linewidth=0.0,
                    )

                # line.scatter == "none":
                else:
                    ax.plot(centers, heights, label=name)

                ax.scatter(additional_x, additional_y, c=color_name)

                # Enter only if the plot has a valid Y-axis range and there are any
                # additional data points.
                if plot.y_lim is not None and len(additional_x) > 0:

                    # Draw arrows for each data point beyond X- or/and Y- axis range
                    line.highlight_data_outside_domain(
                        ax,
                        additional_x.value,
                        additional_y.value,
                        color_name,
                        (plot.x_lim[0].value, plot.x_lim[1].value),
                        (plot.y_lim[0].value, plot.y_lim[1].value),
                    )

    # Add observational data second to allow for colour precedence
    # to go to runs
    observational_data_scale_factor_bracket = [
        10 ** (log10(fake_catalogue.a) + plot.observational_data_bracket_width),
        10 ** (log10(fake_catalogue.a) - plot.observational_data_bracket_width),
    ]

    observational_data_redshift_bracket = [
        (1 - x) / x for x in observational_data_scale_factor_bracket
    ]

    valid_observational_data = load_observations(
        plot.observational_data_filenames,
        redshift_bracket=observational_data_redshift_bracket,
    )

    for index, data in enumerate(valid_observational_data, start=1):
        data.x.convert_to_units(plot.x_units)
        data.y.convert_to_units(plot.y_units)
        data.plot_on_axes(
            ax, errorbar_kwargs=dict(zorder=-10, color=f"C{index + color}")
        )

    # Finally set up metadata
    if plot.x_log:
        ax.set_xscale("log")
    if plot.y_log:
        ax.set_yscale("log")

    try:
        ax.set_xlim(*unyt.unyt_array(plot.x_lim, units=plot.x_units))
    except AttributeError:
        pass

    try:
        ax.set_ylim(*unyt.unyt_array(plot.y_lim, units=plot.y_units))
    except AttributeError:
        pass

    decorate_axes(
        ax,
        catalogue=fake_catalogue,
        comment=plot.comment,
        legend_loc=plot.legend_loc,
        redshift_loc=plot.redshift_loc,
        comment_loc=plot.comment_loc,
    )

    fig.savefig(f"{output_directory}/{plot.filename}.{file_type}")
    plt.close(fig)
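
The "shaded" branch above draws a central line plus a translucent band; a self-contained sketch of the same pattern with synthetic data:

import numpy as np
import matplotlib.pyplot as plt

centers = np.linspace(1.0, 10.0, 20)
heights = np.sqrt(centers)
down, up = 0.2 * heights, 0.4 * heights  # asymmetric scatter

fig, ax = plt.subplots()
ax.plot(centers, heights, color="C0", label="median")
ax.fill_between(centers, heights - down, heights + up,
                color="C0", alpha=0.3, linewidth=0.0)
ax.legend()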
def test_parse_units_already_unyt():
    in_ = unyt.unyt_array([1, 2, 3, 4], 'ft')
    out_ = process_unit_input(in_)
    assert_allclose_units(in_, out_)
def test_parse_units_no_copy_array():
    in_ = np.array([1, 2, 3, 4])
    actual = process_unit_input(in_, 'ft', copy=False)
    desired = unyt.unyt_array([1, 2, 3, 4], 'ft')
    assert actual.base is in_
    assert_allclose_units(actual, desired)
Example #8
def project_gas(
    data: SWIFTDataset,
    resolution: int,
    project: Union[str, None] = "masses",
    region: Union[None, unyt_array] = None,
    mask: Union[None, array] = None,
    rotation_center: Union[None, unyt_array] = None,
    rotation_matrix: Union[None, array] = None,
    parallel: bool = False,
    backend: str = "fast",
):
    r"""
    Creates a 2D projection of a SWIFT dataset, projected by the "project"
    variable (e.g. if project is Temperature, we return: \bar{T} = \sum_j T_j
    W_{ij}).

    Default projection variable is mass. If it is None, then we don't
    weight with anything, providing a number density image.

    Parameters
    ----------

    data: SWIFTDataset
        The SWIFT dataset that you wish to visualise (get this from ``load``)

    resolution: int
        The resolution of the image. All images returned are square, ``res``
        by ``res``, pixel grids.

    project: str, optional
        Variable to project to get the weighted density of. By default, this
        is mass. If you would like to mass-weight any other variable, you can
        always create it as ``data.gas.my_variable = data.gas.other_variable
        * data.gas.masses``.

    region: unyt_array, optional
        Region, determines where the image will be created (this corresponds
        to the left and right-hand edges, and top and bottom edges) if it is
        not None. It should have a length of four or six, and take the form:
        ``[x_min, x_max, y_min, y_max, {z_min, z_max}]``

    mask: np.array, optional
        Allows only a sub-set of the particles in data to be visualised. Useful
        in cases where you have read data out of a ``velociraptor`` catalogue,
        or if you only want to visualise e.g. star forming particles. This boolean
        mask is applied just before visualisation.

    rotation_center: np.array, optional
        Center of the rotation. If you are trying to rotate around a galaxy, this
        should be the most bound particle.

    rotation_matrix: np.array, optional
        Rotation matrix (3x3) that describes the rotation of the box around
        ``rotation_center``. In the default case, this provides a projection
        along the z axis.

    parallel: bool, optional
        Defaults to ``False``, whether or not to create the image in parallel.
        The parallel version of this function uses significantly more memory.

    backend: str, optional
        Backend to use. See documentation for details. Defaults to 'fast'.


    Returns
    -------

    image: unyt_array
        Projected image with units of project / length^2, of size ``res`` x
        ``res``.


    Notes
    -----

    + Particles outside of this range are still considered if their smoothing
      lengths overlap with the range.
    + The returned array has x as the first component and y as the second component,
      which is the opposite to what ``imshow`` requires. You should transpose the
      array if you want it to be visualised the 'right way up'.
    """

    image = project_gas_pixel_grid(
        data=data,
        resolution=resolution,
        project=project,
        mask=mask,
        parallel=parallel,
        region=region,
        rotation_matrix=rotation_matrix,
        rotation_center=rotation_center,
        backend=backend,
    )

    if region is not None:
        x_range = region[1] - region[0]
        y_range = region[3] - region[2]
        units = 1.0 / (x_range * y_range)
        # Unfortunately this is required to prevent us from {over,under}flowing
        # the units...
        units.convert_to_units(1.0 / (x_range.units * y_range.units))
    else:
        units = 1.0 / (data.metadata.boxsize[0] * data.metadata.boxsize[1])
        # Unfortunately this is required to prevent us from {over,under}flowing
        # the units...
        units.convert_to_units(1.0 / data.metadata.boxsize.units**2)

    if project is not None:
        units *= getattr(data.gas, project).units

    return unyt_array(image, units=units)
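
Per the Notes, the returned grid is (x, y) ordered, so it should be transposed before imshow; a usage sketch, assuming data comes from swiftsimio.load(...):

import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

image = project_gas(data, resolution=512, project="masses")
plt.imshow(image.value.T, origin="lower", norm=LogNorm())  # transpose: grid is (x, y)
plt.colorbar()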
import unyt
import h5py
import numpy
import hist

units = unyt.unyt_quantity(1 / 0.7 * unyt.kpc).to('Mpc')
spread = unyt.unyt_array(h5py.File(
    '/cosma5/data/durham/dc-murr1/gas_spread.hdf5', 'r')['array_data'],
                         units=units)
spread.convert_to_units('Mpc')

distance = unyt.unyt_array(numpy.loadtxt(
    '/cosma5/data/durham/dc-murr1/simba_gas_nearest_halo_distance.txt'),
                           units='Mpc')
radius = unyt.unyt_array(numpy.loadtxt(
    '/cosma5/data/durham/dc-murr1/simba_gas_nearest_halo_radius.txt'),
                         units='Mpc')
mass = unyt.unyt_array(numpy.loadtxt(
    '/cosma5/data/durham/dc-murr1/simba_gas_nearest_halo_mass.txt'),
                       units='msun')
gas_halos = numpy.loadtxt(
    '/cosma5/data/durham/dc-murr1/simba_gas_neighbour_halos.txt')
gas_halos = gas_halos.astype(int)

frac_distance = distance / radius

mask = numpy.where(mass > 0)[0]
spread = spread[mask]
frac_distance = frac_distance[mask]
mass = mass[mask]
gas_halos = gas_halos[mask]
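
Note the two unyt conversion idioms used above: to() returns a new object (as for the kpc-to-Mpc quantity), while convert_to_units() mutates in place. A small sketch:

import unyt

d = unyt.unyt_array([1000.0, 2000.0], 'kpc')
d2 = d.to('Mpc')           # new array; d is unchanged
d.convert_to_units('Mpc')  # in place; d is now in Mpc
assert d.units == d2.units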
input_filename = "../raw/Hunt2020.txt"

output_filename = "Hunt2020_Data.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)

# Read the data
raw = np.loadtxt(input_filename)
M_star = pow(10.0, raw[:, 0]) * unyt.Solar_Mass
MH2_per_Mstar = pow(10.0, raw[:, 1]) * unyt.dimensionless
MH2_per_Mstar_lo = pow(10.0, raw[:, 2]) * unyt.dimensionless
MH2_per_Mstar_hi = pow(10.0, raw[:, 3]) * unyt.dimensionless

y_scatter = unyt.unyt_array(
    [MH2_per_Mstar - MH2_per_Mstar_lo, MH2_per_Mstar_hi - MH2_per_Mstar])

# Meta-data
comment = ("Stellar Masses obtained assuming a Chabrier (2003) IMF. "
           "local measurements decoupled from the Hubble flow (no h)."
           "H2 measurements via CO detections in the MAGMA sample.")

citation = "Hunt et al 2020 (MAGMA)"
bibcode = "2020A&A...643A.180H"
name = "Stellar mass - H2 Gas to Stellar Mass ratio"
plot_as = "points"
redshift = 0.0
h = h_sim

# Write everything
processed = ObservationalData()
def main():

    # Load TON from a CIF file, replicate the cell
    # Use mbuild to create a zeolite supercell from CIF
    cif_path = resource_filename(
        "mc_examples",
        "realistic_workflows/zeolite_adsorption/resources/structures/TON.cif")
    lattice = mbuild.lattice.load_cif(cif_path)
    compound_dict = {
        "Si": mbuild.Compound(name="Si"),
        "O": mbuild.Compound(name="O"),
    }
    zeolite = lattice.populate(compound_dict, 2, 2, 6)

    # Create a CG methane, load and apply ff
    methane = mbuild.Compound(name="_CH4")
    ff_path = resource_filename(
        "mc_examples",
        "realistic_workflows/zeolite_adsorption/resources/ffxml/adsorbates.xml",
    )
    ff_ads = foyer.Forcefield(ff_path)
    methane_ff = ff_ads.apply(methane)

    # Define pure fluid temperatures and chemical potentials
    temperatures = [298 * u.K, 309 * u.K, 350 * u.K]
    mus_fluid = np.arange(-49, -30, 3) * u.Unit("kJ/mol")

    # Define the pressures at which we wish to study adsorption
    pressures = [
        0.01,
        0.1,
        0.25,
        0.5,
        0.75,
        1.0,
        2.0,
        3.0,
        5.0,
    ] * u.bar

    # Select the zeolite ff
    zeo_ff_names = ["june", "trappe"]

    # Define a few custom_args that will be
    # the same for all zeolite simulations
    custom_args = {
        "charge_style": "none",
        "vdw_cutoff": 14.0 * u.angstrom,
        "prop_freq": 10,
        "max_molecules": [1, 10000],
    }

    # Loop over different zeolite ff's
    for zeo_ff_name in zeo_ff_names:

        # Load and apply ff to the zeolite structure
        ff_path = resource_filename(
            "mc_examples",
            f"realistic_workflows/zeolite_adsorption/resources/ffxml/zeo_{zeo_ff_name}.xml",
        )
        ff_zeo = foyer.Forcefield(ff_path)
        zeolite_ff = ff_zeo.apply(zeolite)

        # Create the box_list, species_list, System, and MoveSet.
        # These are not dependent upon (T,P) condition
        box_list = [zeolite]
        species_list = [zeolite_ff, methane_ff]
        mols_in_boxes = [[1, 0]]

        system = mc.System(box_list, species_list, mols_in_boxes=mols_in_boxes)
        moveset = mc.MoveSet("gcmc", species_list)

        # Loop over each temperature to compute an isotherm
        for temperature in temperatures:

            # Before we begin we must determine the
            # chemical potentials required to achieve
            # the desired pressures
            fluid_pressures = []
            for mu_fluid in mus_fluid:
                dirname = f"fluid_T_{temperature:0.1f}_mu_{mu_fluid:.1f}".replace(
                    " ", "_").replace("/", "-")
                thermo = ThermoProps(dirname + "/prod.out.prp")
                fluid_pressures.append(np.mean(thermo.prop("Pressure")))
            fluid_pressures = u.unyt_array(fluid_pressures)

            # Fit a line to mu vs. P
            slope, intercept, r_value, p_value, stderr = linregress(
                np.log(fluid_pressures.to_value(u.bar)).flatten(),
                y=mus_fluid.to_value("kJ/mol").flatten(),
            )
            # Determine chemical potentials
            mus = (slope * np.log(pressures.in_units(u.bar)) +
                   intercept) * u.Unit("kJ/mol")

            # Loop over each pressure and run the MC simulation!
            for (pressure, mu) in zip(pressures, mus):
                print(f"\nRun simulation: T = {temperature}, P = {pressure}\n")
                dirname = f"zeo_ff_{zeo_ff_name}_T_{temperature:0.1f}_P_{pressure:0.2f}".replace(
                    " ", "_").replace("/", "-")
                if not os.path.isdir(dirname):
                    os.mkdir(dirname)
                with temporary_cd(dirname):

                    mc.run(
                        system=system,
                        moveset=moveset,
                        run_type="equil",
                        run_length=50000,
                        temperature=temperature,
                        run_name="equil",
                        chemical_potentials=["none", mu],
                        **custom_args,
                    )

                    mc.restart(
                        restart_from="equil",
                        run_name="prod",
                        run_type="prod",
                        total_run_length=200000,
                    )
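
The chemical-potential calibration above relies on the near-ideal-gas relation mu ~ mu0 + kT ln P, so mu is fitted as a linear function of ln P and the fit is inverted at the target pressures. A stripped-down sketch with synthetic numbers:

import numpy as np
from scipy.stats import linregress

# Synthetic calibration data: pressures measured at known chemical potentials.
mus = np.arange(-49, -30, 3.0)          # kJ/mol
pressures = np.exp((mus + 40.0) / 2.5)  # fake P(mu) in bar, for illustration only

slope, intercept, r_value, p_value, stderr = linregress(np.log(pressures), mus)

# Invert the fit: chemical potentials needed to hit the target pressures.
target_p = np.array([0.1, 1.0, 5.0])    # bar
mus_needed = slope * np.log(target_p) + intercept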
Example #12
    )
    label = "_".join(labels[i].split(" "))
    output_filename = f"DeLooze20_individual_{label}.hdf5"

    y_all.append(pow(10, raw[:, 0]))
    x_all.append(raw[:, 1])

    MHIMstar_med = pow(10, raw[:, 0]) * unyt.dimensionless
    MHIMstar_lo = pow(10, raw[:, 2]) * unyt.dimensionless
    MHIMstar_hi = pow(10, raw[:, 4]) * unyt.dimensionless
    oabundance_med = raw[:, 1] * unyt.dimensionless
    oabundance_lo = raw[:, 3] * unyt.dimensionless
    oabundance_hi = raw[:, 5] * unyt.dimensionless

    # Define the scatter as offset from the median value
    y_scatter = unyt.unyt_array(
        (MHIMstar_med - MHIMstar_lo, MHIMstar_hi - MHIMstar_med))
    x_scatter = unyt.unyt_array(
        (oabundance_med - oabundance_lo, oabundance_hi - oabundance_med))

    # Meta-data
    comment = f"values for individual galaxies derived from {label} survey"
    citation = f"{label} compiled by De Looze et al. (2020)"
    bibcode = "2020MNRAS.496.3668D"
    name = "MHI/Mstar as a function of 12 + log10(O/H)"
    plot_as = "points"
    redshift = 0.0
    redshift_lower = 0.0
    redshift_upper = 3.0
    h = 0.7

    # Write everything
# Cosmology
h_sim = cosmology.h
Omega_b = cosmology.Ob0
Omega_m = cosmology.Om0

input_filename = "../raw/Lin2012.dat"

output_filename = "Lin2012.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)

# Read the data
raw = np.loadtxt(input_filename)
M_500 = unyt.unyt_array((0.71 / h_sim) * 10 ** raw[:, 0], units="Msun")
M_500_error = unyt.unyt_array((0.71 / h_sim) * raw[:, 1], units="Msun")
M_500_gas = unyt.unyt_array((0.71 / h_sim) * 10 ** raw[:, 2], units="Msun")
M_500_gas_error = unyt.unyt_array((0.71 / h_sim) * raw[:, 3], units="Msun")
z = raw[:, 6]

# Compute the gas fractions
fb_500 = (M_500_gas / M_500) * (0.71 / h_sim) ** (2.5)
fb_500_error = fb_500 * ((M_500_error / M_500) + (M_500_gas_error / M_500_gas))

# Normalise by the cosmic mean
fb_500 = fb_500 / (Omega_b / Omega_m)
fb_500_error = fb_500_error / (Omega_b / Omega_m)

# Select only the low-z data
M_500 = M_500[z < 0.25]
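
The gas-fraction uncertainty above adds relative errors linearly, a conservative upper bound; independent errors would combine in quadrature. A sketch of the two conventions:

import numpy as np

m, dm = 1e14, 1e13  # halo mass and its error [Msun]
g, dg = 1e13, 2e12  # gas mass and its error [Msun]
f = g / m

err_linear = f * (dm / m + dg / g)                 # upper bound, as used above
err_quad = f * np.sqrt((dm / m)**2 + (dg / g)**2)  # independent errors
assert err_quad <= err_linear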
    "when the gas was last heated by SNII, split by redshift")

snapshot_filenames = [
    f"{directory}/{snapshot}" for directory, snapshot in zip(
        arguments.directory_list, arguments.snapshot_list)
]

names = arguments.name_list
output_path = arguments.output_directory

plt.style.use(arguments.stylesheet_location)

data = [load(snapshot_filename) for snapshot_filename in snapshot_filenames]
number_of_bins = 256

SNII_density_bins = unyt.unyt_array(np.logspace(-5, 6.5, number_of_bins),
                                    units="1/cm**3")
log_SNII_density_bin_width = np.log10(SNII_density_bins[1].value) - np.log10(
    SNII_density_bins[0].value)
SNII_density_centers = 0.5 * (SNII_density_bins[1:] + SNII_density_bins[:-1])

# Begin plotting

fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
axes = axes.flat

ax_dict = {
    "$z < 1$": axes[0],
    "$1 < z < 3$": axes[1],
    "$z > 3$": axes[2],
}
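
One subtlety in the binning above: 0.5 * (edges[1:] + edges[:-1]) gives arithmetic midpoints, which sit off-centre on a log axis; geometric means are the log-centred choice. A quick comparison:

import numpy as np

edges = np.logspace(-5, 6.5, 256)
arith = 0.5 * (edges[1:] + edges[:-1])  # as used above
geom = np.sqrt(edges[1:] * edges[:-1])  # centred in log space
# The geometric centre is always below the arithmetic one for log-spaced bins.
assert np.all(geom < arith)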
def test_parse_units_check_dims_success():
    desired = unyt.unyt_array([1, 2, 3, 4], 'ft')
    actual = process_unit_input(([1, 2, 3, 4], 'ft'),
                                default_units='inch',
                                check_dims=True)
    assert_allclose_units(actual, desired)
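
The check_dims path presumably compares dimensions rather than the units themselves, which is why 'ft' input passes against an 'inch' default; the underlying comparison can be sketched directly with unyt:

import unyt

ft = unyt.unyt_array([1, 2, 3, 4], 'ft')
assert ft.units.dimensions == unyt.Unit('inch').dimensions  # both lengths
assert ft.units.dimensions != unyt.Unit('s').dimensions     # a time would fail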
    os.mkdir(output_directory)

Fe_over_H = 12.0 - 4.5
O_over_H = 12.0 - 3.31
O_over_Fe = O_over_H - Fe_over_H

# tabulate/compute the same ratios from Anders & Grevesse (1989)
Fe_over_H_AG89 = 7.67
O_over_H_AG89 = 8.93

O_over_Fe_AG89 = O_over_H_AG89 - Fe_over_H_AG89

data = np.loadtxt(input_filename, skiprows=3)
FeH_scu = data[:, 0] + Fe_over_H_AG89 - Fe_over_H
OFe_scu = data[:, 4] - data[:, 0] + O_over_Fe_AG89 - O_over_Fe
x = unyt.unyt_array(FeH_scu * unyt.dimensionless)
y = unyt.unyt_array(OFe_scu * unyt.dimensionless)

# Meta-data
comment = (
    "Solar abundances are taken from Asplund et al. (2009), "
    "[Fe/H]Sun = 7.5 and [O/H]Sun = 8.69"
)
citation = "Geisler et al. (2005), Sculptor"
bibcode = "2005AJ....129.1428G"
name = "[O/Fe] as a function of [Fe/H] for Sculptor"
plot_as = "points"
redshift = 0.0

# Write everything
processed = ObservationalData()
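
The scale conversion above follows the general rule [X/H]_new = [X/H]_old + (X_sun,old - X_sun,new): moving an abundance between solar scales only requires the difference of the adopted solar values. A worked example:

# [Fe/H] measured on the Anders & Grevesse (1989) scale,
# moved onto the Asplund et al. (2009) scale.
Fe_sun_AG89 = 7.67
Fe_sun_A09 = 7.50
feh_ag89 = -1.20  # hypothetical literature value
feh_a09 = feh_ag89 + Fe_sun_AG89 - Fe_sun_A09  # = -1.03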
def test_parse_units_convert_success():
    desired = unyt.unyt_array([12, 24, 36, 48], 'inch')
    actual = process_unit_input(([1, 2, 3, 4], 'ft'),
                                default_units='inch',
                                convert=True)
    assert_allclose_units(actual, desired)
comment = (
    "Assuming Chabrier IMF. z=0.01 - 0.2. No h-correction "
    "was required as data was supplied h-free. "
    "Adopts cosmological parameters of h=0.7, omega0=0.3 "
    "omegaL=0.7."
)
citation = "Moustakas et al. (2013) (SDSS)"
bibcode = "2013ApJ...767...50M"
name = "Galaxy Stellar Mass - Passive Fraction from SDSS"
plot_as = "points"
redshift = 0.1
h = cosmology.h

log_M = raw.T[2]
M = unyt.unyt_array(10 ** (log_M), units=unyt.Solar_Mass)
passive_frac = unyt.unyt_array(raw.T[4] / raw.T[3], units="dimensionless")


processed.associate_x(
    M, scatter=None, comoving=False, description="Galaxy Stellar Mass"
)
processed.associate_y(
    passive_frac, scatter=None, comoving=False, description="Passive Fraction"
)
processed.associate_citation(citation, bibcode)
processed.associate_name(name)
processed.associate_comment(comment)
processed.associate_redshift(redshift)
processed.associate_plot_as(plot_as)
processed.associate_cosmology(cosmology)
def test_parse_units_already_unyt_no_copy():
    in_ = unyt.unyt_array([1, 2, 3, 4], 'ft')
    out_ = process_unit_input(in_, copy=False)
    assert in_ is out_
Example #20
# Reading the Kennicutt 1998 data

array_of_interest = np.arange(1, 3 + 0.25, 0.25)


def KS(sigma_g, n, A):
    return A * sigma_g**n


Sigma_g = 10**array_of_interest
Sigma_star = KS(Sigma_g, 1.4, 1.515e-4)

Sigma_H2 = Sigma_g
Sigma_SFR = Sigma_star

SigmaH2 = unyt.unyt_array(Sigma_H2, units="Msun/pc**2")
SigmaSFR = unyt.unyt_array(Sigma_SFR, units="Msun/yr/kpc**2")

processed.associate_x(SigmaH2,
                      scatter=None,
                      comoving=False,
                      description="H2 Surface density")
processed.associate_y(
    SigmaSFR,
    scatter=None,
    comoving=False,
    description="Star Formation Rate Surface Density",
)

processed.associate_citation(citation, bibcode)
processed.associate_name(name)
def test_parse_units_xarray():
    in_ = xr.DataArray([1, 2, 3, 4], attrs={'units': 'mm'})
    actual = process_unit_input(in_)
    desired = unyt.unyt_array([1, 2, 3, 4], 'mm')
    assert_allclose_units(actual, desired)
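
For the xarray cases, the units are presumably read off the DataArray's attrs; a minimal sketch of that conversion (hypothetical helper, not the real process_unit_input):

import unyt
import xarray as xr

def unyt_from_xarray(da, default='dimensionless', copy=True):
    """Hypothetical: wrap a DataArray's data in a unyt array via its 'units' attr."""
    units = da.attrs.get('units', default)
    data = da.values.copy() if copy else da.values
    return unyt.unyt_array(data, units)

out = unyt_from_xarray(xr.DataArray([1, 2, 3, 4], attrs={'units': 'mm'}))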
Example #22
            stddev = np.std(means)
            error = np.std(means) / np.sqrt(num_obs)
        else:
            stddev = errors[0]
            error = errors[0]
        try:
            pair_means[label].append(weighted_mean)
            pair_stds[label].append(stddev)
            pair_mean_errors[label].append(error)
        except KeyError:
            pair_means[label] = [weighted_mean]
            pair_stds[label] = [stddev]
            pair_mean_errors[label] = [error]

for label in pairs_to_use:
    values = u.unyt_array(pair_means[label])
    mean = np.mean(values)
    xvalues = (values - mean) * u.m / u.s
    stddevs = u.unyt_array(pair_stds[label])
    errors = u.unyt_array(pair_mean_errors[label])
    yvalues = list(range(0, len(stars_to_use)))

    weighted_mean, weight_sum = np.average(values,
                                           weights=errors**-2,
                                           returned=True)
    error_on_weighted_mean = (1 / np.sqrt(weight_sum))
    #    tqdm.write(f'{error_on_weighted_mean:.2f}')
    #    ax_dict[label].errorbar(x=xvalues, y=yvalues,
    #                            yerr=None, xerr=stddevs,
    #                            marker='', capsize=4, linestyle='',
    #                            color='DodgerBlue', ecolor='DodgerBlue',
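
The inverse-variance weighted mean and its formal error, as computed with np.average(..., returned=True) above, in isolation:

import numpy as np

values = np.array([10.1, 9.8, 10.4])
errors = np.array([0.2, 0.1, 0.3])

weights = errors**-2
wmean, weight_sum = np.average(values, weights=weights, returned=True)
error_on_wmean = 1.0 / np.sqrt(weight_sum)  # sigma_wm = (sum of weights)^(-1/2)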
Example #23
def main():
    """Run the main routine of the script."""

    # Define the limits to plot in the various stellar parameters.
    temp_lims = (5400, 6300) * u.K
    mtl_lims = (-0.75, 0.45)
    # mag_lims = (4, 5.8)
    logg_lims = (4.1, 4.6)

    # Define the model to use:
    if args.constant:
        model_func = fit.constant_model
    elif args.linear:
        model_func = fit.linear_model
    elif args.quadratic:
        model_func = fit.quadratic_model
    elif args.cubic:
        model_func = fit.cubic_model
    elif args.cross_term:
        model_func = fit.cross_term_model
    elif args.quadratic_cross_term:
        model_func = fit.quad_cross_term_model
    elif args.quad_cross_term:
        model_func = fit.quad_cross_term_model

    model_name = '_'.join(model_func.__name__.split('_')[:-1])

    if args.transitions:
        tqdm.write('Unpickling transitions list.')
        with open(vcl.final_selection_file, 'r+b') as f:
            transitions_list = pickle.load(f)
        vprint(f'Found {len(transitions_list)} transitions.')
    elif args.pairs:
        tqdm.write('Unpickling pairs list.')
        with open(vcl.final_pair_selection_file, 'r+b') as f:
            pairs_list = pickle.load(f)
        vprint(f'Found {len(pairs_list)} pairs in the list.')

    db_file = vcl.databases_dir / 'stellar_db_uncorrected.hdf5'
    if not db_file.exists():
        raise FileNotFoundError('The given stellar database does not exist:'
                                f' {db_file}')

    # Load data from HDF5 database file.
    tqdm.write('Reading data from stellar database file...')
    if args.transitions:
        star_transition_offsets = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_transition_offsets')
        star_transition_offsets_EotWM = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_transition_offsets_EotWM')
        star_transition_offsets_EotM = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_transition_offsets_EotM')
        # star_transition_offsets_stds = u.unyt_array.from_hdf5(
        #         db_file, dataset_name='star_standard_deviations')
    elif args.pairs:
        star_pair_separations = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_pair_separations')
        star_pair_separations_EotWM = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_pair_separations_EotWM')
        star_pair_separations_EotM = u.unyt_array.from_hdf5(
            db_file, dataset_name='star_pair_separations_EotM')
    star_temperatures = u.unyt_array.from_hdf5(
        db_file, dataset_name='star_temperatures')

    with h5py.File(db_file, mode='r') as f:

        star_metallicities = hickle.load(f, path='/star_metallicities')
        # star_magnitudes = hickle.load(f, path='/star_magnitudes')
        star_gravities = hickle.load(f, path='/star_gravities')
        transition_column_dict = hickle.load(f,
                                             path='/transition_column_index')
        pair_column_dict = hickle.load(f, path='/pair_column_index')

        star_names = hickle.load(f, path='/star_row_index')

    # Handle various fitting and plotting setup:
    eras = {'pre': 0, 'post': 1}
    param_dict = {'temp': 0, 'mtl': 1, 'logg': 2}

    # Create lists to store information about each fit in:
    index_nums = []
    chi_squareds_pre, sigmas_pre, sigma_sys_pre = [], [], []
    chi_squareds_post, sigmas_post, sigma_sys_post = [], [], []
    index_num = 0

    # Figure out how many parameters the model function takes, so we know how
    # many to dynamically give it later. Subtract 1 for the parameter which
    # takes the stellar parameters.
    params_list = [0 for i in range(len(signature(model_func).parameters) - 1)]

    # Define the folder to put plots in.
    output_dir = vcl.output_dir
    if args.transitions:
        fit_target = 'transitions'
    elif args.pairs:
        fit_target = 'pairs'
    plots_folder = output_dir /\
        f'stellar_parameter_fits_{fit_target}_{args.sigma}sigma/{model_name}'
    vprint(f'Creating plots in {plots_folder}')
    if not plots_folder.exists():
        os.makedirs(plots_folder)

    # Create a dictionary of fit coefficients assigned to each transition's
    # label
    coefficients_dict = {}
    covariance_dict = {}
    sigmas_dict = {}
    sigma_sys_dict = {}

    if args.transitions:
        tqdm.write('Creating plots for each transition...')
        for transition in tqdm(transitions_list):
            for order_num in transition.ordersToFitIn:
                index_nums.append(index_num)
                index_num += 1
                label = '_'.join([transition.label, str(order_num)])
                vprint(20 * '-')
                vprint(f'Analyzing {label}...')

                # The column number to use for this transition:
                col = transition_column_dict[label]
                ylimits = (-300 * u.m / u.s,
                           300 * u.m / u.s) if not args.full_range else None

                comp_fig, axes_dict = create_comparison_figure(
                    ylims=ylimits,
                    fit_target='transitions',
                    temp_lims=temp_lims,
                    mtl_lims=mtl_lims,
                    logg_lims=logg_lims)

                for time in eras.keys():

                    vprint(20 * '=')
                    vprint(f'Working on {time}-change era.')
                    mean = np.nanmean(star_transition_offsets[eras[time], :,
                                                              col])

                    # First, create a masked version to catch any missing
                    # entries:
                    m_offsets = ma.masked_invalid(
                        star_transition_offsets[eras[time], :, col])
                    total_stars = ma.count(m_offsets)
                    vprint(f'Found {total_stars} stars with data.')
                    m_offsets = m_offsets.reshape([len(m_offsets), 1])
                    # Then create a new array from the non-masked data:
                    offsets = u.unyt_array(m_offsets[~m_offsets.mask],
                                           units=u.m / u.s)
                    vprint(f'Median of offsets is {np.nanmedian(offsets)}')

                    #                m_stds = ma.masked_invalid(star_transition_offsets_stds[
                    #                            eras[time], :, col])
                    #                m_stds = m_stds.reshape([len(m_stds), 1])
                    #                stds = u.unyt_array(m_stds[~m_stds.mask],
                    #                                    units=u.m/u.s)

                    m_eotwms = ma.masked_invalid(
                        star_transition_offsets_EotWM[eras[time], :, col])
                    m_eotwms = m_eotwms.reshape([len(m_eotwms), 1])
                    eotwms = u.unyt_array(m_eotwms[~m_offsets.mask],
                                          units=u.m / u.s)

                    m_eotms = ma.masked_invalid(
                        star_transition_offsets_EotM[eras[time], :, col])
                    m_eotms = m_eotms.reshape([len(m_eotms), 1])
                    # Use the same mask as for the offsets.
                    eotms = u.unyt_array(m_eotms[~m_offsets.mask],
                                         units=u.m / u.s)
                    # Create an error array which uses the greater of the error
                    # on the mean or the error on the weighted mean.
                    err_array = ma.array(np.maximum(eotwms, eotms).value)

                    vprint(f'Mean is {np.mean(offsets)}')
                    weighted_mean = np.average(offsets, weights=err_array**-2)
                    vprint(f'Weighted mean is {weighted_mean}')

                    # Mask the various stellar parameter arrays with the same
                    # mask so that everything stays in sync.
                    temperatures = ma.masked_array(star_temperatures)
                    temps = temperatures[~m_offsets.mask]
                    metallicities = ma.masked_array(star_metallicities)
                    metals = metallicities[~m_offsets.mask]
                    # magnitudes = ma.masked_array(star_magnitudes)
                    # mags = magnitudes[~m_offsets.mask]
                    gravities = ma.masked_array(star_gravities)
                    loggs = gravities[~m_offsets.mask]

                    stars = ma.masked_array([key for key in star_names.keys()
                                             ]).reshape(
                                                 len(star_names.keys()), 1)
                    names = stars[~m_offsets.mask]

                    # Stack the stellar parameters into vertical slices
                    # for passing to model functions.
                    x_data = ma.array(np.stack((temps, metals, loggs), axis=0))

                    # Create the parameter list for this run of fitting.
                    params_list[0] = float(mean)

                    beta0 = tuple(params_list)
                    vprint(beta0)

                    results = fit.find_sys_scatter(model_func,
                                                   x_data,
                                                   ma.array(offsets.value),
                                                   err_array,
                                                   beta0,
                                                   n_sigma=args.sigma,
                                                   tolerance=0.001,
                                                   verbose=args.verbose)

                    mask = results['mask_list'][-1]
                    residuals = ma.array(results['residuals'], mask=mask)
                    x_data.mask = mask
                    err_array.mask = mask

                    # for item1, item2 in zip(residuals, ma.getdata(residuals)):
                    #     print(f'{item1:10.3f}   {item2:10.3f}')

                    chi_squared_nu = results['chi_squared_list'][-1]
                    sys_err = results['sys_err_list'][-1] * u.m / u.s

                    vprint(f'Terminated with sys_err = {sys_err}')
                    vprint(f'Finished {label}_{time} in'
                           f' {len(results["sys_err_list"])} steps.')
                    # Add the optimized parameters and covariances to the
                    # dictionary. Make sure we separate them by time period.
                    coefficients_dict[label + '_' + time] = results['popt']
                    covariance_dict[label + '_' + time] = results['pcov']

                    sigma = np.nanstd(residuals) * u.m / u.s

                    sigmas_dict[label + '_' + time] = sigma
                    sigma_sys_dict[label + '_' + time] = sys_err

                    if time == 'pre':
                        chi_squareds_pre.append(chi_squared_nu)
                        sigmas_pre.append(sigma.value)
                        sigma_sys_pre.append(sys_err.value)
                    else:
                        chi_squareds_post.append(chi_squared_nu)
                        sigmas_post.append(sigma.value)
                        sigma_sys_post.append(sys_err.value)

                    for plot_type, lims in zip(
                        ('temp', 'mtl', 'logg'),
                        (temp_lims, mtl_lims, logg_lims)):
                        ax = axes_dict[f'{plot_type}_{time}']
                        plot_data_points(
                            ax,
                            ma.compressed(x_data[param_dict[plot_type]]),
                            ma.compressed(residuals),
                            thick_err=ma.compressed(err_array),
                            # thin_err=iter_err_array,
                            thin_err=None,
                            era=time)
                        if args.label_outliers:
                            # Find outliers more than 3 sigma away from zero so
                            # we can label them.
                            labels = []
                            for x, y, e in zip(
                                    range(len(x_data[param_dict[plot_type]])),
                                    residuals, err_array):
                                sig_lim = args.sigma * e
                                if abs(y) > sig_lim:
                                    star_name = find_star(
                                        x_data[:, x], x_data, names)

                                    labels.append(
                                        ax.text(x_data[param_dict[plot_type],
                                                       x],
                                                y,
                                                star_name,
                                                horizontalalignment='left',
                                                verticalalignment='top',
                                                size=8,
                                                weight='bold',
                                                color='Red'))
                            # print(labels)
                            adjust_text(labels,
                                        ax=ax,
                                        only_move={
                                            'points': 'y',
                                            'text': 'xy',
                                            'objects': 'xy'
                                        },
                                        arrowprops=dict(arrowstyle='-',
                                                        color='OliveDrab'),
                                        autoalign=True,
                                        lim=1000,
                                        fontsize=9)

                        points = residuals.count()
                        outliers = total_stars - points
                        ax.annotate(
                            f'Blendedness: {transition.blendedness}\n'
                            f'#Stars: {points}\n'
                            f'#Outliers: {outliers}', (0.01, 0.99),
                            xycoords='axes fraction',
                            verticalalignment='top')
                        ax.annotate(
                            fr'$\chi^2_\nu$: {chi_squared_nu:.4f}'
                            '\n'
                            fr'$\sigma$: {sigma:.2f}'
                            '\n'
                            r'$\sigma_\mathrm{sys}$:'
                            f' {sys_err:.2f}', (0.99, 0.99),
                            xycoords='axes fraction',
                            horizontalalignment='right',
                            verticalalignment='top')
                        data = np.array(
                            ma.masked_invalid(residuals).compressed())
                        axes_dict[f'hist_{time}'].hist(
                            data,
                            bins='fd',
                            color='Black',
                            histtype='step',
                            orientation='horizontal')

                file_name = plots_folder / f'{label}_{model_name}.png'
                vprint(f'Saving file {file_name.name}')
                vprint('\n')

                comp_fig.savefig(str(file_name))
                plt.close('all')

    elif args.pairs:
        tqdm.write('Creating plots for each pair...')
        for pair in tqdm(pairs_list):
            for order_num in pair.ordersToMeasureIn:
                index_nums.append(index_num)
                index_num += 1
                label = '_'.join([pair.label, str(order_num)])
                vprint(20 * '-')
                vprint(f'Analyzing {label}...')

                # The column number to use for this transition:
                col = pair_column_dict[label]
                ylimits = (-300 * u.m / u.s,
                           300 * u.m / u.s) if not args.full_range else None

                comp_fig, axes_dict = create_comparison_figure(
                    ylims=ylimits,
                    fit_target='pairs',
                    temp_lims=temp_lims,
                    mtl_lims=mtl_lims,
                    logg_lims=logg_lims)

                for time in eras.keys():

                    vprint(20 * '=')
                    vprint(f'Working on {time}-change era.')
                    mean = np.nanmean(star_pair_separations[eras[time], :,
                                                            col])

                    # First, create a masked version to catch any missing
                    # entries:
                    m_seps = ma.masked_invalid(
                        star_pair_separations[eras[time], :, col])
                    total_stars = ma.count(m_seps)
                    vprint(f'Found {total_stars} stars with data.')
                    m_seps = m_seps.reshape([len(m_seps), 1])
                    # Then create a new array from the non-masked data:
                    separations = u.unyt_array(m_seps[~m_seps.mask],
                                               units=u.m / u.s)
                    vprint('Median of separations is'
                           f' {np.nanmedian(separations)}')

                    m_eotwms = ma.masked_invalid(
                        star_pair_separations_EotWM[eras[time], :, col])
                    m_eotwms = m_eotwms.reshape([len(m_eotwms), 1])
                    eotwms = u.unyt_array(m_eotwms[~m_seps.mask],
                                          units=u.m / u.s)

                    m_eotms = ma.masked_invalid(
                        star_pair_separations_EotM[eras[time], :, col])
                    m_eotms = m_eotms.reshape([len(m_eotms), 1])
                    # Use the same mask as for the offsets.
                    eotms = u.unyt_array(m_eotms[~m_seps.mask],
                                         units=u.m / u.s)
                    # Create an error array which uses the greater of the error
                    # on the mean or the error on the weighted mean.
                    err_array = ma.array(np.maximum(eotwms, eotms).value)

                    vprint(f'Mean is {np.mean(separations)}')
                    weighted_mean = np.average(separations,
                                               weights=err_array**-2)
                    vprint(f'Weighted mean is {weighted_mean}')

                    # Mask the various stellar parameter arrays with the same
                    # mask so that everything stays in sync.
                    temperatures = ma.masked_array(star_temperatures)
                    temps = temperatures[~m_seps.mask]
                    metallicities = ma.masked_array(star_metallicities)
                    metals = metallicities[~m_seps.mask]
                    gravities = ma.masked_array(star_gravities)
                    loggs = gravities[~m_seps.mask]

                    stars = ma.masked_array([key for key in star_names.keys()
                                             ]).reshape(
                                                 len(star_names.keys()), 1)
                    names = stars[~m_seps.mask]

                    # Stack the stellar parameters into vertical slices
                    # for passing to model functions.
                    x_data = ma.array(np.stack((temps, metals, loggs), axis=0))

                    # Create the parameter list for this run of fitting.
                    params_list[0] = float(mean)

                    beta0 = tuple(params_list)
                    vprint(beta0)

                    results = fit.find_sys_scatter(model_func,
                                                   x_data,
                                                   ma.array(separations.value),
                                                   err_array,
                                                   beta0,
                                                   n_sigma=args.sigma,
                                                   tolerance=0.001,
                                                   verbose=args.verbose)

                    mask = results['mask_list'][-1]
                    residuals = ma.array(results['residuals'], mask=mask)
                    x_data.mask = mask
                    err_array.mask = mask

                    chi_squared_nu = results['chi_squared_list'][-1]
                    sys_err = results['sys_err_list'][-1] * u.m / u.s

                    vprint(f'Terminated with sys_err = {sys_err}')
                    vprint(f'Finished {label}_{time} in'
                           f' {len(results["sys_err_list"])} steps.')
                    # Add the optimized parameters and covariances to the
                    # dictionary. Make sure we separate them by time period.
                    coefficients_dict[label + '_' + time] = results['popt']
                    covariance_dict[label + '_' + time] = results['pcov']

                    sigma = np.nanstd(residuals) * u.m / u.s

                    sigmas_dict[label + '_' + time] = sigma
                    sigma_sys_dict[label + '_' + time] = sys_err

                    if time == 'pre':
                        chi_squareds_pre.append(chi_squared_nu)
                        sigmas_pre.append(sigma.value)
                        sigma_sys_pre.append(sys_err.value)
                    else:
                        chi_squareds_post.append(chi_squared_nu)
                        sigmas_post.append(sigma.value)
                        sigma_sys_post.append(sys_err.value)

                    for plot_type, lims in zip(
                        ('temp', 'mtl', 'logg'),
                        (temp_lims, mtl_lims, logg_lims)):
                        ax = axes_dict[f'{plot_type}_{time}']
                        plot_data_points(
                            ax,
                            ma.compressed(x_data[param_dict[plot_type]]),
                            ma.compressed(residuals),
                            thick_err=ma.compressed(err_array),
                            # thin_err=None,
                            thin_err=np.sqrt(
                                ma.compressed(err_array)**2 +
                                sys_err.value**2),
                            era=time)
                        if args.label_outliers:
                            # Find outliers more than 3 sigma away from zero so
                            # we can label them.
                            labels = []
                            for x, y, e in zip(
                                    range(len(x_data[param_dict[plot_type]])),
                                    residuals, err_array):
                                sig_lim = args.sigma * e
                                if abs(y) > sig_lim:
                                    star_name = find_star(
                                        x_data[:, x], x_data, names)

                                    labels.append(
                                        ax.text(x_data[param_dict[plot_type],
                                                       x],
                                                y,
                                                star_name,
                                                horizontalalignment='left',
                                                verticalalignment='top',
                                                size=8,
                                                weight='bold',
                                                color='Red'))
                            # print(labels)
                            adjust_text(labels,
                                        ax=ax,
                                        only_move={
                                            'points': 'y',
                                            'text': 'xy',
                                            'objects': 'xy'
                                        },
                                        arrowprops=dict(arrowstyle='-',
                                                        color='OliveDrab'),
                                        autoalign=True,
                                        lim=1000,
                                        fontsize=9)

                        points = residuals.count()
                        outliers = total_stars - points
                        ax.annotate(
                            f'Blend tuple: {pair.blendTuple}\n'
                            f'#Stars: {points}\n'
                            f'#Outliers: {outliers}', (0.01, 0.99),
                            xycoords='axes fraction',
                            verticalalignment='top')
                        ax.annotate(
                            fr'$\chi^2_\nu$: {chi_squared_nu:.4f}'
                            '\n'
                            fr'$\sigma$: {sigma:.2f}'
                            '\n'
                            r'$\sigma_\mathrm{sys}$:'
                            f' {sys_err:.2f}', (0.99, 0.99),
                            xycoords='axes fraction',
                            horizontalalignment='right',
                            verticalalignment='top')
                        data = np.array(
                            ma.masked_invalid(residuals).compressed())
                        axes_dict[f'hist_{time}'].hist(
                            data,
                            bins='fd',
                            color='Black',
                            histtype='step',
                            orientation='horizontal')

                file_name = plots_folder / f'{label}_{model_name}.png'
                vprint(f'Saving file {file_name.name}')
                vprint('\n')

                comp_fig.savefig(str(file_name))
                plt.close('all')

    # Save metadata from this run's fits to CSV:
    csv_file = plots_folder / f'{model_name}_{fit_target}_fit_results.csv'

    with open(csv_file, 'w', newline='') as f:
        datawriter = csv.writer(f)
        header = ('#index', 'chi_squared_pre', 'sigma_pre', 'sigma_sys_pre',
                  'chi_squared_post', 'sigma_post', 'sigma_sys_post')
        datawriter.writerow(header)
        for row in zip(index_nums, chi_squareds_pre, sigmas_pre, sigma_sys_pre,
                       chi_squareds_post, sigmas_post, sigma_sys_post):
            datawriter.writerow(row)

    # Save the function used and the parameters found for each transition/pair
    # to an HDF5 file for use in other scripts.
    output_dir = output_dir / 'fit_params'
    hdf5_file = output_dir /\
        f'{model_name}_{fit_target}_{args.sigma:.1f}sigma_params.hdf5'
    if not hdf5_file.parent.exists():
        os.mkdir(hdf5_file.parent)

    vprint(f'Writing HDF5 file with fit parameters at {hdf5_file}')
    if hdf5_file.exists():
        os.unlink(hdf5_file)
    with h5py.File(hdf5_file, mode='a') as f:
        f.attrs['type'] = 'A file containing a fitting function and the' +\
                          ' parameters for it for each transition or pair' +\
                          ' in /coeffs_dict'
        hickle.dump(model_func, f, path='/fitting_function')
        hickle.dump(coefficients_dict, f, path='/coeffs_dict')
        hickle.dump(covariance_dict, f, path='/covariance_dict')
        hickle.dump(sigmas_dict, f, path='/sigmas_dict')
        hickle.dump(sigma_sys_dict, f, path='/sigma_sys_dict')
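
The find_sys_scatter step iterates a systematic term, added in quadrature to the per-star errors, until the reduced chi-squared of the fit reaches unity. A schematic of that loop (an illustration, not the package's actual implementation):

import numpy as np

def estimate_sys_err(residuals, errors, tolerance=0.001):
    """Grow sigma_sys until chi^2_nu of residuals over total error is ~1."""
    sys_err, step = 0.0, np.std(residuals)
    nu = max(len(residuals) - 1, 1)
    while step > tolerance:
        total = np.sqrt(errors**2 + (sys_err + step)**2)
        chi2_nu = np.sum((residuals / total)**2) / nu
        if chi2_nu > 1.0:
            sys_err += step  # scatter still underestimated: accept the increment
        else:
            step /= 2.0      # overshoot: refine the step
    return sys_err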
Example #24
    "recorded when the gas was last kicked by SNII, split by redshift")

snapshot_filenames = [
    f"{directory}/{snapshot}" for directory, snapshot in zip(
        arguments.directory_list, arguments.snapshot_list)
]

names = arguments.name_list
output_path = arguments.output_directory

plt.style.use(arguments.stylesheet_location)

data = [load(snapshot_filename) for snapshot_filename in snapshot_filenames]
number_of_bins = 256

SNII_v_kick_bins = unyt.unyt_array(np.logspace(-1, 4, number_of_bins),
                                   units="km/s")
log_SNII_v_kick_bin_width = np.log10(SNII_v_kick_bins[1].value) - np.log10(
    SNII_v_kick_bins[0].value)
SNII_v_kick_centres = 0.5 * (SNII_v_kick_bins[1:] + SNII_v_kick_bins[:-1])

# Begin plotting
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True)
axes = axes.flat

ax_dict = {
    "$z < 1$": axes[0],
    "$1 < z < 3$": axes[1],
    "$z > 3$": axes[2],
}

for label, ax in ax_dict.items():
Example #25
for element in element_list:

    output_filename = "Gallazzi2021_Data_" + element + ".hdf5"
    if element == "OFe":
        correction = O_over_Fe_solar_Gr91 - O_over_Fe_solar_Asplund09

    if element == "MgFe":
        correction = Mg_over_Fe_solar_Gr91 - Mg_over_Fe_solar_Asplund09

    Z_median = (raw[:, 1] + correction) * unyt.dimensionless
    Z_lo = (raw[:, 2] + correction) * unyt.dimensionless
    Z_hi = (raw[:, 3] + correction) * unyt.dimensionless

    # Define the scatter as offset from the mean value
    y_scatter = unyt.unyt_array((Z_median - Z_lo, Z_hi - Z_median))

    # Meta-data
    comment = (
        "Data obtained assuming a Chabrier IMF and h=0.7. "
        f"h-corrected for SWIFT using cosmology: {cosmology.name}. "
        "The metallicity is expressed as [alpha/Fe]. Note that alpha does not stand for Oxygen. "
        "Gallazzi et al. adopt a semi-empirical estimate of [alpha/Fe] building on the work of Gallazzi et al. (2006). "
        "For each galaxy they measure the index ratio Mgb/Fe. "
        "The error bars give the 16th and 84th percentiles of the distribution. "
        f"This has been corrected to use Z_solar={solar_metallicity} (Asplund+ 2009)."
    )
    citation = "Gallazzi et al. (2021)"
    bibcode = "2021MNRAS.502.4457G"
    name = "Stellar mass - [alpha/Fe] relation"
    plot_as = "line"
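    # Hedged aside (illustrative only): y_scatter has shape (2, N), i.e. the
    # (lower, upper) offsets matplotlib expects for asymmetric error bars, so
    # it could be drawn directly with something like
    #   plt.errorbar(M_star, Z_median, yerr=y_scatter.value, fmt='o')
    # where M_star stands in for the stellar-mass column not shown here.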
def test_parse_units_override_default():
    parser = UnitInputParser()
    desired = unyt.unyt_array([12, 24, 36, 48], 'inch')
    actual = parser.parse([12, 24, 36, 48], 'inch')
    assert_allclose_units(actual, desired)
def plot_photons(filename, emin, emax, fmin, fmax):
    """
    Create the actual plot.

    filename: file to work with
    emin: minimal nonzero energy across all snapshots
    emax: maximal energy across all snapshots
    fmin: minimal flux magnitude across all snapshots
    fmax: maximal flux magnitude across all snapshots
    """

    print("working on", filename)

    # Read in data first
    data = swiftsimio.load(filename)
    meta = data.metadata
    boxsize = meta.boxsize
    edgelen = min(boxsize[0], boxsize[1])

    xstar = data.stars.coordinates
    xpart = data.gas.coordinates
    dxp = xpart - xstar
    r = np.sqrt(np.sum(dxp**2, axis=1))

    time = meta.time
    r_expect = meta.time * meta.reduced_lightspeed

    # parameters are stored as byte strings; cast via int, since
    # bool(b"0") would evaluate to True
    use_const_emission_rates = bool(
        int(meta.parameters["GEARRT:use_const_emission_rates"]))
    L = None
    if use_const_emission_rates:
        # read emission rate parameter as string
        emissionstr = meta.parameters[
            "GEARRT:star_emission_rates_LSol"].decode("utf-8")
        # clean string up
        if emissionstr.startswith("["):
            emissionstr = emissionstr[1:]
        if emissionstr.endswith("]"):
            emissionstr = emissionstr[:-1]

        # transform string values to floats with unyt units
        emissions = emissionstr.split(",")
        emlist = [float(er) for er in emissions]
        const_emission_rates = unyt.unyt_array(emlist, unyt.L_Sun)
        L = const_emission_rates[group_index]

    if plot_anisotropy_estimate:
        ncols = 4
    else:
        ncols = 3
    fig = plt.figure(figsize=(5 * ncols, 5.5), dpi=200)

    nbins = 100
    r_bin_edges = np.linspace(0.5 * edgelen * 1e-2, 0.507 * edgelen, nbins + 1)
    r_bin_centres = 0.5 * (r_bin_edges[1:] + r_bin_edges[:-1])
    r_analytical_bin_edges = np.linspace(0.5 * edgelen * 1e-2, 0.507 * edgelen,
                                         nbins + 1)

    # --------------------------
    # Read in and process data
    # --------------------------

    energies = getattr(data.gas.photon_energies,
                       "group" + str(group_index + 1))
    Fx = getattr(data.gas.photon_fluxes, "Group" + str(group_index + 1) + "X")
    Fy = getattr(data.gas.photon_fluxes, "Group" + str(group_index + 1) + "Y")

    fmag = np.sqrt(Fx**2 + Fy**2)
    particle_count, _ = np.histogram(
        r,
        bins=r_analytical_bin_edges,
        range=(r_analytical_bin_edges[0], r_analytical_bin_edges[-1]),
    )
    if L is not None:
        L = L.to(energies.units / time.units)

    xlabel_units_str = boxsize.units.latex_representation()
    energy_units_str = energies.units.latex_representation()
    flux_units_str = Fx.units.latex_representation()

    # ------------------------
    # Plot photon energies
    # ------------------------
    ax1 = fig.add_subplot(1, ncols, 1)
    ax1.set_title("Particle Radiation Energies")
    ax1.set_ylabel("Photon Energy [$" + energy_units_str + "$]")

    # don't expect more than float precision
    emin_to_use = max(emin, 1e-5 * emax)

    if use_const_emission_rates:
        # plot entire expected solution
        rA, EA = analytical_energy_solution(L, time, r_analytical_bin_edges,
                                            r_expect)

        mask = particle_count > 0
        if mask.any():
            EA = EA[mask].to(energies.units)
            rA = rA[mask]
            pcount = particle_count[mask]

            # the particle bin counts will introduce noise,
            # so use a linear fit for the plot. I assume here
            # that the particle number per bin increases
            # proportionally to r, which should roughly be the
            # case for the underlying glass particle distribution.
            lin_res = stats.linregress(rA, pcount)

            ax1.plot(
                rA,
                EA / line(rA.v, lin_res.slope, lin_res.intercept),
                **lineplot_kwargs,
                linestyle="--",
                c="red",
                label="Analytical Solution",
            )

    else:
        # just plot where photon front should be
        ax1.plot(
            [r_expect, r_expect],
            [emin_to_use, emax * 1.1],
            label="expected photon front",
            color="red",
        )

    ax1.scatter(r, energies, **scatterplot_kwargs)
    energies_binned, _, _ = stats.binned_statistic(
        r,
        energies,
        statistic="mean",
        bins=r_bin_edges,
        range=(r_bin_edges[0], r_bin_edges[-1]),
    )
    ax1.plot(r_bin_centres,
             energies_binned,
             **lineplot_kwargs,
             label="Mean Radiation Energy")
    ax1.set_ylim(emin_to_use, emax * 1.1)

    # ------------------------------
    # Plot binned photon energies
    # ------------------------------
    ax2 = fig.add_subplot(1, ncols, 2)
    ax2.set_title("Total Radiation Energy in radial bins")
    ax2.set_ylabel("Total Photon Energy [$" + energy_units_str + "$]")

    energies_summed_bin, _, _ = stats.binned_statistic(
        r,
        energies,
        statistic="sum",
        bins=r_bin_edges,
        range=(r_bin_edges[0], r_bin_edges[-1]),
    )
    ax2.plot(
        r_bin_centres,
        energies_summed_bin,
        **lineplot_kwargs,
        label="Total Energy in Bin",
    )
    current_ylims = ax2.get_ylim()
    ax2.set_ylim(emin_to_use, current_ylims[1])

    if use_const_emission_rates:
        # plot entire expected solution
        # Note: you need to use the same bins as for the actual results
        rA, EA = analytical_integrated_energy_solution(L, time, r_bin_edges,
                                                       r_expect)

        ax2.plot(
            rA,
            EA.to(energies.units),
            **lineplot_kwargs,
            linestyle="--",
            c="red",
            label="Analytical Solution",
        )
    else:
        # just plot where photon front should be
        ax2.plot(
            [r_expect, r_expect],
            ax2.get_ylim(),
            label="Expected Photon Front",
            color="red",
        )

    # ------------------------------
    # Plot photon fluxes
    # ------------------------------
    ax3 = fig.add_subplot(1, ncols, 3)
    ax3.set_title("Particle Radiation Flux Magnitudes")
    ax3.set_ylabel("Photon Flux Magnitude [$" + flux_units_str + "$]")

    fmin_to_use = max(fmin, 1e-5 * fmax)
    ax3.set_ylim(fmin_to_use, fmax * 1.1)

    ax3.scatter(r, fmag, **scatterplot_kwargs)

    fmag_mean_bin, _, _ = stats.binned_statistic(
        r,
        fmag,
        statistic="mean",
        bins=r_bin_edges,
        range=(r_bin_edges[0], r_bin_edges[-1]),
    )
    ax3.plot(
        r_bin_centres,
        fmag_mean_bin,
        **lineplot_kwargs,
        label="Mean Radiation Flux of particles",
    )

    if use_const_emission_rates:
        # plot entire expected solution
        rA, FA = analytical_flux_magnitude_solution(L, time,
                                                    r_analytical_bin_edges,
                                                    r_expect)

        mask = particle_count > 0
        if mask.any():
            FA = FA[mask].to(Fx.units)
            rA = rA[mask]
            pcount = particle_count[mask]

            # the particle bin counts will introduce noise,
            # so use a linear fit for the plot. I assume here
            # that the particle number per bin increases
            # proportionally to r, which should roughly be the
            # case for the underlying glass particle distribution.
            lin_res = stats.linregress(rA, pcount)

            ax3.plot(
                rA,
                FA / line(rA.v, lin_res.slope, lin_res.intercept),
                **lineplot_kwargs,
                linestyle="--",
                c="red",
                label="Analytical Solution",
            )

    else:
        # just plot where photon front should be
        ax3.plot(
            [r_expect, r_expect],
            [fmin_to_use, fmax * 1.1],
            label="expected photon front",
            color="red",
        )

    # ------------------------------
    # Plot photon flux sum
    # ------------------------------

    if plot_anisotropy_estimate:

        ax4 = fig.add_subplot(1, ncols, 4)
        ax4.set_title("Vectorial Sum of Radiation Flux in radial bins")
        ax4.set_ylabel("[1]")

        fmag_sum_bin, _, _ = stats.binned_statistic(
            r,
            fmag,
            statistic="sum",
            bins=r_bin_edges,
            range=(r_bin_edges[0], r_bin_edges[-1]),
        )
        mask_sum = fmag_sum_bin > 0
        fmag_max_bin, _, _ = stats.binned_statistic(
            r,
            fmag,
            statistic="max",
            bins=r_bin_edges,
            range=(r_bin_edges[0], r_bin_edges[-1]),
        )
        mask_max = fmag_max_bin > 0
        Fx_sum_bin, _, _ = stats.binned_statistic(
            r,
            Fx,
            statistic="sum",
            bins=r_bin_edges,
            range=(r_bin_edges[0], r_bin_edges[-1]),
        )
        Fy_sum_bin, _, _ = stats.binned_statistic(
            r,
            Fy,
            statistic="sum",
            bins=r_bin_edges,
            range=(r_bin_edges[0], r_bin_edges[-1]),
        )
        F_sum_bin = np.sqrt(Fx_sum_bin**2 + Fy_sum_bin**2)
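        # This ratio is a crude anisotropy estimate: if fluxes within a bin
        # point in random directions, the vector sum cancels and the ratio
        # drops towards zero, while a perfectly radial free-streaming front
        # has all fluxes aligned and gives a ratio of one.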

        ax4.plot(
            r_bin_centres[mask_sum],
            F_sum_bin[mask_sum] / fmag_sum_bin[mask_sum],
            **lineplot_kwargs,
            label=(
                r"$\left| \sum_{i \in \mathrm{particles \ in \ bin}}"
                r" \mathbf{F}_i \right|$ / $\sum_{i \in \mathrm{particles"
                r" \ in \ bin}} \left| \mathbf{F}_{i} \right|$"
            ),
        )
        ax4.plot(
            r_bin_centres[mask_max],
            F_sum_bin[mask_max] / fmag_max_bin[mask_max],
            **lineplot_kwargs,
            linestyle="--",
            label=(
                r"$\left| \sum_{i \in \mathrm{particles \ in \ bin}}"
                r" \mathbf{F}_i \right|$ / $\max_{i \in \mathrm{particles"
                r" \ in \ bin}} \left| \mathbf{F}_{i} \right|$"
            ),
        )

    # -------------------------------------------
    # Cosmetics that all axes have in common
    # -------------------------------------------
    for ax in fig.axes:
        ax.set_xlabel("r [$" + xlabel_units_str + "$]")
        ax.set_yscale("log")
        ax.set_xlim(0.0, 0.501 * edgelen)
        ax.legend(fontsize="x-small")

    # Add title
    title = filename.replace("_", r"\_")  # escape underscores for LaTeX
    if meta.cosmology is not None:
        title += ", $z$ = {0:.2e}".format(meta.z)
    title += ", $t$ = {0:.2e}".format(meta.time)
    fig.suptitle(title)

    plt.tight_layout()
    figname = filename[:-5]
    figname += "-PhotonPropagation.png"
    plt.savefig(figname)
    plt.close()
    gc.collect()

    return
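# Hedged usage sketch (not from the source): plot_photons is meant to be
# driven by a loop over snapshots, with the global extrema gathered first so
# that all frames share axis limits; the file pattern and the helper
# get_plot_boundaries below are illustrative names only.
#   import glob
#   snaplist = sorted(glob.glob("propagation_test_*.hdf5"))
#   emin, emax, fmin, fmax = get_plot_boundaries(snaplist)
#   for fname in snaplist:
#       plot_photons(fname, emin, emax, fmin, fmax)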
def test_parse_units_with_default():
    actual = process_unit_input([1, 2, 3, 4], 'ft')
    desired = unyt.unyt_array([1, 2, 3, 4], 'ft')
    assert_allclose_units(actual, desired)
glass.close()

# replace the particle closest to the center
# by the star
r = np.sqrt(np.sum((0.5 - xp) ** 2, axis=1))
rmin = np.argmin(r)
xs = xp[rmin]
xp = np.delete(xp, rmin, axis=0)
h = np.delete(h, rmin)


unitL = unyt.cm
t_end = 1e-3 * unyt.s
edgelen = unyt.c.to("cm/s") * t_end * 2.0
edgelen = edgelen.to(unitL)
boxsize = unyt.unyt_array([edgelen.v, edgelen.v, 0.0], unitL)

xs = unyt.unyt_array(
    [np.array([xs[0] * edgelen, xs[1] * edgelen, 0.0 * edgelen])], unitL
)
xp *= edgelen
h *= edgelen


w = Writer(unit_system=unyt.unit_systems.cgs_unit_system, box_size=boxsize, dimension=2)

w.gas.coordinates = xp
w.stars.coordinates = xs
w.gas.velocities = np.zeros(xp.shape) * (unyt.cm / unyt.s)
w.stars.velocities = np.zeros(xs.shape) * (unyt.cm / unyt.s)
w.gas.masses = np.ones(xp.shape[0], dtype=np.float64) * 100 * unyt.g
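# Hedged completion sketch (the excerpt ends here; the steps below are the
# usual swiftsimio Writer closing calls, with an illustrative energy value):
#   w.gas.internal_energy = (
#       np.ones(xp.shape[0], dtype=np.float64) * 1.25e12 * unyt.erg / unyt.g
#   )
#   w.write("rt_uniform_box.hdf5")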
# Cosmology
h_sim = cosmology.h

output_filename = "Reyes2011.hdf5"
output_directory = "../"

if not os.path.exists(output_directory):
    os.mkdir(output_directory)


# Fits taken from eq. 39
stellar_masses = np.linspace(9.0, 11.0, 128)
v_max = 2.142 + 0.278 * (stellar_masses - 10.102)
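# Sanity check of the fit: at the pivot log10(M*) = 10.102 the relation gives
# log10(v_max) = 2.142, i.e. v_max = 10**2.142, about 139 km/s.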

stellar_masses = (
    unyt.unyt_array(10 ** stellar_masses, units=unyt.Solar_Mass)
    * kroupa_to_chabrier_mass
)
v_max = unyt.unyt_array(10 ** v_max, units=unyt.km / unyt.s)

# Meta-data
comment = (
    "Best-fit (eq. 39) to the data of 189 local galaxies. "
    "No cosmology correction needed as variables provided as physical. "
    f"Converted Kroupa to Chabrier IMF using ratio {kroupa_to_chabrier_mass}."
)
citation = "Reyes et al. (2011) (Fit)"
bibcode = "2011MNRAS.417.2347R"
name = "Fit to the stellar mass - v_80 (Tully-Fisher) relation at z=0."
plot_as = "line"
redshift = 0.0