Example 1
def test_model_output():
    # Exercise model_output on every supported BPASS output file type
    data = load.model_output(sn_file)
    data = load.model_output(nmbr_file)
    data = load.model_output(yields_file)
    data = load.model_output(masses_file_bin)
    data = load.model_output(sed_file)
    data = load.model_output(ion_file)
    # HR diagram files also need an explicit hr_type
    data = load.model_output(hr_file, hr_type='TL')
    data = load.model_output(hr_file, hr_type='Tg')
    data = load.model_output(hr_file, hr_type='TTG')
    del data
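
For context, a minimal sketch of calling load.model_output directly, assuming a local copy of the BPASS data (the paths below are hypothetical):

from hoki import load

# Hypothetical paths; point these at your own BPASS data files.
spectra = load.model_output('bpass/spectra-bin-imf135_300.z002.dat')
print(spectra.columns[:3])  # 'WL' followed by log-age columns ('6.0', '6.1', ...)

# HR diagram files additionally take hr_type ('TL', 'Tg' or 'TTG')
hrd = load.model_output('bpass/hrs-sin-imf_chab100.zem4.dat', hr_type='TL')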
Example 2
class TestSpectraCompiler(object):

    # Initialise the model_output DataFrame once and reuse it in the tests
    # This reduces I/O reads
    data = model_output(f"{data_path}/spectra-bin-imf135_300.z002.dat")

    # Patch np.loadtxt and isfile so no real BPASS files are read
    @patch("hoki.data_compilers.np.loadtxt")
    @patch("hoki.data_compilers.isfile")
    def test_compiler(self, mock_isfile, mock_model_output):

        # Set the model_output to the DataFrame
        mock_model_output.return_value = self.data.to_numpy()
        mock_isfile.return_value = True

        spec = SpectraCompiler(f"{data_path}", f"{data_path}", "imf135_300")

        # Check if pkl file is created
        assert os.path.isfile(f"{data_path}/all_spectra-bin-imf135_300.npy")

        # Check output dataframe
        npt.assert_allclose(spec.output[3],
                            self.data.loc[:,
                                          slice("6.0", "11.0")].T.to_numpy(),
                            err_msg="Complied spectra is wrong.")

        # Remove created pickle
        os.remove(f"{data_path}/all_spectra-bin-imf135_300.npy")
Example 3
class TestLoadAllEmissivities(object):

    # Initialise model_output DataFrame
    # This reduces I/O reads
    data = load.model_output(f"{data_path}/ionizing-bin-imf135_300.z002.dat")

    # Patch np.loadtxt and isfile so no real BPASS files are read
    @patch("hoki.data_compilers.np.loadtxt")
    @patch("hoki.data_compilers.isfile")
    def test_compile_emissivities(self, mock_isfile, mock_model_output):

        # Set the model_output to the DataFrame
        mock_model_output.return_value = self.data.to_numpy()
        mock_isfile.return_value = True
        res = load.emissivities_all_z(f"{data_path}", "imf135_300")

        # Check if compiled file is created
        assert os.path.isfile(f"{data_path}/all_ionizing-bin-imf135_300.npy"),\
            "No compiled file is created."

        # Check output numpy array
        npt.assert_allclose(res[3],
                            self.data.drop(columns='log_age').to_numpy(),
                            err_msg="Loading of files has failed.")

    def test_load_pickled_file(self):

        res = load.emissivities_all_z(f"{data_path}", "imf135_300")

        # Check output numpy array
        npt.assert_allclose(res[3],
                            self.data.drop(columns='log_age').to_numpy(),
                            err_msg="Loading of compiled file has failed.")

        os.remove(f"{data_path}/all_ionizing-bin-imf135_300.npy")
Example 4
def packnload(file):
    """Load the data from a BPASS zipped file.

    Parameters
    ----------
    file : string
        A string pointing to a zipped version of BPASS data.

    Returns
    -------
    pandas DataFrame
        A pandas DataFrame containing the model data from the BPASS file,
        which is a *hoki* output.

    """
    gunzip(file + ".gz", file)
    out = load.model_output(file)
    os.remove(file)
    return out
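
A hedged usage sketch of packnload, assuming gunzip is the helper that decompresses file + ".gz" to file, and that the gzipped BPASS file exists on disk (hypothetical name):

# Hypothetical file; 'spectra-bin-imf135_300.z002.dat.gz' must exist locally.
spectra = packnload('spectra-bin-imf135_300.z002.dat')
print(spectra.shape)  # 'WL' column plus one column per log-age bin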
Example 5
class TestLoadAllRates(object):

    # Setup files to load
    data = load.model_output(f"{data_path}/supernova-bin-imf135_300.zem5.dat")

    # Check if function loads rates
    @patch("hoki.load.model_output")
    def test_load_rates(self, mock_model_output):
        mock_model_output.return_value = self.data
        x = load.rates_all_z(f"{data_path}", "imf135_300")
        assert x is not None, "The rates cannot be initialised."

    # Load rates
    with patch("hoki.load.model_output") as mock_model_output:
        mock_model_output.return_value = data
        x = load.rates_all_z(f"{data_path}", "imf135_300")

    # Test wrong inputs
    def test_file_not_present(self):
        with pytest.raises(AssertionError):
            # The file is not present, so the load function must not run.
            _ = load.rates_all_z(f"{data_path}", "imf135_300")

    def test_wrong_imf(self):
        with pytest.raises(HokiKeyError):
            # An unsupported IMF must raise and not be taken as input.
            _ = load.rates_all_z(f"{data_path}", "i")

    # Test output
    def test_output_shape(self):
        assert isinstance(self.x, pd.DataFrame)
        assert (self.x.columns.get_level_values(0).unique() ==
                np.array(BPASS_EVENT_TYPES)).all(),\
            "wrong headers read from the file."
        assert (self.x.columns.get_level_values(1).unique() ==
                np.array(BPASS_NUM_METALLICITIES)).all(),\
            "wrong metallicity header"

    def test_output(self):
        assert np.isclose(self.x.loc[:, ("Ia", 0.00001)],
                          self.data["Ia"]).all(),\
            "Models are not loaded correctly."
Example 6
class TestEmissivityCompiler(object):

    # Initialise the model_output DataFrame once and reuse it in the tests
    # This reduces I/O reads
    data = model_output(f"{data_path}/ionizing-bin-imf135_300.z002.dat")

    # Patch np.loadtxt and isfile so no real BPASS files are read
    @patch("hoki.data_compilers.np.loadtxt")
    @patch("hoki.data_compilers.isfile")
    def test_compiler(self, mock_isfile, mock_model_output):

        # Set the model_output to the DataFrame
        mock_model_output.return_value = self.data.to_numpy()
        mock_isfile.return_value = True

        res = EmissivityCompiler(f"{data_path}", f"{data_path}", "imf135_300")

        assert os.path.isfile(f"{data_path}/all_ionizing-bin-imf135_300.npy")

        npt.assert_allclose(res.output[3],
                            self.data.drop(columns='log_age').to_numpy(),
                            err_msg="Compiled emissivities is wrong.")
        os.remove(f"{data_path}/all_ionizing-bin-imf135_300.npy")
Example 7
class TestLoadAllSpectra(object):

    # Initialise model_output DataFrame
    # This reduces I/O reads
    data = load.model_output(f"{data_path}/spectra-bin-imf135_300.z002.dat")

    # Patch np.loadtxt and isfile so no real BPASS files are read
    @patch("hoki.data_compilers.np.loadtxt")
    @patch("hoki.data_compilers.isfile")
    def test_compile_spectra(self, mock_isfile, mock_model_output):

        # Set the model_output to the DataFrame
        mock_model_output.return_value = self.data.to_numpy()
        mock_isfile.return_value = True
        spec = load.spectra_all_z(f"{data_path}", "imf135_300")

        # Check if compiled file is created
        assert os.path.isfile(f"{data_path}/all_spectra-bin-imf135_300.npy"),\
            "No compiled file is created."

        # Check output numpy array
        npt.assert_allclose(spec[3],
                            self.data.loc[:,
                                          slice("6.0", "11.0")].T.to_numpy(),
                            err_msg="Loading of files has failed.")

    def test_load_pickled_file(self):

        spec = load.spectra_all_z(f"{data_path}", "imf135_300")

        # Check output numpy array
        npt.assert_allclose(spec[3],
                            self.data.loc[:,
                                          slice("6.0", "11.0")].T.to_numpy(),
                            err_msg="Loading of compiled file has failed.")

        os.remove(f"{data_path}/all_spectra-bin-imf135_300.npy")
Example 8
    def __init__(self, obs_df, model):
        """
        Initialisation of the AgeWizard object

        Parameters
        ----------
        obs_df: pandas.DataFrame
            Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
            (for CMD comparison)
        model: str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
            Location of the modelled HRD or CMD. This can be an already instantiated HRDiagram or CMD object, or a
            path to an HR Diagram file or a pickled CMD.
        """

        # Making sure the observational properties are given in a format we can use.
        if not isinstance(obs_df, pd.DataFrame):
            raise HokiFormatError(
                "Observations should be stored in a Data Frame")

        if 'name' not in obs_df.columns:
            warnings.warn(
                "We expect the name of sources to be given in the 'name' column. "
                "If I can't find names I'll make my own ;)", HokiFormatWarning)

        # Checking what format is given for the model:
        if isinstance(model, hoki.hrdiagrams.HRDiagram):
            self.model = model
        elif isinstance(model, hoki.cmd.CMD):
            self.model = model
        elif isinstance(model, str) and 'hrs' in model:
            self.model = load.model_output(model, hr_type='TL')
        elif isinstance(model, str):
            try:
                self.model = load.unpickle(path=model)
            except AssertionError:
                print('-----------------')
                print(
                    'HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD, '
                    'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
                print('-----------------')
                raise HokiFatalError('model is ' + str(type(model)))

        else:
            print('-----------------')
            print(
                'HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD, '
                'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
            print('-----------------')
            raise HokiFatalError('model is ' + str(type(model)))

        self.obs_df = obs_df
        self.coordinates = find_coordinates(self.obs_df, self.model)

        # This line is obsolete but might need revival if we ever want to add the not normalised distributions again
        # self._distributions = calculate_distributions_normalised(self.obs_df, self.model)

        self.pdfs = calculate_individual_pdfs(self.obs_df,
                                              self.model).fillna(0)
        self.sources = self.pdfs.columns.to_list()
        self.sample_pdf = None
        self._most_likely_age = None
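
A minimal construction sketch, assuming this __init__ belongs to an AgeWizard class importable from hoki.age_utils (the import location and model path are assumptions):

import pandas as pd
from hoki.age_utils import AgeWizard  # assumed import location

obs = pd.DataFrame({'name': ['star1'], 'logT': [4.58], 'logL': [4.83]})
wizard = AgeWizard(obs_df=obs, model='bpass/hrs-sin-imf_chab100.zem4.dat')  # hypothetical path
print(wizard.pdfs.head())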
Example 9
import hoki.age_utils as au
import hoki.load as load
import pkg_resources
import numpy as np
import pandas as pd
import pytest
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError

# Loading Data

data_path = pkg_resources.resource_filename('hoki', 'data')
hr_file = data_path + '/hrs-sin-imf_chab100.zem4.dat'
cmd_file = data_path + '/cmd_bv_z002_bin_imf135_300'
myhrd = load.model_output(hr_file, hr_type='TL')
mycmd = load.unpickle(cmd_file)
# Creating Test Inputs

fake_hrd_input = pd.DataFrame.from_dict({
    'name': ['star1', 'star2', 'star3'],
    'logT': np.array([4.58, 4.48, 4.14]),
    'logL': np.array([4.83, 5.07, 5.40])
})

bad_hrd_input = pd.DataFrame.from_dict({
    'logT': np.array(['bla']),
    'logL': np.array([4.83])
})

no_name_input = pd.DataFrame.from_dict({
    'logT': np.array([4.58, 4.48, 4.14]),
    'logL': np.array([4.83, 5.07, 5.40])
})
Example 10
from hoki import load
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time

start_time = time.time()

# specify which BPASS file to use
# BPASS_file = 'spectra-bin-imf135_300.z020.dat'
BPASS_file = 'spectra-bin-imf135_100.z002.dat'

count = 0

# Load the BPASS data
BPASS_data = load.model_output(BPASS_file)

# Convert the BPASS data to microns.  The flux is in luminosity per angstrom,
# so it is converted by multiplying by angstroms/micron (10^4).  The
# wavelength is in angstroms, so it needs a net factor of microns/angstrom
# (10^-4); converting the whole data set at once multiplies WL by 10^4 too,
# so the WL column then needs a correcting factor of 10^-8.
# BPASS_data *= 10**4
# BPASS_data.WL *= 10**-8
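# A hedged, explicit equivalent of the conversion above (kept commented out,
# as in the original) that touches the flux columns and WL separately, so
# each gets exactly one conversion factor:
# flux_cols = BPASS_data.columns != 'WL'
# BPASS_data.loc[:, flux_cols] *= 10**4  # per angstrom -> per micron
# BPASS_data['WL'] *= 10**-4             # angstrom -> micron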

time_list = BPASS_data.columns[BPASS_data.columns != 'WL']

time_list_exp = np.power(10, time_list.astype(float))

delta_t = np.zeros_like(time_list_exp)

for i in range(len(delta_t)):
Example 11
class TestCSPEventRate(object):

    data = model_output(f"{data_path}/supernova-bin-imf135_300.zem5.dat")

    # Check initialisation
    @patch("hoki.load.model_output")
    def test_init(self, mock_model_output):
        mock_model_output.return_value = self.data
        _ = er.CSPEventRate(f"{data_path}", "imf135_300")

    def test_input_functions_at_time(self):
        assert np.isclose(
            self.CSP.at_time([sfh_fnc], [Z_fnc], ["Ia"], 0,
                             sample_rate=-1)[0]["Ia"],
            0.002034966495416449), "Correct input is not taken."

        assert np.isclose(
            self.CSP.at_time(sfh_fnc, Z_fnc, ["Ia"], 0,
                             sample_rate=-1)[0]["Ia"],
            0.002034966495416449), "Correct input is not taken."
        assert np.isclose(
            self.CSP.at_time(sfh, Z_fnc, ["Ia"], 0, sample_rate=-1)[0]["Ia"],
            0.002034966495416449), "Correct input is not taken."

        assert np.isclose(
            self.CSP.at_time([sfh], Z_fnc, ["Ia"], 0)[0]["Ia"],
            0.0018987009588956765), "Correct input is not taken."
        assert np.isclose(
            self.CSP.at_time([sfh, sfh], [Z_fnc, Z_fnc], ["Ia"], 0)[0]["Ia"],
            0.0018987009588956765), "Correct input is not taken."

    def test_event_rate_wrong_input(self):
        with pytest.raises(HokiFormatError):
            _ = self.CSP.over_time([sfh_fnc], [Z_fnc, Z_fnc], ["Ia"], 100)
        with pytest.raises(ValueError):
            _ = self.CSP.over_time([sfh_fnc], [Z_fnc], ["B"], 100)
        with pytest.raises(HokiFormatError):
            _ = self.CSP.over_time([sfh_fnc, sfh_fnc], [Z_fnc], ["B"], 100)

    @patch("hoki.load.model_output")
    def test_input_over_time(self, mock_model_output):
        # Load model_output with a single supernova rate file
        mock_model_output.return_value = self.data
        CSP = er.CSPEventRate(f"{data_path}", "imf135_300")

        test_out, time_edges = CSP.over_time([sfh_fnc], [Z_fnc], ["Ia"],
                                             100,
                                             return_time_edges=True)

    # Load model_output with a single supernova rate file
    with patch("hoki.load.model_output") as mock_model_output:
        mock_model_output.return_value = data
        CSP = er.CSPEventRate(f"{data_path}", "imf135_300")

        test_out, time_edges = CSP.over_time([sfh_fnc], [Z_fnc], ["Ia"],
                                             100,
                                             return_time_edges=True)

    def test_bins(self):
        assert np.isclose(self.time_edges,  np.linspace(0, HOKI_NOW, 101)).all(),\
            "time edges are not properly set."

    def test_event_rate_calculation(self):
        expected = np.loadtxt(f"{data_path}/csp_test_data/type_Ia_rates.txt")
        assert np.isclose(self.test_out["Ia"], expected).all(),\
            "The event rate calculation is wrong."

    def test_event_rate_calculation_multi_type(self):
        out = self.CSP.over_time([sfh_fnc], [Z_fnc], ["Ia", "II"], 100)
        assert len(out) == 1, "The output of calculate_over_time is wrong."
        assert len(out[0]) == 2, "The output of calculate_over_time is wrong."

    def test_event_rate_calculation_multi(self):
        out = self.CSP.over_time([sfh_fnc, sfh_fnc], [Z_fnc, Z_fnc], ["Ia"],
                                 100)
        assert len(out) == 2, "The output of calculate_over_time is wrong."
        assert len(out[0]) == 1, "The output of calculate_over_time is wrong."

    def test_event_rate_at_time(self):
        x = self.CSP.at_time([sfh_fnc], [Z_fnc], ["Ia"], 0)
        assert np.isclose(x[0]["Ia"], 0.0018987009588956765),\
            "The output of CSP.at_time is wrong."

    def test_vector_input(self):
        assert np.isclose(
            self.CSP.at_time([vec_sfh], [vec_Z], ["Ia"], 0,
                             sample_rate=-1)[0]["Ia"],
            0.002034966495416449), "Correct input is not taken."

    def test_full_grid_over_time(self):

        # Build mock 2D grid
        nr_time_points = len(time_points)
        SFH = np.zeros((1, 13, nr_time_points), dtype=np.float64)
        bpass_Z_index = np.array([
            np.argmin(np.abs(Z_fnc(i) - BPASS_NUM_METALLICITIES))
            for i in time_points
        ])
        SFH[0, bpass_Z_index, range(nr_time_points)] += np.array(
            [sfh_fnc(i) for i in time_points])

        out = self.CSP.grid_over_time(SFH, time_points, ["Ia", "IIP"], 100)
        assert out.shape == (1, 13, 2, 100), "Output shape is wrong"
        assert np.allclose(out[0][:, 0].sum(axis=0),
                           self.test_out["Ia"]), "Not the same as over_time"

    def test_full_grid_at_time(self):

        # Build mock 2D grid
        nr_time_points = len(time_points)
        SFH = np.zeros((1, 13, nr_time_points), dtype=np.float64)
        bpass_Z_index = np.array([
            np.argmin(np.abs(Z_fnc(i) - BPASS_NUM_METALLICITIES))
            for i in time_points
        ])
        SFH[0, bpass_Z_index, range(nr_time_points)] += np.array(
            [sfh_fnc(i) for i in time_points])

        out = self.CSP.grid_at_time(SFH, time_points, ["Ia", "IIP"], 0)
        assert out.shape == (1, 13, 2), "Output shape is wrong"
        assert np.isclose(out[0][:, 0].sum(axis=0),
                          0.0018987009588956765), "Not the same as over_time"
Example 12
from hoki import load
import numpy as np
from glob import glob
from h5py_utils import write_data_h5py

# model_dir = 'BPASSv2.2.1_sin-imf_chab300'
# fname = 'bpass_sin.h5'
model_dir = 'BPASSv2.2.1_bin-imf_chab100'
fname = 'bpass.h5'

models = glob(model_dir + '/*')

output_temp = load.model_output(models[0])

ages = np.array([float(a) for a in output_temp.columns[1:]])
age_mask = (10**ages / 1e9) < 18  # Gyr
ages = ages[age_mask]

wl = output_temp['WL'].values
metallicities = np.array([None] * len(models))

spec = np.zeros((len(metallicities), len(ages), len(wl)))

for i, mod in enumerate(models):
    try:
        metallicities[i] = float('0.' + mod[-7:-4])
    except ValueError:  # handle the em5 = 1e-5 / em4 = 1e-4 filename format
        metallicities[i] = 10**-float(mod[-5])

# sort by increasing metallicity
Z_idx = np.argsort(metallicities)
Example 13
def generate_ssp_table_bpass(
        ssp_lookup_file,
        Zsol=Solar['total'],
        return_table=False,
        model_dir='/home/rad/caesar/BPASSv2.2.1_bin-imf_chab100'):
    '''
    Generates an SPS lookup table from BPASS.
    '''
    from hoki import load
    from glob import glob

    mylog.info('Generating BPASS SSP lookup table %s' % (ssp_lookup_file))
    mylog.info('Using BPASS files in: %s' % (model_dir))

    specfiles = glob(model_dir + '/spectra*')  # these must be gunzipped
    smfiles = glob(model_dir + '/starmass*')  # these must be gunzipped
    output_temp = load.model_output(specfiles[0])
    #output_temp = output_temp[(output_temp.WL>LAMBDA_LO)&(output_temp.WL<LAMBDA_HI)]  # restrict wavelength range for faster calculations
    #print(specfiles[0],output_temp)

    ages = np.array([float(a) for a in output_temp.columns[1:]])
    age_mask = (10**ages / 1e9) < 18  # Gyr
    ages = ages[age_mask]

    wavelengths = output_temp['WL'].values
    metallicities = np.array([None] * len(specfiles))

    for i, mod in enumerate(specfiles):  # parse metallicities from filenames
        try:
            metallicities[i] = float('0.' + mod[-7:-4])
        except ValueError:  # handle the em5 = 1e-5 and em4 = 1e-4 cases
            metallicities[i] = 10**-float(mod[-5])

    # sort by increasing metallicity
    Z_idx = np.argsort(metallicities)
    metallicities = metallicities[Z_idx].astype(float)

    ssp_spectra = np.zeros((len(ages) * len(metallicities), len(wavelengths)))
    for iZ, mod in enumerate(np.array(specfiles)[Z_idx]):
        output = load.model_output(mod)
        #output = output[(output.WL>LAMBDA_LO)&(output.WL<LAMBDA_HI)]  # restrict wavelength range for faster calculations
        for iage, a in enumerate(ages):
            j = iZ * len(ages) + iage
            ssp_spectra[j] = output[str(a)].values
            ssp_spectra[j] *= wavelengths**2 / CLIGHT_AA  # convert from per AA to per Hz

    mass_remaining = []
    for i, mod in enumerate(np.array(smfiles)[Z_idx]):
        output = load.model_output(mod)
        mass_remaining.append(output['stellar_mass'].values)
    mass_remaining = np.asarray(mass_remaining).flatten() / 1.e6  # to Msol

    # convert units
    ssp_ages = ages  # log yr
    ssp_logZ = np.log10(metallicities)
    ssp_spectra /= 1e6  # to Msol
    #print(np.shape(mass_remaining),mass_remaining)

    with h5py.File(ssp_lookup_file, 'w') as hf:
        hf.create_dataset('fsps_options', data=model_dir)
        hf.create_dataset('ages', data=ssp_ages)
        hf.create_dataset('logZ', data=ssp_logZ)
        hf.create_dataset('mass_remaining', data=mass_remaining)
        hf.create_dataset('wavelengths', data=wavelengths)
        hf.create_dataset('spectra', data=ssp_spectra)
    memlog('Generated BPASS lookup table with %d ages and %d metallicities' %
           (len(ssp_ages), len(ssp_logZ)))

    if return_table:
        return ssp_ages, ssp_logZ, mass_remaining, wavelengths, ssp_spectra
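
A hedged invocation sketch, assuming an HDF5 output path of your choosing and a local BPASS model directory (both hypothetical):

# Writes the lookup table to 'bpass_ssp_lookup.h5' (hypothetical path); the
# model directory must contain gunzipped spectra* and starmass* files.
generate_ssp_table_bpass('bpass_ssp_lookup.h5',
                         model_dir='/path/to/BPASSv2.2.1_bin-imf_chab100')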
Example 14
    def __init__(self, obs_df, model, nsamples=100):
        """
        Initialisation of the AgeWizard object

        Parameters
        ----------
        obs_df: pandas.DataFrame
            Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
            (for CMD comparison)
        model: str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
            Location of the modelled HRD or CMD. This can be an already instantiated HRDiagram or CMD object, or a
            path to an HR Diagram file or a pickled CMD.
        nsamples: int, optional
            Number of times each data point should be sampled from its error distribution. Default is 100.
            This only matters if you are taking errors into account.
        """

        print(f"{Dialogue.info()} AgeWizard Starting")
        print(f"{Dialogue.running()} Initial Checks")

        # Making sure the observational properties are given in a format we can use.
        if not isinstance(obs_df, pd.DataFrame):
            raise HokiFormatError(
                "Observations should be stored in a Data Frame")

        if 'name' not in obs_df.columns:
            warnings.warn(
                "We expect the name of sources to be given in the 'name' column. "
                "If I can't find names I'll make my own ;)", HokiFormatWarning)

        # Checking what format is given for the model:
        if isinstance(model, hoki.hrdiagrams.HRDiagram):
            self.model = model
        elif isinstance(model, hoki.cmd.CMD):
            self.model = model
        elif isinstance(model, str) and 'hrs' in model:
            self.model = load.model_output(model, hr_type='TL')
        elif isinstance(model, str):
            try:
                self.model = load.unpickle(path=model)
            except AssertionError:
                print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
                print(
                    f'{Dialogue.debugger()}\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD, '
                    'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
                print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
                raise HokiFatalError('model is ' + str(type(model)))

        else:
            print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
            print(
                f'{Dialogue.debugger()}\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD, '
                'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
            print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
            raise HokiFatalError('model is ' + str(type(model)))

        print(f"{Dialogue.complete()} Initial Checks")

        self.obs_df = obs_df.copy()

        # not needed?
        # self.coordinates = find_coordinates(self.obs_df, self.model)

        # This line is obsolete but might need revival if we ever want to add the not normalised distributions again
        # self._distributions = calculate_distributions_normalised(self.obs_df, self.model)

        self.pdfs = au.calculate_individual_pdfs(self.obs_df,
                                                 self.model,
                                                 nsamples=nsamples).fillna(0)
        self.sources = self.pdfs.columns.to_list()
        self.sample_pdf = None
        self._most_likely_age = None
Example 15
# plt.title(r'scale for V band')
# plt.savefig('V band tau scaling.png', dpi = 200)
## ---------------------------------------------------------------------



# Plot L_Edd
# ------------------------------------------------------------------

a_min = 0.001
a_max = 1

BPASS_file = 'spectra-bin-imf100_300.z001.dat'
SM_file = BPASS_file.replace('spectra', 'starmass')

BPASS_data = load.model_output(BPASS_file)
BPASS_data.WL *= 10**-4

time_list = BPASS_data.columns[BPASS_data.columns != 'WL']

time_list_exp = np.power(10, time_list.astype(float))

wl_min = 0.001
wl_max = 10

BPASS_data = BPASS_data[(BPASS_data.WL >= wl_min) & (BPASS_data.WL <= wl_max)]
wl_list = BPASS_data.WL.to_numpy()

kappa_av_RP = np.zeros_like(time_list)
kappa_av_F = np.zeros_like(time_list)
kappa_RP = np.zeros_like(wl_list)