def kurucz_atomic_data(atomic_data_fname):
    """Load the default Kurucz atomic dataset or skip the test.

    Parameters
    ----------
    atomic_data_fname : str
        Path to the atomic data HDF5 file.

    Returns
    -------
    AtomData
        The loaded dataset, only if its md5 matches the expected default
        Kurucz dataset; otherwise the current test is skipped.
    """
    atomic_data = AtomData.from_hdf5(atomic_data_fname)
    if atomic_data.md5 != '21095dd25faa1683f4c90c911a00c3f8':
        # Fixed: the skip message previously lacked the closing parenthesis.
        pytest.skip('Need default Kurucz atomic dataset '
                    '(md5="21095dd25faa1683f4c90c911a00c3f8")')
    # pytest.skip raises, so no `else` branch is needed before returning.
    return atomic_data
def __init__(self, config_fname, atom_data=None, log_dir='./logs/'):
    """Initialize from a YAML configuration file.

    Parameters
    ----------
    config_fname : str
        Path to the YAML configuration file.
    atom_data : AtomData, optional
        Pre-loaded atomic data; when omitted, the atom data file named in
        the configuration is loaded instead [default=None].
    log_dir : str, optional
        Directory used for log files [default='./logs/'].
    """
    self._log_dir = log_dir
    self.set_logger('startup')
    self._config = ConfigurationNameSpace.from_yaml(config_fname)
    if atom_data is not None:
        self._atom_data = atom_data
    else:
        # Fall back to the atom data file referenced by the configuration.
        self._atom_data = AtomData.from_hdf5(self._config.atom_data)
def __init__(self, *args, **kwargs):
    """Initialize from a YAML configuration file.

    Parameters
    ----------
    args[0] : str
        Path to the YAML configuration file (positional).
    atom_data : AtomData, optional keyword
        Pre-loaded atomic data; when omitted, the atom data file named in
        the configuration is loaded instead.
    log_dir : str, optional keyword
        Directory used for log files [default='./logs/'].
    """
    yaml_fname = args[0]
    chosen_atom_data = kwargs.pop('atom_data', None)
    self._log_dir = kwargs.pop('log_dir', './logs/')
    self.set_logger('startup')
    self._config = ConfigurationNameSpace.from_yaml(yaml_fname)
    if chosen_atom_data is None:
        # No atom data supplied - load the file named in the configuration.
        chosen_atom_data = AtomData.from_hdf5(self._config.atom_data)
    self._atom_data = chosen_atom_data
def from_yaml(cls, fname, resume_fit=None):
    """
    Read the fitter configuration from a yaml file.

    Parameters
    ----------
    fname : str
        Filename of the fitter YAML configuration.
    resume_fit : bool or None, optional
        Fallback for the ``resume`` option when the YAML file does not
        specify one [default=None].

    Returns
    -------
    cls
        A fully assembled fitter configuration instance.
    """
    # Use a context manager so the file handle is closed deterministically
    # (previously the open() handle was never explicitly closed).
    with open(fname) as fh:
        conf_dict = yaml.load(fh, OrderedDictYAMLLoader)

    default_config = ConfigurationNameSpace.from_yaml(
        conf_dict['tardis']['default_conf'])
    atom_data = AtomData.from_hdf5(conf_dict['tardis']['atom_data'])

    parameter_config = ParameterConfiguration.from_conf_dict(
        conf_dict['fitter']['parameters'])

    number_of_samples = conf_dict['fitter']['number_of_samples']
    max_iterations = conf_dict['fitter']['max_iterations']

    # Optimizer and fitness function are selected by name through the
    # module-level registries; remaining dict entries become kwargs.
    optimizer_dict = conf_dict['fitter'].pop('optimizer')
    optimizer_class = all_optimizer_dict[optimizer_dict.pop('name')]
    optimizer = optimizer_class(parameter_config, number_of_samples,
                                **optimizer_dict)

    fitness_function_dict = conf_dict['fitter'].pop('fitness_function')
    fitness_function_class = all_fitness_function_dict[
        fitness_function_dict.pop('name')]
    fitness_function = fitness_function_class(**fitness_function_dict)

    resume = conf_dict['fitter'].get('resume', resume_fit)
    fitter_log = conf_dict['fitter'].get('fitter_log', None)

    # Optional spectral store configuration.
    spectral_store_dict = conf_dict['fitter'].get('spectral_store', None)
    if spectral_store_dict is not None:
        spectral_store_fname = spectral_store_dict['fname']
        spectral_store_mode = spectral_store_dict.get('mode', 'all')
        spectral_store_clobber = spectral_store_dict.get('clobber', False)
        spectral_store = SpectralStore(spectral_store_fname,
                                       mode=spectral_store_mode,
                                       resume=resume,
                                       clobber=spectral_store_clobber)
    else:
        spectral_store = None

    return cls(optimizer, fitness_function,
               parameter_config=parameter_config,
               default_config=default_config, atom_data=atom_data,
               number_of_samples=number_of_samples,
               max_iterations=max_iterations, fitter_log=fitter_log,
               spectral_store=spectral_store, resume=resume)
def setup(self, request, reference, data_path, atomic_data_fname): """ This method does initial setup of creating configuration and performing a single run of integration test. """ # The last component in dirpath can be extracted as name of setup. self.name = data_path['setup_name'] self.config_file = os.path.join(data_path['config_dirpath'], "config.yml") # Load atom data file separately, pass it for forming tardis config. self.atom_data = AtomData.from_hdf5(atomic_data_fname) # Check whether the atom data file in current run and the atom data # file used in obtaining the reference data are same. # TODO: hard coded UUID for kurucz atom data file, generalize it later. kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7" assert self.atom_data.uuid1 == kurucz_data_file_uuid1 # Create a Configuration through yaml file and atom data. tardis_config = Configuration.from_yaml( self.config_file, atom_data=self.atom_data) # Check whether current run is with less packets. if request.config.getoption("--less-packets"): less_packets = request.config.integration_tests_config['less_packets'] tardis_config['montecarlo']['no_of_packets'] = ( less_packets['no_of_packets'] ) tardis_config['montecarlo']['last_no_of_packets'] = ( less_packets['last_no_of_packets'] ) # We now do a run with prepared config and get radial1d model. self.result = Radial1DModel(tardis_config) # If current test run is just for collecting reference data, store the # output model to HDF file, save it at specified path. Skip all tests. # Else simply perform the run and move further for performing # assertions. if request.config.getoption("--generate-reference"): run_radial1d(self.result, hdf_path_or_buf=os.path.join( data_path['gen_ref_dirpath'], "{0}.h5".format(self.name) )) pytest.skip("Reference data saved at {0}".format( data_path['gen_ref_dirpath'] )) else: run_radial1d(self.result) # Get the reference data through the fixture. self.reference = reference
def setup(self):
    """
    Initial setup: build the configuration and perform a single run of
    the (slow) integration test, then load the baseline data the test
    assertions compare against.

    Sets (among others): ``self.obtained_radial1d_model``,
    ``self.expected_ndarrays`` and ``self.expected_quantities``.
    """
    self.config_file = data_path("config_w7.yml")
    self.abundances = data_path("abundancies_w7.dat")
    self.densities = data_path("densities_w7.dat")

    # First we check whether atom data file exists at desired path.
    self.atom_data_filename = os.path.expanduser(os.path.expandvars(
        pytest.config.getvalue('atomic-dataset')))
    assert os.path.exists(self.atom_data_filename), \
        "{0} atom data file does not exist".format(self.atom_data_filename)

    # The available config file doesn't have file paths of atom data file,
    # densities and abundances profile files as desired. We load the atom
    # data seperately and provide it to tardis_config later. For rest of
    # the two, we form dictionary from the config file and override those
    # parameters by putting file paths of these two files at proper places.
    # Use a context manager so the config file handle is closed
    # deterministically (previously the open() handle leaked).
    with open(self.config_file) as config_fh:
        config_yaml = yaml.load(config_fh)
    config_yaml['model']['abundances']['filename'] = self.abundances
    config_yaml['model']['structure']['filename'] = self.densities

    # Load atom data file separately, pass it for forming tardis config.
    self.atom_data = AtomData.from_hdf5(self.atom_data_filename)

    # Check whether the atom data file in current run and the atom data
    # file used in obtaining the baseline data for slow tests are same.
    # TODO: hard coded UUID for kurucz atom data file, generalize it later.
    kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7"
    assert self.atom_data.uuid1 == kurucz_data_file_uuid1

    # The config hence obtained will be having appropriate file paths.
    tardis_config = Configuration.from_config_dict(config_yaml,
                                                   self.atom_data)

    # We now do a run with prepared config and get radial1d model.
    self.obtained_radial1d_model = Radial1DModel(tardis_config)
    simulation = Simulation(tardis_config)
    simulation.legacy_run_simulation(self.obtained_radial1d_model)

    # The baseline data against which assertions are to be made is ingested
    # from already available compressed binaries (.npz). These will return
    # dictionaries of numpy.ndarrays for performing assertions.
    self.slow_test_data_dir = os.path.join(os.path.expanduser(
        os.path.expandvars(pytest.config.getvalue('slow-test-data'))),
        "w7")
    self.expected_ndarrays = np.load(os.path.join(self.slow_test_data_dir,
                                                  "ndarrays.npz"))
    self.expected_quantities = np.load(os.path.join(self.slow_test_data_dir,
                                                    "quantities.npz"))
def run_tardis(config, atom_data=None):
    """
    This function is one of the core functions to run TARDIS from a given
    config object. It will run the simulation and return the resulting
    Simulation object.

    Parameters
    ----------

    config: ~str or ~dict
        filename of configuration yaml file or dictionary

    atom_data: ~str or ~tardis.atomic.AtomData
        if atom_data is a string it is interpreted as a path to a file storing
        the atomic data. Atomic data to use for this TARDIS simulation. If set to None, the
        atomic data will be loaded according to keywords set in the configuration
        [default=None]

    Returns
    -------
    ~tardis.simulation.Simulation
        The simulation object after the run has completed.
    """
    from tardis.io.config_reader import Configuration
    from tardis.simulation import Simulation
    from tardis.atomic import AtomData

    if atom_data is not None:
        try:
            atom_data = AtomData.from_hdf5(atom_data)
        except TypeError:
            # atom_data was already an AtomData instance, not a path string;
            # keep it as-is (previously a confusing no-op self-assignment).
            pass

    # EAFP dispatch on the config type: a string is treated as a YAML
    # filename; anything else is treated as an already-parsed dict.
    try:
        tardis_config = Configuration.from_yaml(config)
    except TypeError:
        tardis_config = Configuration.from_config_dict(config)

    simulation = Simulation.from_config(tardis_config, atom_data=atom_data)
    simulation.run()

    return simulation
def atomic_data():
    """Load the Chianti He atomic database bundled with the test data."""
    test_data_dir = os.path.join(tardis.__path__[0], 'tests', 'data')
    return AtomData.from_hdf5(
        os.path.join(test_data_dir, 'chianti_he_db.h5'))
def atomic_data(selected_atoms):
    """Load the bundled Chianti He database prepared for *selected_atoms*."""
    test_data_dir = os.path.join(tardis.__path__[0], 'tests', 'data')
    dataset = AtomData.from_hdf5(
        os.path.join(test_data_dir, 'chianti_he_db.h5'))
    # Restrict the dataset to the requested atoms before handing it out.
    dataset.prepare_atom_data(selected_atoms)
    return dataset
def setup(self, request, reference, data_path): """ This method does initial setup of creating configuration and performing a single run of integration test. """ # The last component in dirpath can be extracted as name of setup. self.name = data_path['setup_name'] self.config_file = os.path.join(data_path['config_dirpath'], "config.yml") # A quick hack to use atom data per setup. Atom data is ingested from # local HDF or downloaded and cached from a url, depending on data_path # keys. atom_data_name = yaml.load(open(self.config_file))['atom_data'] # Get the path to HDF file: if 'atom_data_url' in data_path: # If the atom data is to be ingested from url: atom_data_filepath = download_file(urlparse.urljoin( base=data_path['atom_data_url'], url=atom_data_name), cache=True ) else: # If the atom data is to be ingested from local file: atom_data_filepath = os.path.join( data_path['atom_data_dirpath'], atom_data_name ) # Load atom data file separately, pass it for forming tardis config. self.atom_data = AtomData.from_hdf5(atom_data_filepath) # Check whether the atom data file in current run and the atom data # file used in obtaining the reference data are same. # TODO: hard coded UUID for kurucz atom data file, generalize it later. # kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7" # assert self.atom_data.uuid1 == kurucz_data_file_uuid1 # Create a Configuration through yaml file and atom data. tardis_config = Configuration.from_yaml( self.config_file, atom_data=self.atom_data) # Check whether current run is with less packets. if request.config.getoption("--less-packets"): less_packets = request.config.integration_tests_config['less_packets'] tardis_config['montecarlo']['no_of_packets'] = ( less_packets['no_of_packets'] ) tardis_config['montecarlo']['last_no_of_packets'] = ( less_packets['last_no_of_packets'] ) # We now do a run with prepared config and get radial1d model. 
self.result = Radial1DModel(tardis_config) # If current test run is just for collecting reference data, store the # output model to HDF file, save it at specified path. Skip all tests. # Else simply perform the run and move further for performing # assertions. if request.config.getoption("--generate-reference"): run_radial1d(self.result, hdf_path_or_buf=os.path.join( data_path['gen_ref_dirpath'], "{0}.h5".format(self.name) )) pytest.skip("Reference data saved at {0}".format( data_path['gen_ref_dirpath'] )) else: run_radial1d(self.result) # Get the reference data through the fixture. self.reference = reference
def included_he_atomic_data(test_data_path):
    """Return the Chianti He atomic dataset stored under *test_data_path*."""
    return AtomData.from_hdf5(
        os.path.join(test_data_path, 'chianti_he_db.h5'))
def setup(self, request, reference, data_path, pytestconfig): """ This method does initial setup of creating configuration and performing a single run of integration test. """ # Get capture manager capmanager = pytestconfig.pluginmanager.getplugin('capturemanager') # The last component in dirpath can be extracted as name of setup. self.name = data_path['setup_name'] self.config_file = os.path.join(data_path['config_dirpath'], "config.yml") # A quick hack to use atom data per setup. Atom data is ingested from # local HDF or downloaded and cached from a url, depending on data_path # keys. atom_data_name = yaml.load(open(self.config_file))['atom_data'] # Get the path to HDF file: atom_data_filepath = os.path.join( data_path['atom_data_path'], atom_data_name ) # Load atom data file separately, pass it for forming tardis config. self.atom_data = AtomData.from_hdf5(atom_data_filepath) # Check whether the atom data file in current run and the atom data # file used in obtaining the reference data are same. # TODO: hard coded UUID for kurucz atom data file, generalize it later. # kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7" # assert self.atom_data.uuid1 == kurucz_data_file_uuid1 # Create a Configuration through yaml file and atom data. tardis_config = Configuration.from_yaml(self.config_file) # Check whether current run is with less packets. if request.config.getoption("--less-packets"): less_packets = request.config.integration_tests_config['less_packets'] tardis_config['montecarlo']['no_of_packets'] = ( less_packets['no_of_packets'] ) tardis_config['montecarlo']['last_no_of_packets'] = ( less_packets['last_no_of_packets'] ) # We now do a run with prepared config and get the simulation object. self.result = Simulation.from_config(tardis_config, atom_data=self.atom_data) capmanager.suspendcapture(True) # If current test run is just for collecting reference data, store the # output model to HDF file, save it at specified path. Skip all tests. 
# Else simply perform the run and move further for performing # assertions. self.result.run() if request.config.getoption("--generate-reference"): ref_data_path = os.path.join( data_path['gen_ref_path'], "{0}.h5".format(self.name) ) if os.path.exists(ref_data_path): pytest.skip( 'Reference data {0} does exist and tests will not ' 'proceed generating new data'.format(ref_data_path)) self.result.to_hdf(path_or_buf=ref_data_path, suffix_count=False) pytest.skip("Reference data saved at {0}".format( data_path['gen_ref_path'] )) capmanager.resumecapture() # Get the reference data through the fixture. self.reference = reference
def included_he_atomic_data():
    """Load the Chianti He atomic database shipped with the test suite."""
    import os
    import tardis
    db_path = os.path.join(
        tardis.__path__[0], 'tests', 'data', 'chianti_he_db.h5')
    return AtomData.from_hdf5(db_path)
import os
import sys
import argparse
import numpy as np
import pandas as pd
from tardis.atomic import AtomData

# Module-level atomic dataset shared by the helpers below.
# NOTE(review): from_hdf5() is called without a filename here - presumably
# it falls back to a default dataset path; confirm against AtomData.
atomic_dataset = AtomData.from_hdf5()


def get_atomic_number(element):
    # Return the atomic number of the first atom whose name contains
    # `element` (substring match), or -1 if no atom matches.
    index = -1
    for atomic_no, row in atomic_dataset.atom_data.iterrows():
        if element in row["name"]:
            index = atomic_no
            break
    return index


def extract_file_block(f):
    # Read whitespace-separated floats from `f`, line by line, stopping at
    # the first blank line, and collect them into a flat numpy array.
    qty = []
    for line in f:
        items = line.split()
        if items:
            qty.extend(np.array(items).astype(np.float64))
        else:
            break

    qty = np.array(qty)
    # NOTE(review): no return statement is visible in this chunk - the
    # function body likely continues beyond the visible source.
# Write the assembled atomic datasets into a single HDF5 file, then read
# it back through AtomData and print the resulting tables (Python 2 prints).
with h5py.File(fpath, "w") as f:
    # Each pandas table is stored as a structured-record dataset; plain
    # arrays (basic_atom_data, zeta_data) are stored directly.
    f["basic_atom_data"] = atom_data_h0
    f["ionization_data"] = ionization_data_h0.to_records(index=False)
    f["lines_data"] = lines_cut.to_records(index=False)
    f["levels_data"] = levels_cut.to_records(index=False)
    f["macro_atom_data"] = macro_atom_data_cut.to_records(index=False)
    f["macro_atom_references"] = macro_atom_references_cut.to_records(index=False)
    f["synpp_refs"] = synpp_refs_h0.to_records(index=False)
    f["zeta_data"] = zeta_data_h0
    # Radiation temperature grid corresponding to the zeta_data columns.
    f['zeta_data'].attrs['t_rad'] = np.arange(2000, 42000, 2000)
    f['zeta_data'].attrs['source'] = 'Used with kind permission from Knox Long'
    f.attrs['data_sources'] = data_sources
    f.attrs['database_version'] = 'v0.9'

    # Fingerprint the file: md5 over the raw bytes of every dataset, plus
    # a fresh uuid1 so each generated file is uniquely identifiable.
    md5_hash = hashlib.md5()
    for dataset in f.values():
        md5_hash.update(dataset.value.data)

    uuid1 = uuid.uuid1().hex

    f.attrs['md5'] = md5_hash.hexdigest()
    f.attrs['uuid1'] = uuid1

# Round-trip sanity check: reload the file and dump the tables.
ad = AtomData.from_hdf5(fpath)
print ad.atom_data
print ad.ionization_data
print ad.levels
print ad.lines
print ad.macro_atom_data_all
print ad.macro_atom_references_all
print ad.synpp_refs
import os
import sys
import argparse
import numpy as np
import pandas as pd
from tardis.atomic import AtomData

# Module-level atomic dataset shared by the helpers below.
# NOTE(review): from_hdf5() is called without a filename here - presumably
# it falls back to a default dataset path; confirm against AtomData.
atomic_dataset = AtomData.from_hdf5()


def get_atomic_number(element):
    # Return the atomic number of the first atom whose name contains
    # `element` (substring match), or -1 if no atom matches.
    index = -1
    for atomic_no, row in atomic_dataset.atom_data.iterrows():
        if element in row['name']:
            index = atomic_no
            break
    return index


def extract_file_block(f):
    # Read whitespace-separated floats from `f`, line by line, stopping at
    # the first blank line.
    # NOTE(review): this function appears truncated here - the visible body
    # ends at `break`; the remainder likely continues beyond this chunk.
    qty = []
    for line in f:
        items = line.split()
        if items:
            qty.extend(np.array(items).astype(np.float64))
        else:
            break