def setup(self): """ This method does initial setup of creating configuration and performing a single run of integration test. """ self.config_file = data_path("config_w7.yml") self.abundances = data_path("abundancies_w7.dat") self.densities = data_path("densities_w7.dat") # First we check whether atom data file exists at desired path. self.atom_data_filename = os.path.expanduser(os.path.expandvars( pytest.config.getvalue('atomic-dataset'))) assert os.path.exists(self.atom_data_filename), \ "{0} atom data file does not exist".format(self.atom_data_filename) # The available config file doesn't have file paths of atom data file, # densities and abundances profile files as desired. We load the atom # data seperately and provide it to tardis_config later. For rest of # the two, we form dictionary from the config file and override those # parameters by putting file paths of these two files at proper places. config_yaml = yaml.load(open(self.config_file)) config_yaml['model']['abundances']['filename'] = self.abundances config_yaml['model']['structure']['filename'] = self.densities # Load atom data file separately, pass it for forming tardis config. self.atom_data = AtomData.from_hdf5(self.atom_data_filename) # Check whether the atom data file in current run and the atom data # file used in obtaining the baseline data for slow tests are same. # TODO: hard coded UUID for kurucz atom data file, generalize it later. kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7" assert self.atom_data.uuid1 == kurucz_data_file_uuid1 # The config hence obtained will be having appropriate file paths. tardis_config = Configuration.from_config_dict(config_yaml, self.atom_data) # We now do a run with prepared config and get radial1d model. self.obtained_radial1d_model = Radial1DModel(tardis_config) simulation = Simulation(tardis_config) simulation.legacy_run_simulation(self.obtained_radial1d_model) # The baseline data against which assertions are to be made is ingested # from already available compressed binaries (.npz). These will return # dictionaries of numpy.ndarrays for performing assertions. self.slow_test_data_dir = os.path.join(os.path.expanduser( os.path.expandvars(pytest.config.getvalue('slow-test-data'))), "w7") self.expected_ndarrays = np.load(os.path.join(self.slow_test_data_dir, "ndarrays.npz")) self.expected_quantities = np.load(os.path.join(self.slow_test_data_dir, "quantities.npz"))
def setup(self):
    self.atom_data_filename = os.path.expanduser(
        os.path.expandvars(pytest.config.getvalue('atomic-dataset')))
    assert os.path.exists(self.atom_data_filename), (
        "{0} atomic datafile does not seem to exist".format(
            self.atom_data_filename))
    self.config_yaml = yaml.load(
        open('tardis/io/tests/data/tardis_configv1_verysimple.yml'))
    self.config_yaml['atom_data'] = self.atom_data_filename

    tardis_config = Configuration.from_config_dict(self.config_yaml)
    self.model = Radial1DModel(tardis_config)
    self.simulation = Simulation(tardis_config)
    self.simulation.legacy_run_simulation(self.model)

def runner(
        self, config, atomic_data_fname,
        tardis_ref_data, generate_reference):
    config.atom_data = atomic_data_fname
    self.name = (self._name +
                 "_{:s}".format(config.plasma.line_interaction_type))
    if config.spectrum.integrated.interpolate_shells > 0:
        self.name += '_interp'

    simulation = Simulation.from_config(config)
    simulation.run()

    if not generate_reference:
        return simulation.runner
    else:
        simulation.runner.hdf_properties = [
            'j_blue_estimator',
            'spectrum',
            'spectrum_integrated'
        ]
        simulation.runner.to_hdf(
            tardis_ref_data,
            '',
            self.name)
        pytest.skip(
            'Reference data was generated during this run.')

def setup(self):
    self.atom_data_filename = os.path.expanduser(os.path.expandvars(
        pytest.config.getvalue('atomic-dataset')))
    assert os.path.exists(self.atom_data_filename), (
        "{0} atomic datafile does not seem to exist".format(
            self.atom_data_filename))
    self.config_yaml = yaml_load_config_file(
        'tardis/io/tests/data/tardis_configv1_verysimple.yml')
    self.config_yaml['atom_data'] = self.atom_data_filename

    tardis_config = Configuration.from_config_dict(self.config_yaml)
    self.simulation = Simulation.from_config(tardis_config)
    self.simulation.run()

def runner(self, atomic_data_fname, tardis_ref_data, generate_reference):
    config = Configuration.from_yaml(
        'tardis/io/tests/data/tardis_configv1_verysimple.yml')
    config['atom_data'] = atomic_data_fname

    simulation = Simulation.from_config(config)
    simulation.run()

    if not generate_reference:
        return simulation.runner
    else:
        simulation.runner.hdf_properties = [
            'j_blue_estimator',
            'spectrum',
            'spectrum_virtual'
        ]
        simulation.runner.to_hdf(tardis_ref_data, '', self.name)
        pytest.skip('Reference data was generated during this run.')

def runner(self, atomic_data_fname, tardis_ref_data, generate_reference):
    config = Configuration.from_yaml(
        "tardis/io/tests/data/tardis_configv1_verysimple.yml")
    config["atom_data"] = atomic_data_fname

    simulation = Simulation.from_config(config)
    simulation.run()

    if not generate_reference:
        return simulation.runner
    else:
        simulation.runner.hdf_properties = [
            "j_blue_estimator",
            "spectrum",
            "spectrum_virtual",
        ]
        simulation.runner.to_hdf(tardis_ref_data, "", self.name)
        pytest.skip("Reference data was generated during this run.")

def runner(self, config, atomic_data_fname, tardis_ref_data,
           generate_reference):
    config.atom_data = atomic_data_fname
    self.name = self._name + f"_{config.plasma.line_interaction_type:s}"
    if config.spectrum.integrated.interpolate_shells > 0:
        self.name += "_interp"

    simulation = Simulation.from_config(config)
    simulation.run()

    if not generate_reference:
        return simulation.runner
    else:
        simulation.runner.hdf_properties = [
            "j_blue_estimator",
            "spectrum",
            "spectrum_integrated",
        ]
        simulation.runner.to_hdf(tardis_ref_data, "", self.name)
        pytest.skip("Reference data was generated during this run.")

def runner(
        self, atomic_data_fname, tardis_ref_data, generate_reference):
    config = Configuration.from_yaml(
        'tardis/io/tests/data/tardis_configv1_verysimple.yml')
    config['atom_data'] = atomic_data_fname

    simulation = Simulation.from_config(config)
    simulation.run()

    if not generate_reference:
        return simulation.runner
    else:
        simulation.runner.hdf_properties = [
            'j_blue_estimator',
            'spectrum',
            'spectrum_virtual'
        ]
        simulation.runner.to_hdf(
            tardis_ref_data,
            '',
            self.name)
        pytest.skip(
            'Reference data was generated during this run.')

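# A minimal sketch (hypothetical, not from the source) of a test method that
# consumes the `runner` fixture above. It assumes `tardis_ref_data` is an open
# pandas HDFStore and that the reference values were written under `self.name`
# by `runner.to_hdf(tardis_ref_data, '', self.name)` during an earlier
# reference-generation run.
def test_j_blue_estimator(self, runner, tardis_ref_data):
    expected = tardis_ref_data[self.name + '/j_blue_estimator']
    np.testing.assert_allclose(runner.j_blue_estimator, expected.values)
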
class TestSimpleRun():
    """
    Very simple run
    """

    @classmethod
    @pytest.fixture(scope="class", autouse=True)
    def setup(self):
        self.atom_data_filename = os.path.expanduser(
            os.path.expandvars(pytest.config.getvalue('atomic-dataset')))
        assert os.path.exists(self.atom_data_filename), (
            "{0} atomic datafile does not seem to exist".format(
                self.atom_data_filename))
        self.config_yaml = yaml.load(
            open('tardis/io/tests/data/tardis_configv1_verysimple.yml'))
        self.config_yaml['atom_data'] = self.atom_data_filename

        tardis_config = Configuration.from_config_dict(self.config_yaml)
        self.model = Radial1DModel(tardis_config)
        self.simulation = Simulation(tardis_config)
        self.simulation.legacy_run_simulation(self.model)

    def test_j_blue_estimators(self):
        j_blue_estimator = np.load(
            data_path('simple_test_j_blue_estimator.npy'))

        np.testing.assert_allclose(self.model.runner.j_blue_estimator,
                                   j_blue_estimator)

    def test_spectrum(self):
        luminosity_density = np.load(
            data_path('simple_test_luminosity_density_lambda.npy'))
        luminosity_density = luminosity_density * u.Unit('erg / (Angstrom s)')

        np.testing.assert_allclose(
            self.model.spectrum.luminosity_density_lambda,
            luminosity_density)

    def test_virtual_spectrum(self):
        virtual_luminosity_density = np.load(
            data_path('simple_test_virtual_luminosity_density_lambda.npy'))
        virtual_luminosity_density = virtual_luminosity_density * u.Unit(
            'erg / (Angstrom s)')

        np.testing.assert_allclose(
            self.model.spectrum_virtual.luminosity_density_lambda,
            virtual_luminosity_density)

    def test_plasma_properties(self):
        pass

    def test_runner_properties(self):
        """Tests whether a number of runner attributes exist and also
        verifies their types.

        Currently, runner attributes needed to call the model routine
        to_hdf5 are checked.
        """
        virt_type = np.ndarray

        props_required_by_modeltohdf5 = dict([
            ("virt_packet_last_interaction_type", virt_type),
            ("virt_packet_last_line_interaction_in_id", virt_type),
            ("virt_packet_last_line_interaction_out_id", virt_type),
            ("virt_packet_last_interaction_in_nu", virt_type),
            ("virt_packet_nus", virt_type),
            ("virt_packet_energies", virt_type),
        ])

        required_props = props_required_by_modeltohdf5.copy()

        for prop, prop_type in required_props.items():
            assert type(getattr(self.model.runner, prop)) == prop_type, (
                "wrong type of attribute '{}': expected {}, found {}".format(
                    prop, prop_type, type(getattr(self.model.runner, prop))))

    def test_legacy_model_properties(self):
        """Tests whether a number of model attributes exist and also
        verifies their types.

        Currently, model attributes needed to run the gui and to call the
        model routine to_hdf5 are checked.

        Notes
        -----
        The list of properties may be incomplete.
        """
        props_required_by_gui = dict([
            ("converged", bool),
            ("iterations_executed", int),
            ("iterations_max_requested", int),
            ("current_no_of_packets", int),
            ("no_of_packets", int),
            ("no_of_virtual_packets", int),
        ])

        props_required_by_tohdf5 = dict([
            ("runner", MontecarloRunner),
            ("plasma_array", LegacyPlasmaArray),
            ("last_line_interaction_in_id", np.ndarray),
            ("last_line_interaction_out_id", np.ndarray),
            ("last_line_interaction_shell_id", np.ndarray),
            ("last_line_interaction_in_id", np.ndarray),
            ("last_line_interaction_angstrom", u.quantity.Quantity),
        ])

        required_props = props_required_by_gui.copy()
        required_props.update(props_required_by_tohdf5)

        for prop, prop_type in required_props.items():
            assert type(getattr(self.model, prop)) == prop_type, (
                "wrong type of attribute '{}': expected {}, found {}".format(
                    prop, prop_type, type(getattr(self.model, prop))))

class TestSimpleRun():
    """
    Very simple run
    """

    @classmethod
    @pytest.fixture(scope="class", autouse=True)
    def setup(self):
        self.atom_data_filename = os.path.expanduser(os.path.expandvars(
            pytest.config.getvalue('atomic-dataset')))
        assert os.path.exists(self.atom_data_filename), (
            "{0} atomic datafile does not seem to exist".format(
                self.atom_data_filename))
        self.config_yaml = yaml.load(open(
            'tardis/io/tests/data/tardis_configv1_verysimple.yml'))
        self.config_yaml['atom_data'] = self.atom_data_filename

        tardis_config = Configuration.from_config_dict(self.config_yaml)
        self.model = Radial1DModel(tardis_config)
        self.simulation = Simulation(tardis_config)
        self.simulation.legacy_run_simulation(self.model)

    def test_spectrum(self):
        luminosity_density = np.load(
            data_path('simple_test_luminosity_density_lambda.npy'))
        luminosity_density = luminosity_density * u.Unit(
            'erg / (Angstrom s)')

        np.testing.assert_allclose(
            self.model.spectrum.luminosity_density_lambda,
            luminosity_density)

    def test_virtual_spectrum(self):
        virtual_luminosity_density = np.load(
            data_path('simple_test_virtual_luminosity_density_lambda.npy'))
        virtual_luminosity_density = virtual_luminosity_density * u.Unit(
            'erg / (Angstrom s)')

        np.testing.assert_allclose(
            self.model.spectrum_virtual.luminosity_density_lambda,
            virtual_luminosity_density)

    def test_plasma_properties(self):
        pass

    def test_runner_properties(self):
        """Tests whether a number of runner attributes exist and also
        verifies their types.

        Currently, runner attributes needed to call the model routine
        to_hdf5 are checked.
        """
        if self.model.runner.virt_logging > 0:
            virt_type = np.ndarray
        else:
            virt_type = type(None)

        props_required_by_modeltohdf5 = dict([
            ("virt_packet_last_interaction_type", virt_type),
            ("virt_packet_last_line_interaction_in_id", virt_type),
            ("virt_packet_last_line_interaction_out_id", virt_type),
            ("virt_packet_last_interaction_in_nu", virt_type),
            ("virt_packet_nus", virt_type),
            ("virt_packet_energies", virt_type),
        ])

        required_props = props_required_by_modeltohdf5.copy()

        for prop, prop_type in required_props.items():
            assert type(getattr(self.model.runner, prop)) == prop_type, (
                "wrong type of attribute '{}': expected {}, found {}".format(
                    prop, prop_type, type(getattr(self.model.runner, prop))))

    def test_legacy_model_properties(self):
        """Tests whether a number of model attributes exist and also
        verifies their types.

        Currently, model attributes needed to run the gui and to call the
        model routine to_hdf5 are checked.

        Notes
        -----
        The list of properties may be incomplete.
        """
        props_required_by_gui = dict([
            ("converged", bool),
            ("iterations_executed", int),
            ("iterations_max_requested", int),
            ("current_no_of_packets", int),
            ("no_of_packets", int),
            ("no_of_virtual_packets", int),
        ])

        props_required_by_tohdf5 = dict([
            ("runner", MontecarloRunner),
            ("plasma_array", LegacyPlasmaArray),
            ("last_line_interaction_in_id", np.ndarray),
            ("last_line_interaction_out_id", np.ndarray),
            ("last_line_interaction_shell_id", np.ndarray),
            ("last_line_interaction_in_id", np.ndarray),
            ("last_line_interaction_angstrom", u.quantity.Quantity),
        ])

        required_props = props_required_by_gui.copy()
        required_props.update(props_required_by_tohdf5)

        for prop, prop_type in required_props.items():
            assert type(getattr(self.model, prop)) == prop_type, (
                "wrong type of attribute '{}': expected {}, found {}".format(
                    prop, prop_type, type(getattr(self.model, prop))))