def make_iso_table(
    project,
    oiso=None,
    logtmin=6.0,
    logtmax=10.13,
    dlogt=0.05,
    z=[0.0152],
    iso_fname=None,
):
    """
    The isochrone tables are loaded (downloading if necessary)

    Parameters
    ----------
    project : str
        project name

    oiso : isochrone.Isochrone object
        contains the full isochrones information

    logtmin : float
        log-age min

    logtmax : float
        log-age max

    dlogt : float
        log-age step to request

    z : float or sequence
        list of metallicity values, where the default (Z=0.0152) is the
        adopted Z_sun for the PARSEC/COLIBRI models

    iso_fname : str, optional
        filename for the saved isochrone table; defaults to
        "project/project_iso.csv"

    Returns
    -------
    iso_fname : str
        name of saved file

    oiso : isochrone.Isochrone object
        contains the full isochrones information
    """
    if iso_fname is None:
        iso_fname = "%s/%s_iso.csv" % (project, project)
    if not os.path.isfile(iso_fname):
        if oiso is None:
            oiso = isochrone.PadovaWeb()

        t = oiso._get_t_isochrones(max(5.0, logtmin), min(10.13, logtmax), dlogt, z)
        t.header["NAME"] = "{0} Isochrones".format("_".join(iso_fname.split("_")[:-1]))
        print("{0} Isochrones".format("_".join(iso_fname.split("_")[:-1])))

        t.write(iso_fname)

    # read in the isochrone data from the file
    # not sure why this is needed, but reproduces previous ezpipe method
    oiso = ezIsoch(iso_fname)

    return (iso_fname, oiso)
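# A minimal usage sketch (not from the original source): how make_iso_table above
# might be invoked with its default age grid; the project name "example_project"
# is a hypothetical placeholder used only for illustration.
def _example_make_iso_table():
    """Build (or load from cache) the default PARSEC/COLIBRI isochrone table."""
    iso_fname, oiso = make_iso_table(
        "example_project",
        logtmin=6.0,    # log10(age/yr) of the youngest isochrones requested
        logtmax=10.13,  # log10(age/yr) of the oldest isochrones requested
        dlogt=0.05,     # step in log age
        z=[0.0152],     # adopted solar metallicity for PARSEC/COLIBRI
    )
    # the table is cached as "example_project/example_project_iso.csv"
    return iso_fname, oiso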
def test_make_kurucz_tlusty_spectral_grid(self):
    """
    Generate the spectral grid based on the Kurucz and Tlusty stellar
    atmosphere models, using a cached set of isochrones, and compare the
    result to a cached version.
    """
    # read in the cached isochrones
    oiso = ezIsoch(self.iso_fname_cache)

    # calculate the redshift
    redshift = (self.settings.velocity / const.c).decompose().value

    # make the spectral grid
    spec_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
    (spec_fname, g) = make_spectral_grid(
        "test",
        oiso,
        osl=self.settings.osl,
        redshift=redshift,
        distance=self.settings.distances,
        distance_unit=self.settings.distance_unit,
        spec_fname=spec_fname,
        # filterLib=filter_fname,
        extLaw=self.settings.extLaw,
        add_spectral_properties_kwargs=self.settings.add_spectral_properties_kwargs,
    )

    # compare the new to the cached version
    compare_hdf5(self.spec_fname_cache, spec_fname)
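# A minimal sketch (not from the original test class) of how the cached fixtures
# referenced above (self.iso_fname_cache and self.spec_fname_cache) might be
# fetched.  It reuses the download_rename helper and the cache filenames that the
# standalone test below uses; the test-class wiring and the self.settings object
# are assumptions left out of this sketch.
def _example_fetch_spec_grid_caches():
    """Download the cached isochrone table and spectral grid used for comparison."""
    iso_fname_cache = download_rename("beast_example_phat_iso.csv")
    spec_fname_cache = download_rename("beast_example_phat_spec_grid.hd5")
    return iso_fname_cache, spec_fname_cache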
def test_make_kurucz_tlusty_spectral_grid():
    # download the needed files
    kurucz_fname = download_rename("kurucz2004.grid.fits")
    tlusty_fname = download_rename("tlusty.lowres.grid.fits")
    filter_fname = download_rename("filters.hd5")
    iso_fname = download_rename("beast_example_phat_iso.csv")

    # download cached version of spectral grid
    spec_fname_cache = download_rename("beast_example_phat_spec_grid.hd5")

    ################
    # generate the same spectral grid from the code

    # read in the cached isochrones
    oiso = ezIsoch(iso_fname)

    # define the distance
    distances = [24.47]
    distance_unit = units.mag

    velocity = -300 * units.km / units.s
    redshift = (velocity / const.c).decompose().value

    # define the spectral libraries to use
    osl = stellib.Tlusty(filename=tlusty_fname) + stellib.Kurucz(filename=kurucz_fname)

    # define the extinction curve to use
    extLaw = extinction.Gordon16_RvFALaw()

    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    add_spectral_properties_kwargs = dict(filternames=filters)

    spec_fname = "/tmp/beast_example_phat_spec_grid.hd5"
    spec_fname, g = make_spectral_grid(
        "test",
        oiso,
        osl=osl,
        redshift=redshift,
        distance=distances,
        distance_unit=distance_unit,
        spec_fname=spec_fname,
        filterLib=filter_fname,
        extLaw=extLaw,
        add_spectral_properties_kwargs=add_spectral_properties_kwargs,
    )

    # compare the new to the cached version
    compare_hdf5(spec_fname_cache, spec_fname)
def create_physicsmodel(nsubs=1, nprocs=1, subset=[None, None]):
    """
    Create the physics model grid.  If nsubs > 1, this will make sub-grids.

    Parameters
    ----------
    nsubs : int (default=1)
        number of subgrids to split the physics model into

    nprocs : int (default=1)
        Number of parallel processes to use
        (currently only implemented for subgrids)

    subset : list of two ints (default=[None, None])
        Only process subgrids in the range [start, stop].
        (only relevant if nsubs > 1)
    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)

    # check input parameters
    verify_params.verify_input_format(datamodel)

    # filename for the SED grid
    modelsedgrid_filename = "%s/%s_seds.grid.hd5" % (
        datamodel.project,
        datamodel.project,
    )

    # grab the current subgrid slice
    subset_slice = slice(subset[0], subset[1])

    # make sure the project directory exists
    create_project_dir(datamodel.project)

    # download and load the isochrones
    (iso_fname, oiso) = make_iso_table(
        datamodel.project,
        oiso=datamodel.oiso,
        logtmin=datamodel.logt[0],
        logtmax=datamodel.logt[1],
        dlogt=datamodel.logt[2],
        z=datamodel.z,
    )

    # remove the isochrone points with logL=-9.999
    oiso = ezIsoch(oiso.selectWhere("*", "logL > -9"))

    if hasattr(datamodel, "add_spectral_properties_kwargs"):
        extra_kwargs = datamodel.add_spectral_properties_kwargs
    else:
        extra_kwargs = None

    if hasattr(datamodel, "velocity"):
        redshift = (datamodel.velocity / const.c).decompose().value
    else:
        redshift = 0

    # generate the spectral library (no dust extinction)
    (spec_fname, g_spec) = make_spectral_grid(
        datamodel.project,
        oiso,
        osl=datamodel.osl,
        redshift=redshift,
        distance=datamodel.distances,
        distance_unit=datamodel.distance_unit,
        extLaw=datamodel.extLaw,
        add_spectral_properties_kwargs=extra_kwargs,
    )

    # add the stellar priors as weights
    # also computes the grid weights for the stellar part
    (pspec_fname, g_pspec) = add_stellar_priors(
        datamodel.project,
        g_spec,
        age_prior_model=datamodel.age_prior_model,
        mass_prior_model=datamodel.mass_prior_model,
        met_prior_model=datamodel.met_prior_model,
    )

    # --------------------
    # no subgrids
    # --------------------

    if nsubs == 1:
        # generate the SED grid by integrating the filter response functions
        #   effect of dust extinction applied before filter integration
        #   also computes the dust priors as weights
        make_extinguished_sed_grid(
            datamodel.project,
            g_pspec,
            datamodel.filters,
            extLaw=datamodel.extLaw,
            av=datamodel.avs,
            rv=datamodel.rvs,
            fA=datamodel.fAs,
            rv_prior_model=datamodel.rv_prior_model,
            av_prior_model=datamodel.av_prior_model,
            fA_prior_model=datamodel.fA_prior_model,
            spec_fname=modelsedgrid_filename,
            add_spectral_properties_kwargs=extra_kwargs,
        )

    # --------------------
    # use subgrids
    # --------------------

    if nsubs > 1:
        # Work with the whole grid up to there (otherwise, priors need a
        # rework - they don't like having only a subset of the parameter
        # space, especially when there's only one age for example)

        # Make subgrids, by splitting the spectral grid into equal sized pieces
        custom_sub_pspec = subgridding_tools.split_grid(pspec_fname, nsubs)

        file_prefix = "{0}/{0}_".format(datamodel.project)

        # function to process the subgrids individually
        def gen_subgrid(i, sub_name):
            sub_g_pspec = FileSEDGrid(sub_name)
            sub_seds_fname = "{}seds.gridsub{}.hd5".format(file_prefix, i)

            # generate the SED grid by integrating the filter response functions
            #   effect of dust extinction applied before filter integration
            #   also computes the dust priors as weights
            (sub_seds_fname, sub_g_seds) = make_extinguished_sed_grid(
                datamodel.project,
                sub_g_pspec,
                datamodel.filters,
                extLaw=datamodel.extLaw,
                av=datamodel.avs,
                rv=datamodel.rvs,
                fA=datamodel.fAs,
                rv_prior_model=datamodel.rv_prior_model,
                av_prior_model=datamodel.av_prior_model,
                fA_prior_model=datamodel.fA_prior_model,
                add_spectral_properties_kwargs=extra_kwargs,
                seds_fname=sub_seds_fname,
            )

            return sub_seds_fname

        # run the above function
        par_tuples = [
            (i, sub_name) for i, sub_name in enumerate(custom_sub_pspec)
        ][subset_slice]

        parallel_wrapper(gen_subgrid, par_tuples, nprocs=nprocs)

        # Save a list of subgrid names that we expect to see
        required_names = [
            "{}seds.gridsub{}.hd5".format(file_prefix, i) for i in range(nsubs)
        ]

        outdir = os.path.join(".", datamodel.project)
        subgrid_names_file = os.path.join(outdir, "subgrid_fnames.txt")

        with open(subgrid_names_file, "w") as fname_file:
            for fname in required_names:
                fname_file.write(fname + "\n")
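# A minimal usage sketch (not from the original source): how create_physicsmodel
# above might be driven for a subgridded run.  The subgrid and process counts are
# illustrative values only, and the datamodel module is assumed to be fully
# configured (project, filters, priors, dust grids, ...) before the call.
def _example_create_physicsmodel_subgrids():
    # split the physics model into 4 subgrids and build them with 2 processes
    create_physicsmodel(nsubs=4, nprocs=2)
    # or only (re)build the first two subgrids of a previously split grid
    create_physicsmodel(nsubs=4, nprocs=1, subset=[0, 2])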