def fit_submodel(modelsedgridfile):
    """
    Fit the (module-level) observed catalog against one subgrid.

    All input and output file names are derived from ``modelsedgridfile``
    by string substitution; results are written to the lnp/stats/pdf1d
    files and nothing is returned.
    """
    # Input files: the trimmed SED grid and its matching trimmed noise
    # model.  Because the first substitution yields 'seds_trim', the
    # second one produces 'noisemodel_trim'.
    trim_sedfile = modelsedgridfile.replace('seds', 'seds_trim')
    trim_noisefile = trim_sedfile.replace('seds', 'noisemodel')

    # Output files, all derived from the untrimmed grid file name.
    lnpfile = modelsedgridfile.replace('seds', 'lnp')
    statsfile = modelsedgridfile.replace('seds', 'stats').replace('.hd5', '.fits')
    pdf1dfile = statsfile.replace('stats', 'pdf1d')

    # Load the subgrid SEDs and the subgrid noise model.
    modelsedgrid = FileSEDGrid(trim_sedfile)
    noisemodel_vals = noisemodel.get_noisemodelcat(trim_noisefile)

    # do_not_normalize is required so that subgrid results can later be
    # merged consistently.
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        modelsedgrid,
        resume=args.resume,
        threshold=-10.,
        save_every_npts=100,
        lnp_npts=60,
        stats_outname=statsfile,
        pdf1d_outname=pdf1dfile,
        grid_info_dict=grid_info_dict,
        lnp_outname=lnpfile,
        do_not_normalize=True,
    )
    print('Done fitting on grid ' + trim_sedfile)
def test_fit_grid():
    """
    Regression test: fit downloaded observations against the downloaded
    trimmed grid/noise model and compare the stats and pdf1d outputs to
    cached reference files.
    """
    # Remote test data: vega reference, photometry, and pre-trimmed grids.
    vega_fname = download_rename('vega.hd5')
    obs_fname = download_rename('b15_4band_det_27_A.fits')
    noise_trim_fname = download_rename(
        'beast_example_phat_noisemodel_trim.grid.hd5')
    seds_trim_fname = download_rename('beast_example_phat_seds_trim.grid.hd5')

    # Cached reference results to compare against.
    cached_stats = download_rename('beast_example_phat_stats.fits')
    cached_pdf1d = download_rename('beast_example_phat_pdf1d.fits')

    ################

    # AST noise model for the trimmed grid.
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_trim_fname)

    # Observed catalog: six HST bands.
    filters = ['HST_WFC3_F275W', 'HST_WFC3_F336W', 'HST_ACS_WFC_F475W',
               'HST_ACS_WFC_F814W', 'HST_WFC3_F110W', 'HST_WFC3_F160W']
    basefilters = ['F275W', 'F336W', 'F475W', 'F814W', 'F110W', 'F160W']
    obs_colnames = [f.lower() + '_rate' for f in basefilters]
    obsdata = get_obscat(obs_fname, filters, obs_colnames,
                         vega_fname=vega_fname)

    # Fit outputs.
    stats_fname = '/tmp/beast_example_phat_stats.fits'
    pdf1d_fname = '/tmp/beast_example_phat_pdf1d.fits'
    lnp_fname = '/tmp/beast_example_phat_lnp.hd5'

    fit.summary_table_memory(obsdata, noisemodel_vals, seds_trim_fname,
                             threshold=-10., save_every_npts=100,
                             lnp_npts=60, stats_outname=stats_fname,
                             pdf1d_outname=pdf1d_fname,
                             lnp_outname=lnp_fname)

    # The stats table must match the cached reference exactly.
    compare_tables(Table.read(cached_stats), Table.read(stats_fname))

    # lnp files not checked as they are randomly sparsely sampled
    # hence will be different every time the fitting is run

    # The pdf1d file must match the cached reference exactly.
    compare_fits(cached_pdf1d, pdf1d_fname)
def fit_submodel(modelsedgridfile):
    """
    Fit the (module-level) observed catalog against one subgrid.

    Input (trimmed grid/noise model) and output (lnp/stats/pdf1d) file
    names are derived from ``modelsedgridfile``.  When ``args.dens_bin``
    is set, all files are redirected into ``bin_subfolder``.  Failures
    are swallowed only when ``args.ignore_missing_subresults`` is set.
    """
    # input files
    trimmed_modelsedgridfile = modelsedgridfile.replace("seds", "seds_trim")
    # 'seds_trim' -> 'noisemodel_trim'
    trimmed_noisemodelfile = trimmed_modelsedgridfile.replace(
        "seds", "noisemodel")

    # output files
    lnpfile = modelsedgridfile.replace("seds", "lnp")
    statsfile = modelsedgridfile.replace("seds", "stats")
    statsfile = statsfile.replace(".hd5", ".fits")
    pdf1dfile = statsfile.replace("stats", "pdf1d")

    if args.dens_bin is not None:
        # Put everything in the right subfolder
        (
            trimmed_modelsedgridfile,
            trimmed_noisemodelfile,
            lnpfile,
            statsfile,
            pdf1dfile,
        ) = [
            os.path.join(bin_subfolder, f)
            for f in (
                trimmed_modelsedgridfile,
                trimmed_noisemodelfile,
                lnpfile,
                statsfile,
                pdf1dfile,
            )
        ]

    # load the subgrid seds and subgrid noisemodel
    modelsedgrid = FileSEDGrid(trimmed_modelsedgridfile)
    noisemodel_vals = noisemodel.get_noisemodelcat(trimmed_noisemodelfile)

    try:
        # do_not_normalize is required so that subgrid results can later
        # be merged consistently
        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            modelsedgrid,
            resume=args.resume,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=60,
            stats_outname=statsfile,
            pdf1d_outname=pdf1dfile,
            grid_info_dict=grid_info_dict,
            lnp_outname=lnpfile,
            do_not_normalize=True,
        )
        print("Done fitting on grid " + trimmed_modelsedgridfile)
    except Exception:
        # FIX: bare `raise` re-raises the active exception with its
        # original traceback intact (the old `raise e` added a frame).
        if not args.ignore_missing_subresults:
            raise
def test_fit_grid(self):
    """
    Fit a cached version of the observations with cached version of the
    trimmed sed grid and noisemodel and compare the result to cached
    versions of the stats and pdf1d files.
    """
    # AST noise model for the cached trimmed grid
    noisemodel_vals = noisemodel.get_noisemodelcat(
        self.noise_trim_fname_cache)

    # cached observed catalog
    obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                           self.settings.obs_colnames)

    def scratch_path(suffix):
        # NamedTemporaryFile is used only to mint a unique scratch path
        return tempfile.NamedTemporaryFile(suffix=suffix).name

    # one scratch output file per fit product
    stats_fname = scratch_path(".fits")
    pdf1d_fname = scratch_path(".fits")
    pdf2d_fname = scratch_path(".fits")
    lnp_fname = scratch_path(".hd5")

    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        self.seds_trim_fname_cache,
        threshold=-10.0,
        save_every_npts=100,
        lnp_npts=500,
        max_nbins=200,
        stats_outname=stats_fname,
        pdf1d_outname=pdf1d_fname,
        pdf2d_outname=pdf2d_fname,
        pdf2d_param_list=["Av", "M_ini", "logT"],
        lnp_outname=lnp_fname,
        surveyname=self.settings.surveyname,
    )

    # the stats table must match the cached reference exactly
    compare_tables(Table.read(self.stats_fname_cache),
                   Table.read(stats_fname))

    # lnp files not checked as they are randomly sparsely sampled
    # hence will be different every time the fitting is run

    # the pdf1d/pdf2d files must match the cached references exactly
    compare_fits(self.pdf1d_fname_cache, pdf1d_fname)
    compare_fits(self.pdf2d_fname_cache, pdf2d_fname)
def run_beast_production(basename, physicsmodel=False, ast=False,
                         observationmodel=False, trim=False, fitting=False,
                         resume=False, source_density='',
                         sub_source_density=''):
    """
    Turns the original command-line version of run_beast_production.py into
    something callable from within a function

    Parameters
    ----------
    basename : string
        name of the gst file (assuming it's located in ./data/)

    For the info related to the other inputs, see the argparse info at the
    bottom
    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters, print what is the problem, stop run_beast
    verify_params.verify_input_format(datamodel)

    # update the filenames as needed for production
    # - photometry sub-file
    datamodel.obsfile = basename.replace(
        '.fits',
        '_with_sourceden' + '_SD_' + source_density.replace('_', '-')
        + '_sub' + sub_source_density + '.fits')
    # - stats files
    stats_filebase = ("%s/%s" % (datamodel.project, datamodel.project)
                      + '_sd' + source_density.replace('_', '-')
                      + '_sub' + sub_source_density)
    sed_trimname = stats_filebase + '_sed_trim.grid.hd5'
    # - trimmed noise model
    noisemodel_trimname = stats_filebase + '_noisemodel_trim.hd5'
    # - SED grid
    # modelsedgrid_filename = "%s/%s_seds.grid.hd5"%(datamodel.project,
    #                                                datamodel.project)
    modelsedgrid_filename = "METAL_seds.grid.hd5"

    print("***run information***")
    print(" project = " + datamodel.project)
    print(" obsfile = " + datamodel.obsfile)
    print(" astfile = " + datamodel.astfile)
    print(" noisefile = " + datamodel.noisefile)
    print(" trimmed sedfile = " + sed_trimname)
    print("trimmed noisefiles = " + noisemodel_trimname)
    print(" stats filebase = " + stats_filebase)

    # make sure the project directory exists
    # (FIX: drop the unused `pdir` local)
    create_project_dir(datamodel.project)

    if physicsmodel:
        # download and load the isochrones
        (iso_fname, oiso) = make_iso_table(datamodel.project,
                                           oiso=datamodel.oiso,
                                           logtmin=datamodel.logt[0],
                                           logtmax=datamodel.logt[1],
                                           dlogt=datamodel.logt[2],
                                           z=datamodel.z)

        if hasattr(datamodel, 'add_spectral_properties_kwargs'):
            extra_kwargs = datamodel.add_spectral_properties_kwargs
        else:
            extra_kwargs = None

        if hasattr(datamodel, 'velocity'):
            redshift = (datamodel.velocity / const.c).decompose().value
        else:
            redshift = 0

        # generate the spectral library (no dust extinction)
        (spec_fname, g_spec) = make_spectral_grid(
            datamodel.project,
            oiso,
            osl=datamodel.osl,
            redshift=redshift,
            distance=datamodel.distances,
            distance_unit=datamodel.distance_unit,
            add_spectral_properties_kwargs=extra_kwargs)

        # add the stellar priors as weights
        # also computes the grid weights for the stellar part
        (pspec_fname, g_pspec) = add_stellar_priors(datamodel.project,
                                                    g_spec)

        # generate the SED grid by integrating the filter response functions
        #   effect of dust extinction applied before filter integration
        #   also computes the dust priors as weights
        (seds_fname, g_seds) = make_extinguished_sed_grid(
            datamodel.project,
            g_pspec,
            datamodel.filters,
            extLaw=datamodel.extLaw,
            av=datamodel.avs,
            rv=datamodel.rvs,
            fA=datamodel.fAs,
            rv_prior_model=datamodel.rv_prior_model,
            av_prior_model=datamodel.av_prior_model,
            fA_prior_model=datamodel.fA_prior_model,
            spec_fname=modelsedgrid_filename,
            add_spectral_properties_kwargs=extra_kwargs)

    if ast:
        N_models = datamodel.ast_models_selected_per_age
        Nfilters = datamodel.ast_bands_above_maglimit
        Nrealize = datamodel.ast_realization_per_model
        mag_cuts = datamodel.ast_maglimit
        obsdata = datamodel.get_obscat(basename, datamodel.filters)

        if len(mag_cuts) == 1:
            tmp_cuts = mag_cuts
            min_mags = np.zeros(len(datamodel.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace('rate', 'vega')
                sfiltername = sfiltername.replace('RATE', 'VEGA')
                keep, = np.where(obsdata[sfiltername] < 99.)
                min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.)

            # max. mags from the gst observation cat.
            mag_cuts = min_mags + tmp_cuts

        outfile = ('./' + datamodel.project + '/' + datamodel.project
                   + '_inputAST.txt')
        outfile_params = ('./' + datamodel.project + '/' + datamodel.project
                          + '_ASTparams.fits')
        chosen_seds = pick_models(modelsedgrid_filename,
                                  datamodel.filters,
                                  mag_cuts,
                                  Nfilter=Nfilters,
                                  N_stars=N_models,
                                  Nrealize=Nrealize,
                                  outfile=outfile,
                                  outfile_params=outfile_params)

        # FIX: truthiness test instead of `== True`
        if datamodel.ast_with_positions:
            separation = datamodel.ast_pixel_distribution
            filename = (datamodel.project + '/' + datamodel.project
                        + '_inputAST.txt')

            if datamodel.ast_reference_image is not None:
                # With reference image, use the background or source
                # density map if available
                if datamodel.ast_density_table is not None:
                    pick_positions_from_map(
                        obsdata,
                        chosen_seds,
                        datamodel.ast_density_table,
                        datamodel.ast_N_bins,
                        datamodel.ast_realization_per_model,
                        outfile=filename,
                        refimage=datamodel.ast_reference_image,
                        refimage_hdu=0,
                        Nrealize=1,
                        set_coord_boundary=datamodel.ast_coord_boundary)
                else:
                    pick_positions(obsdata, filename, separation,
                                   refimage=datamodel.ast_reference_image)
            else:
                # Without reference image, we can only use this function
                if datamodel.ast_density_table is None:
                    pick_positions(obsdata, filename, separation)
                else:
                    print(
                        "To use ast_density_table, ast_reference_image must be specified."
                    )

    if observationmodel:
        print('Generating noise model from ASTs and absflux A matrix')

        # get the modesedgrid on which to generate the noisemodel
        modelsedgrid = FileSEDGrid(modelsedgrid_filename)

        # generate the AST noise model
        noisemodel.make_toothpick_noise_model(
            datamodel.noisefile,
            datamodel.astfile,
            modelsedgrid,
            use_rate=True,
            absflux_a_matrix=datamodel.absflux_a_matrix)

    if trim:
        print('Trimming the model and noise grids')

        # read in the observed data
        obsdata = datamodel.get_obscat(basename, datamodel.filters)

        # get the modesedgrid on which to generate the noisemodel
        modelsedgrid = FileSEDGrid(modelsedgrid_filename)

        # read in the noise model just created
        noisemodel_vals = noisemodel.get_noisemodelcat(datamodel.noisefile)

        # trim the model sedgrid
        trim_grid.trim_models(modelsedgrid, noisemodel_vals, obsdata,
                              sed_trimname, noisemodel_trimname,
                              sigma_fac=3.)

    if fitting:
        # FIX: time.clock() was removed in Python 3.8 (deprecated since
        # 3.3); perf_counter() is the supported replacement for measuring
        # elapsed time
        start_time = time.perf_counter()

        # read in the the AST noise model
        noisemodel_vals = noisemodel.get_noisemodelcat(noisemodel_trimname)

        # read in the observed data
        obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters)

        # output files
        statsfile = stats_filebase + '_stats.fits'
        pdf1dfile = statsfile.replace('stats.fits', 'pdf1d.fits')
        lnpfile = statsfile.replace('stats.fits', 'lnp.hd5')

        fit.summary_table_memory(obsdata, noisemodel_vals, sed_trimname,
                                 resume=resume,
                                 threshold=-10.,
                                 save_every_npts=100,
                                 lnp_npts=500,
                                 stats_outname=statsfile,
                                 pdf1d_outname=pdf1dfile,
                                 lnp_outname=lnpfile,
                                 surveyname=datamodel.surveyname)

        new_time = time.perf_counter()
        print('time to fit: ', (new_time - start_time) / 60., ' min')
def test_merge_pdf1d_stats(self):
    """
    Using cached versions of the observations, sed grid, and noise model,
    split the grids and do the fitting on the subgrids and original grid.
    Merge the results from the subgrids and compare to the results from
    fitting the full grid.
    """

    ######################################
    # STEP 1: GET SOME DATA TO WORK WITH #
    ######################################

    # read in the observed data
    # (self.settings / self.*_cache attributes are presumably populated by
    # the class's test fixture -- TODO confirm against the enclosing class)
    obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                           self.settings.obs_colnames)

    #########################################################################################
    # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
    #########################################################################################
    num_subgrids = 3

    # Split SED grid
    sub_seds_trim_fnames = subgridding_tools.split_grid(
        self.seds_trim_fname_cache, num_subgrids, overwrite=True)

    # Split noise grid (a standardized function does not exist)
    sub_noise_trim_fnames = []

    noisemodel_vals = noisemodel.get_noisemodelcat(
        self.noise_trim_fname_cache)
    slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                              num_subgrids)
    for i, slc in enumerate(slices):
        # write one HDF5 file per subgrid holding the matching slice of
        # each noise-model array (bias/error/completeness)
        outname = self.noise_trim_fname_cache.replace(
            ".hd5", "sub{}.hd5".format(i))
        with tables.open_file(outname, "w") as outfile:
            outfile.create_array(outfile.root, "bias",
                                 noisemodel_vals["bias"][slc])
            outfile.create_array(outfile.root, "error",
                                 noisemodel_vals["error"][slc])
            outfile.create_array(outfile.root, "completeness",
                                 noisemodel_vals["completeness"][slc])
        sub_noise_trim_fnames.append(outname)

    # Collect information about the parameter rangers, to make the pdf1d bins
    # consistent between subgrids
    grid_info_dict = subgridding_tools.reduce_grid_info(
        sub_seds_trim_fnames, sub_noise_trim_fnames, nprocs=1,
        cap_unique=100)

    ##################################################
    # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
    ##################################################

    def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
        # derive one output name per subgrid,
        # e.g. foo.fits -> foogridsub0.fits, foogridsub1.fits, ...
        return [
            base_fname.replace(extension,
                               "gridsub{}{}".format(i, extension))
            for i in range(num_subgrids)
        ]

    # scratch output paths for the merged products
    stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
    lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name

    subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
    subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
    subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname, num_subgrids,
                                             extension=".hd5")

    # fit each subgrid separately
    for i in range(num_subgrids):
        sub_noisemodel_vals = noisemodel.get_noisemodelcat(
            sub_noise_trim_fnames[i])
        fit.summary_table_memory(
            obsdata,
            sub_noisemodel_vals,
            sub_seds_trim_fnames[i],
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=subgrid_stats_fnames[i],
            pdf1d_outname=subgrid_pdf1d_fnames[i],
            lnp_outname=subgrid_lnp_fnames[i],
            grid_info_dict=grid_info_dict,
            do_not_normalize=True,
        )
        # The do_not_normalize option is absolutely crucial!

    # Now merge the results
    merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
        subgrid_pdf1d_fnames, subgrid_stats_fnames)

    # Do a full fit also
    normal_stats = tempfile.NamedTemporaryFile(suffix=".fits").name
    normal_pdf1d = tempfile.NamedTemporaryFile(suffix=".fits").name
    normal_lnp = tempfile.NamedTemporaryFile(suffix=".hd5").name
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        self.seds_trim_fname_cache,
        threshold=-40.0,
        save_every_npts=100,
        lnp_npts=500,
        stats_outname=normal_stats,
        pdf1d_outname=normal_pdf1d,
        lnp_outname=normal_lnp,
        do_not_normalize=True,
    )
    # Here, we also need to use do_not_normalize, otherwise Pmax will be
    # different by a factor

    # CHECKS
    tolerance = 1e-6
    fits_normal = fits.open(normal_pdf1d)
    fits_new = fits.open(merged_pdf1d_fname)

    # merged pdf1d must have the same number of HDUs as the full-grid fit
    if not len(fits_new) == len(fits_normal):
        raise AssertionError()

    # A similar problem to the above will also occur here
    for k in range(1, len(fits_new)):
        qname = fits_new[k].header["EXTNAME"]
        np.testing.assert_allclose(
            fits_new[k].data,
            fits_normal[qname].data,
            rtol=tolerance,
            atol=tolerance,
        )

    # compare the merged stats table against the full-grid stats table
    table_normal = Table.read(normal_stats)
    table_new = Table.read(merged_stats_fname)

    if not len(table_normal) == len(table_new):
        raise AssertionError()

    # These will normally fail, as the merging process can not be made
    # bit-correct due do floating point math (exacerbated by exponentials)
    for c in table_new.colnames:
        if c == "Name" or c == "RA" or c == "DEC":
            # identifier/coordinate columns must match exactly
            np.testing.assert_equal(
                table_normal[c],
                table_new[c],
                err_msg="column {} is not equal".format(c),
            )
        else:
            # numeric columns only need to agree within tolerance
            np.testing.assert_allclose(
                table_normal[c],
                table_new[c],
                rtol=tolerance,
                equal_nan=True,
                err_msg="column {} is not close enough".format(c),
            )
def test_merge_pdf1d_stats():
    """
    Split the cached SED grid and noise model into subgrids, fit each
    subgrid and the full grid, then merge the subgrid pdf1d/stats results
    and compare them to the full-grid results.
    """
    ######################################
    # STEP 1: GET SOME DATA TO WORK WITH #
    ######################################
    vega_fname = download_rename("vega.hd5")
    obs_fname = download_rename("b15_4band_det_27_A.fits")
    noise_trim_fname = download_rename(
        "beast_example_phat_noisemodel_trim.grid.hd5")
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")

    # download cached version of fitting results
    # stats_fname_cache = download_rename('beast_example_phat_stats.fits')
    # pdf1d_fname_cache = download_rename('beast_example_phat_pdf1d.fits')

    # read in the observed data
    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    basefilters = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    obs_colnames = [f.lower() + "_rate" for f in basefilters]
    obsdata = Observations(obs_fname, filters, obs_colnames,
                           vega_fname=vega_fname)

    #########################################################################################
    # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
    #########################################################################################
    num_subgrids = 3

    # Split SED grid
    sub_seds_trim_fnames = subgridding_tools.split_grid(seds_trim_fname,
                                                        num_subgrids,
                                                        overwrite=True)

    # Split noise grid (a standardized function does not exist)
    sub_noise_trim_fnames = []

    noisemodel_vals = get_noisemodelcat(noise_trim_fname)
    slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                              num_subgrids)
    for i, slc in enumerate(slices):
        # one HDF5 file per subgrid with the matching slice of each
        # noise-model array (bias/error/completeness)
        outname = noise_trim_fname.replace(".hd5", "sub{}.hd5".format(i))
        with tables.open_file(outname, "w") as outfile:
            outfile.create_array(outfile.root, "bias",
                                 noisemodel_vals["bias"][slc])
            outfile.create_array(outfile.root, "error",
                                 noisemodel_vals["error"][slc])
            outfile.create_array(outfile.root, "completeness",
                                 noisemodel_vals["completeness"][slc])
        sub_noise_trim_fnames.append(outname)

    # Collect information about the parameter rangers, to make the pdf1d bins
    # consistent between subgrids
    grid_info_dict = subgridding_tools.reduce_grid_info(sub_seds_trim_fnames,
                                                        sub_noise_trim_fnames,
                                                        nprocs=1,
                                                        cap_unique=100)

    ##################################################
    # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
    ##################################################

    def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
        # derive one output name per subgrid,
        # e.g. foo.fits -> foogridsub0.fits, foogridsub1.fits, ...
        return [
            base_fname.replace(extension, "gridsub{}{}".format(i, extension))
            for i in range(num_subgrids)
        ]

    stats_fname = "/tmp/beast_example_phat_stats.fits"
    pdf1d_fname = "/tmp/beast_example_phat_pdf1d.fits"
    lnp_fname = "/tmp/beast_example_phat_lnp.hd5"

    subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
    subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
    subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname, num_subgrids,
                                             extension=".hd5")

    # fit each subgrid separately
    for i in range(num_subgrids):
        sub_noisemodel_vals = get_noisemodelcat(sub_noise_trim_fnames[i])
        fit.summary_table_memory(
            obsdata,
            sub_noisemodel_vals,
            sub_seds_trim_fnames[i],
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=60,
            stats_outname=subgrid_stats_fnames[i],
            pdf1d_outname=subgrid_pdf1d_fnames[i],
            lnp_outname=subgrid_lnp_fnames[i],
            grid_info_dict=grid_info_dict,
            do_not_normalize=True,
        )
        # The do_not_normalize option is absolutely crucial!

    # Now merge the results
    merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
        subgrid_pdf1d_fnames, subgrid_stats_fnames)

    # Do a full fit also
    # NOTE(review): these are written into the current working directory,
    # unlike the /tmp paths above -- consider tempfile-based paths
    normal_stats = "normal_stats.fits"
    normal_pdf1d = "normal_pdf1d.fits"
    normal_lnp = "normal_lnp.hd5"
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        seds_trim_fname,
        threshold=-40.0,
        save_every_npts=100,
        lnp_npts=60,
        stats_outname=normal_stats,
        pdf1d_outname=normal_pdf1d,
        lnp_outname=normal_lnp,
        do_not_normalize=True,
    )
    # Here, we also need to use do_not_normalize, otherwise Pmax will be
    # different by a factor

    # CHECKS
    tolerance = 1e-6
    print("comparing pdf1d")
    # fits_cache = fits.open(pdf1d_fname_cache)
    fits_normal = fits.open(normal_pdf1d)
    fits_new = fits.open(merged_pdf1d_fname)

    # merged pdf1d must have the same number of HDUs as the full-grid fit
    if not len(fits_new) == len(fits_normal):
        raise AssertionError()

    # A similar problem to the above will also occur here
    for k in range(1, len(fits_new)):
        qname = fits_new[k].header["EXTNAME"]
        print(qname)
        np.testing.assert_allclose(fits_new[k].data,
                                   fits_normal[qname].data,
                                   rtol=tolerance,
                                   atol=tolerance)

    print("comparing stats")
    # table_cache = Table.read(stats_fname_cache)
    table_normal = Table.read(normal_stats)
    table_new = Table.read(merged_stats_fname)

    if not len(table_normal) == len(table_new):
        raise AssertionError()

    # These will normally fail, as the merging process can not be made
    # bit-correct due do floating point math (exacerbated by exponentials)
    for c in table_new.colnames:
        print(c)
        if c == "Name" or c == "RA" or c == "DEC":
            # identifier/coordinate columns must match exactly
            np.testing.assert_equal(
                table_normal[c],
                table_new[c],
                err_msg="column {} is not equal".format(c),
            )
        else:
            # numeric columns only need to agree within tolerance
            np.testing.assert_allclose(
                table_normal[c],
                table_new[c],
                rtol=tolerance,
                equal_nan=True,
                err_msg="column {} is not close enough".format(c),
            )
def fit_submodel(
    photometry_file,
    modelsedgrid_file,
    noise_file,
    stats_file,
    pdf_file,
    pdf2d_file,
    pdf2d_param_list,
    lnp_file,
    grid_info_file=None,
    resume=False,
):
    """
    Code to run the SED fitting

    Parameters
    ----------
    photometry_file : string
        path+name of the photometry file

    modelsedgrid_file : string
        path+name of the physics model grid file

    noise_file : string
        path+name of the noise model file

    stats_file : string
        path+name of the file to contain stats output

    pdf_file : string
        path+name of the file to contain 1D PDF output

    pdf2d_file : string
        path+name of the file to contain 2D PDF output

    pdf2d_param_list: list of strings or None
        parameters for which to make 2D PDFs (or None)

    lnp_file : string
        path+name of the file to contain log likelihood output

    grid_info_file : string (default=None)
        path+name for pickle file that contains dictionary with subgrid
        min/max/n_unique (required for a run with subgrids)

    resume : boolean (default=False)
        choose whether to resume existing run or start over

    Returns
    -------
    None
        Results are written to ``stats_file``, ``pdf_file``,
        ``pdf2d_file``, and ``lnp_file``.
        (FIX: the old docstring claimed a noise-file name was returned,
        but the function has no return statement.)
    """

    # read in the photometry catalog
    obsdata = datamodel.get_obscat(photometry_file, datamodel.filters)

    # check if it's a subgrid run by looking in the file name
    subgrid_run = "gridsub" in modelsedgrid_file
    if subgrid_run:
        print("loading grid_info_dict from " + grid_info_file)
        # grid_info_file is produced by this pipeline; pickle is unsafe
        # on untrusted input, so only ever point this at trusted files
        with open(grid_info_file, "rb") as p:
            # FIX: pickle.load(p) instead of pickle.loads(p.read())
            grid_info_dict = pickle.load(p)

    # load the SED grid and noise model
    modelsedgrid = FileSEDGrid(modelsedgrid_file)
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_file)

    # Subgrid runs additionally need the global grid info and must skip
    # per-grid normalization so subgrid results can be merged afterwards.
    # (FIX: the two nearly-identical fit calls are merged into one.)
    extra_fit_kwargs = {}
    if subgrid_run:
        extra_fit_kwargs = dict(grid_info_dict=grid_info_dict,
                                do_not_normalize=True)

    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        modelsedgrid,
        resume=resume,
        threshold=-10.0,
        save_every_npts=100,
        lnp_npts=500,
        stats_outname=stats_file,
        pdf1d_outname=pdf_file,
        pdf2d_outname=pdf2d_file,
        pdf2d_param_list=pdf2d_param_list,
        lnp_outname=lnp_file,
        surveyname=datamodel.surveyname,
        **extra_fit_kwargs,
    )

    print("Done fitting on grid " + modelsedgrid_file)
# read in the the AST noise model noisemodel_vals = noisemodel.get_noisemodelcat(noisemodelfile) # read in the observed data obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters) # output files print(datamodel.project) statsfile = datamodel.project + '/' + datamodel.project + \ '_stats.fits' pdf1dfile = statsfile.replace('stats.fits', 'pdf1d.fits') lnpfile = statsfile.replace('stats.fits', 'lnp.hd5') fit.summary_table_memory(obsdata, noisemodel_vals, modelsedgrid, resume=args.resume, threshold=-10., save_every_npts=100, lnp_npts=60, stats_outname=statsfile, pdf1d_outname=pdf1dfile, lnp_outname=lnpfile) new_time = time.clock() print('time to fit: ', (new_time - start_time) / 60., ' min') # print help if no arguments if not any(vars(args).values()): parser.print_help()
# read in the the AST noise model noisemodel_vals = noisemodel.get_noisemodelcat(noisemodel_trimname) # read in the observed data obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters) # output files statsfile = stats_filebase + '_stats.fits' pdf1dfile = statsfile.replace('stats.fits', 'pdf1d.fits') lnpfile = statsfile.replace('stats.fits', 'lnp.hd5') fit.summary_table_memory(obsdata, noisemodel_vals, sed_trimname, resume=args.resume, threshold=-10., save_every_npts=100, lnp_npts=500, stats_outname=statsfile, pdf1d_outname=pdf1dfile, lnp_outname=lnpfile, surveyname=datamodel.surveyname) new_time = time.clock() print('time to fit: ', (new_time - start_time) / 60., ' min') # print help if no arguments if not any(vars(args).values()): parser.print_help()
def test_fit_grid():
    """
    Regression test: download the photometry, trimmed grids, and cached
    reference results; run the fit; then check that the stats table and
    pdf1d file are identical to the cached references.
    """
    # download the needed files
    vega_fname = _download_rename('vega.hd5')
    obs_fname = _download_rename('b15_4band_det_27_A.fits')
    noise_trim_fname = _download_rename(
        'beast_example_phat_noisemodel_trim.grid.hd5')
    seds_trim_fname = _download_rename(
        'beast_example_phat_seds_trim.grid.hd5')

    # download cached version of fitting results
    stats_fname_cache = _download_rename('beast_example_phat_stats.fits')
    pdf1d_fname_cache = _download_rename('beast_example_phat_pdf1d.fits')
    # lnp_fname_cache = _download_rename('beast_example_phat_lnp.hd5')

    ################

    # read in the the AST noise model
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_trim_fname)

    # read in the observed data
    filters = ['HST_WFC3_F275W', 'HST_WFC3_F336W', 'HST_ACS_WFC_F475W',
               'HST_ACS_WFC_F814W', 'HST_WFC3_F110W', 'HST_WFC3_F160W']
    basefilters = ['F275W', 'F336W', 'F475W', 'F814W', 'F110W', 'F160W']
    obs_colnames = [f.lower() + '_rate' for f in basefilters]
    obsdata = get_obscat(obs_fname, filters, obs_colnames,
                         vega_fname=vega_fname)

    # output files
    stats_fname = '/tmp/beast_example_phat_stats.fits'
    pdf1d_fname = '/tmp/beast_example_phat_pdf1d.fits'
    lnp_fname = '/tmp/beast_example_phat_lnp.hd5'

    print(seds_trim_fname)
    fit.summary_table_memory(obsdata, noisemodel_vals, seds_trim_fname,
                             threshold=-10., save_every_npts=100,
                             lnp_npts=60, stats_outname=stats_fname,
                             pdf1d_outname=pdf1d_fname,
                             lnp_outname=lnp_fname)

    # check that the stats files are exactly the same
    table_cache = Table.read(stats_fname_cache)
    table_new = Table.read(stats_fname)

    assert len(table_new) == len(table_cache)

    for tcolname in table_new.colnames:
        np.testing.assert_equal(table_new[tcolname], table_cache[tcolname],
                                '%s columns not equal' % tcolname)

    # lnp files not checked as they are randomly sparsely sampled
    # hence will be different every time the fitting is run

    # check that the pdf1d files are exactly the same
    # FIX: the original never closed either HDUList, leaking file
    # handles; the context managers guarantee both are closed.
    with fits.open(pdf1d_fname_cache) as fits_cache, \
            fits.open(pdf1d_fname) as fits_new:
        assert len(fits_new) == len(fits_cache)

        for k in range(1, len(fits_new)):
            qname = fits_new[k].header['EXTNAME']
            np.testing.assert_equal(fits_new[k].data,
                                    fits_cache[qname].data,
                                    '%s pdf1d not equal' % qname)