Example #1
    def __init__(self, inputFile, filters=filters):
        """ Construct the interface """
        desc = 'GENERIC star: %s' % inputFile
        Observations.__init__(self, inputFile, desc=desc)
        self.setFilters(filters)
        # some bad values are smaller than expected
        # in physical flux units
        self.setBadValue(6e-40)

        # rate column needed as this is the *flux* column
        for ik, k in enumerate(filters):
            self.data.set_alias(k, obs_colnames[ik])
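
The snippet above registers each full filter name as an alias for the matching catalog flux column. A minimal, BEAST-independent sketch of the same aliasing pattern (all names here are illustrative, and a plain dict stands in for the table's alias registry):

# filter-name -> catalog-column aliasing, illustrative names only
filters = ["HST_WFC3_F275W", "HST_WFC3_F336W"]
obs_colnames = ["f275w_rate", "f336w_rate"]

aliases = {k: col for k, col in zip(filters, obs_colnames)}

def resolve_alias(name):
    """Return the underlying catalog column for a filter name."""
    return aliases.get(name, name)

print(resolve_alias("HST_WFC3_F275W"))  # f275w_rate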
Example #2
    def test_trim_grid(self):
        """
        Trim the sed grid and noise model using cached versions of both,
        and compare the results to cached versions.
        """
        # read in the observed data
        obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                               self.settings.obs_colnames)

        # get the modelsedgrid
        modelsedgrid = SEDGrid(self.seds_fname_cache)

        # read in the noise model just created
        noisemodel_vals = noisemodel.get_noisemodelcat(self.noise_fname_cache)

        # trim the model sedgrid
        seds_trim_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name
        noise_trim_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name

        trim_models(
            modelsedgrid,
            noisemodel_vals,
            obsdata,
            seds_trim_fname,
            noise_trim_fname,
            sigma_fac=3.0,
        )

        # compare the new to the cached version
        compare_hdf5(self.seds_trim_fname_cache, seds_trim_fname, ctype="seds")
        compare_hdf5(self.noise_trim_fname_cache,
                     noise_trim_fname,
                     ctype="noise")
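
A note on the temp-file idiom above: tempfile.NamedTemporaryFile(suffix=...).name creates a real file and relies on the object being garbage-collected (the default delete=True) to free the name before trim_models writes to it; on Windows the still-open file cannot be reopened at all. A sketch of a more explicit way to reserve a unique output path:

# reserve a unique output path without depending on garbage collection
import os
import tempfile

def unique_path(suffix):
    """Create and close a named temp file, returning its reserved path."""
    fd, path = tempfile.mkstemp(suffix=suffix)
    os.close(fd)  # the file now exists and is empty; the caller overwrites it
    return path

seds_trim_fname = unique_path(".hd5")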
Example #3
def test_trim_grid():

    # download the needed files
    vega_fname = download_rename("vega.hd5")
    seds_fname = download_rename("beast_example_phat_seds.grid.hd5")
    noise_fname = download_rename("beast_example_phat_noisemodel.grid.hd5")
    obs_fname = download_rename("b15_4band_det_27_A.fits")

    # download cached versions of the trimmed noise model and sed grid
    noise_trim_fname_cache = download_rename(
        "beast_example_phat_noisemodel_trim.grid.hd5")
    seds_trim_fname_cache = download_rename(
        "beast_example_phat_seds_trim.grid.hd5")

    ################

    # read in the observed data
    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    basefilters = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    obs_colnames = [f.lower() + "_rate" for f in basefilters]

    obsdata = Observations(obs_fname,
                           filters,
                           obs_colnames,
                           vega_fname=vega_fname)

    # get the modelsedgrid
    modelsedgrid = SEDGrid(seds_fname)

    # read in the noise model just created
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_fname)

    # trim the model sedgrid
    seds_trim_fname = "beast_example_phat_seds_trim.grid.hd5"
    noise_trim_fname = seds_trim_fname.replace("_seds", "_noisemodel")

    trim_models(
        modelsedgrid,
        noisemodel_vals,
        obsdata,
        seds_trim_fname,
        noise_trim_fname,
        sigma_fac=3.0,
    )

    # compare the new to the cached version
    compare_hdf5(seds_trim_fname_cache, seds_trim_fname, ctype="seds")
    compare_hdf5(noise_trim_fname_cache, noise_trim_fname, ctype="noise")
Example #4
    def test_fit_grid(self):
        """
        Fit a cached version of the observations with cached versions of the
        trimmed sed grid and noise model, and compare the results to cached
        versions of the stats and pdf1d files.
        """
        # read in the AST noise model
        noisemodel_vals = noisemodel.get_noisemodelcat(
            self.noise_trim_fname_cache)

        # read in the observed data
        obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                               self.settings.obs_colnames)
        # output files
        stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        pdf2d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name

        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            self.seds_trim_fname_cache,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=500,
            max_nbins=200,
            stats_outname=stats_fname,
            pdf1d_outname=pdf1d_fname,
            pdf2d_outname=pdf2d_fname,
            pdf2d_param_list=["Av", "M_ini", "logT"],
            lnp_outname=lnp_fname,
            surveyname=self.settings.surveyname,
        )

        # check that the stats files are exactly the same
        table_cache = Table.read(self.stats_fname_cache)
        table_new = Table.read(stats_fname)

        compare_tables(table_cache, table_new)

        # lnp files not checked as they are randomly sparsely sampled
        #   hence will be different every time the fitting is run

        # check that the pdf1d/pdf2d files are exactly the same
        compare_fits(self.pdf1d_fname_cache, pdf1d_fname)
        compare_fits(self.pdf2d_fname_cache, pdf2d_fname)
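
compare_tables and compare_fits are BEAST test helpers. As a rough stand-in, the table check presumably verifies that both tables share the same columns, with exact equality for strings and a float tolerance otherwise; a hypothetical version (not the BEAST function):

import numpy as np
from astropy.table import Table

def tables_match(t1, t2, rtol=1e-7):
    """Assert that two astropy Tables agree column by column."""
    assert sorted(t1.colnames) == sorted(t2.colnames)
    for c in t1.colnames:
        if t1[c].dtype.kind in "US":  # unicode/bytes columns: exact match
            np.testing.assert_equal(np.asarray(t1[c]), np.asarray(t2[c]))
        else:
            np.testing.assert_allclose(t1[c], t2[c], rtol=rtol, equal_nan=True)

t = Table({"Name": ["a", "b"], "Av": [0.1, 0.2]})
tables_match(t, t.copy())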
Example #5
        ]
        with open(subgrid_names_file, "w") as fname_file:
            for fname in required_names:
                fname_file.write(fname + "\n")

        # seds_fname = '{}seds.grid.hd5'.format(file_prefix)
        # subgridding_tools.merge_grids(seds_fname, final_sub_names)

    if args.ast:
        # Determine magnitude range for ASTs
        mag_cuts = settings.ast_maglimit
        bright_cuts = None
        if len(mag_cuts) == 1:
            tmp_cuts = mag_cuts
            obsdata = Observations(settings.obsfile,
                                   settings.filters,
                                   obs_colnames=settings.obs_colnames)

            faintest_mags = np.zeros(len(settings.filters))
            brightest_mags = np.zeros(len(settings.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace("rate", "vega")
                sfiltername = sfiltername.replace("RATE", "VEGA")
                (keep, ) = np.where(obsdata[sfiltername] < 99.0)
                faintest_mags[k] = np.percentile(obsdata[keep][sfiltername],
                                                 90.0)
                brightest_mags[k] = np.amin(obsdata[keep][sfiltername])

            # max. mags from the gst observation cat.
            mag_cuts = faintest_mags + tmp_cuts
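
The magnitude-range logic above, as a self-contained sketch: per band, keep only valid detections (magnitude < 99), take the 90th percentile as the faint limit and the minimum as the bright limit (synthetic data, for illustration):

import numpy as np

rng = np.random.default_rng(0)
mags = rng.normal(24.0, 1.5, size=1000)
mags[rng.random(1000) < 0.05] = 99.999  # sentinel for non-detections

(keep,) = np.where(mags < 99.0)
faintest_mag = np.percentile(mags[keep], 90.0)  # faint end of the catalog
brightest_mag = np.amin(mags[keep])             # brightest detected source
print(brightest_mag, faintest_mag)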
Example #6
    def test_merge_pdf1d_stats(self):
        """
        Using cached versions of the observations, sed grid, and noise model,
        split the grids and do the fitting on the subgrids and original
        grid.  Merge the results from the subgrids and compare to the results
        from fitting the full grid.
        """
        ######################################
        # STEP 1: GET SOME DATA TO WORK WITH #
        ######################################

        # read in the observed data
        obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                               self.settings.obs_colnames)

        #########################################################################################
        # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
        #########################################################################################
        num_subgrids = 3

        # Split SED grid
        sub_seds_trim_fnames = subgridding_tools.split_grid(
            self.seds_trim_fname_cache, num_subgrids, overwrite=True)

        # Split noise grid (a standardized function does not exist)
        sub_noise_trim_fnames = []

        noisemodel_vals = noisemodel.get_noisemodelcat(
            self.noise_trim_fname_cache)
        slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                                  num_subgrids)
        for i, slc in enumerate(slices):
            outname = self.noise_trim_fname_cache.replace(
                ".hd5", "sub{}.hd5".format(i))
            with tables.open_file(outname, "w") as outfile:
                outfile.create_array(outfile.root, "bias",
                                     noisemodel_vals["bias"][slc])
                outfile.create_array(outfile.root, "error",
                                     noisemodel_vals["error"][slc])
                outfile.create_array(outfile.root, "completeness",
                                     noisemodel_vals["completeness"][slc])
            sub_noise_trim_fnames.append(outname)

        # Collect information about the parameter ranges, to make the pdf1d bins
        # consistent between subgrids
        grid_info_dict = subgridding_tools.reduce_grid_info(
            sub_seds_trim_fnames,
            sub_noise_trim_fnames,
            nprocs=1,
            cap_unique=100)

        ##################################################
        # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
        ##################################################
        def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
            return [
                base_fname.replace(extension,
                                   "gridsub{}{}".format(i, extension))
                for i in range(num_subgrids)
            ]

        stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name

        subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
        subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
        subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname,
                                                 num_subgrids,
                                                 extension=".hd5")

        for i in range(num_subgrids):
            sub_noisemodel_vals = noisemodel.get_noisemodelcat(
                sub_noise_trim_fnames[i])
            fit.summary_table_memory(
                obsdata,
                sub_noisemodel_vals,
                sub_seds_trim_fnames[i],
                threshold=-40.0,
                save_every_npts=100,
                lnp_npts=500,
                stats_outname=subgrid_stats_fnames[i],
                pdf1d_outname=subgrid_pdf1d_fnames[i],
                lnp_outname=subgrid_lnp_fnames[i],
                grid_info_dict=grid_info_dict,
                do_not_normalize=True,
            )
            # The do_not_normalize option is absolutely crucial!

        # Now merge the results
        merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
            subgrid_pdf1d_fnames, subgrid_stats_fnames)

        # Do a full fit also
        normal_stats = tempfile.NamedTemporaryFile(suffix=".fits").name
        normal_pdf1d = tempfile.NamedTemporaryFile(suffix=".fits").name
        normal_lnp = tempfile.NamedTemporaryFile(suffix=".hd5").name
        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            self.seds_trim_fname_cache,
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=normal_stats,
            pdf1d_outname=normal_pdf1d,
            lnp_outname=normal_lnp,
            do_not_normalize=True,
        )
        # Here, we also need to use do_not_normalize, otherwise Pmax will be
        # different by a factor

        # CHECKS
        tolerance = 1e-6
        fits_normal = fits.open(normal_pdf1d)
        fits_new = fits.open(merged_pdf1d_fname)

        if not len(fits_new) == len(fits_normal):
            raise AssertionError()

        # A similar problem to the above will also occur here
        for k in range(1, len(fits_new)):
            qname = fits_new[k].header["EXTNAME"]
            np.testing.assert_allclose(
                fits_new[k].data,
                fits_normal[qname].data,
                rtol=tolerance,
                atol=tolerance,
            )

        table_normal = Table.read(normal_stats)
        table_new = Table.read(merged_stats_fname)

        if not len(table_normal) == len(table_new):
            raise AssertionError()

        # An exact comparison would normally fail here, as the merging process
        # cannot be made bit-identical due to floating-point math
        # (exacerbated by exponentials)
        for c in table_new.colnames:
            if c == "Name" or c == "RA" or c == "DEC":
                np.testing.assert_equal(
                    table_normal[c],
                    table_new[c],
                    err_msg="column {} is not equal".format(c),
                )
            else:
                np.testing.assert_allclose(
                    table_normal[c],
                    table_new[c],
                    rtol=tolerance,
                    equal_nan=True,
                    err_msg="column {} is not close enough".format(c),
                )
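
subgridding_tools.uniform_slices is used above to cut the noise model into contiguous pieces. A sketch of its plausible behavior (an assumption, not the BEAST source): split n_rows into num nearly equal slice objects:

def uniform_slices(n_rows, num):
    """Split n_rows into num contiguous, nearly equal slices."""
    bounds = [round(i * n_rows / num) for i in range(num + 1)]
    return [slice(bounds[i], bounds[i + 1]) for i in range(num)]

print(uniform_slices(10, 3))
# [slice(0, 3, None), slice(3, 7, None), slice(7, 10, None)]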
Example #7
def test_merge_pdf1d_stats():
    ######################################
    # STEP 1: GET SOME DATA TO WORK WITH #
    ######################################
    vega_fname = download_rename("vega.hd5")
    obs_fname = download_rename("b15_4band_det_27_A.fits")
    noise_trim_fname = download_rename(
        "beast_example_phat_noisemodel_trim.grid.hd5")
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")

    # download cached version of fitting results
    # stats_fname_cache = download_rename('beast_example_phat_stats.fits')
    # pdf1d_fname_cache = download_rename('beast_example_phat_pdf1d.fits')

    # read in the observed data
    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    basefilters = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    obs_colnames = [f.lower() + "_rate" for f in basefilters]

    obsdata = Observations(obs_fname,
                           filters,
                           obs_colnames,
                           vega_fname=vega_fname)

    #########################################################################################
    # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
    #########################################################################################
    num_subgrids = 3

    # Split SED grid
    sub_seds_trim_fnames = subgridding_tools.split_grid(seds_trim_fname,
                                                        num_subgrids,
                                                        overwrite=True)

    # Split noise grid (a standardized function does not exist)
    sub_noise_trim_fnames = []

    noisemodel_vals = get_noisemodelcat(noise_trim_fname)
    slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                              num_subgrids)
    for i, slc in enumerate(slices):
        outname = noise_trim_fname.replace(".hd5", "sub{}.hd5".format(i))
        with tables.open_file(outname, "w") as outfile:
            outfile.create_array(outfile.root, "bias",
                                 noisemodel_vals["bias"][slc])
            outfile.create_array(outfile.root, "error",
                                 noisemodel_vals["error"][slc])
            outfile.create_array(outfile.root, "completeness",
                                 noisemodel_vals["completeness"][slc])
        sub_noise_trim_fnames.append(outname)

    # Collect information about the parameter ranges, to make the pdf1d bins
    # consistent between subgrids
    grid_info_dict = subgridding_tools.reduce_grid_info(sub_seds_trim_fnames,
                                                        sub_noise_trim_fnames,
                                                        nprocs=1,
                                                        cap_unique=100)

    ##################################################
    # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
    ##################################################
    def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
        return [
            base_fname.replace(extension, "gridsub{}{}".format(i, extension))
            for i in range(num_subgrids)
        ]

    stats_fname = "/tmp/beast_example_phat_stats.fits"
    pdf1d_fname = "/tmp/beast_example_phat_pdf1d.fits"
    lnp_fname = "/tmp/beast_example_phat_lnp.hd5"

    subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
    subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
    subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname,
                                             num_subgrids,
                                             extension=".hd5")

    for i in range(num_subgrids):
        sub_noisemodel_vals = get_noisemodelcat(sub_noise_trim_fnames[i])
        fit.summary_table_memory(
            obsdata,
            sub_noisemodel_vals,
            sub_seds_trim_fnames[i],
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=60,
            stats_outname=subgrid_stats_fnames[i],
            pdf1d_outname=subgrid_pdf1d_fnames[i],
            lnp_outname=subgrid_lnp_fnames[i],
            grid_info_dict=grid_info_dict,
            do_not_normalize=True,
        )
        # The do_not_normalize option is absolutely crucial!

    # Now merge the results
    merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
        subgrid_pdf1d_fnames, subgrid_stats_fnames)

    # Do a full fit also
    normal_stats = "normal_stats.fits"
    normal_pdf1d = "normal_pdf1d.fits"
    normal_lnp = "normal_lnp.hd5"
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        seds_trim_fname,
        threshold=-40.0,
        save_every_npts=100,
        lnp_npts=60,
        stats_outname=normal_stats,
        pdf1d_outname=normal_pdf1d,
        lnp_outname=normal_lnp,
        do_not_normalize=True,
    )
    # Here, we also need to use do_not_normalize, otherwise Pmax will be
    # different by a factor

    # CHECKS
    tolerance = 1e-6
    print("comparing pdf1d")
    # fits_cache = fits.open(pdf1d_fname_cache)
    fits_normal = fits.open(normal_pdf1d)
    fits_new = fits.open(merged_pdf1d_fname)

    if not len(fits_new) == len(fits_normal):
        raise AssertionError()

    # A similar problem to the above will also occur here
    for k in range(1, len(fits_new)):
        qname = fits_new[k].header["EXTNAME"]
        print(qname)
        np.testing.assert_allclose(fits_new[k].data,
                                   fits_normal[qname].data,
                                   rtol=tolerance,
                                   atol=tolerance)

    print("comparing stats")
    # table_cache = Table.read(stats_fname_cache)
    table_normal = Table.read(normal_stats)
    table_new = Table.read(merged_stats_fname)

    if not len(table_normal) == len(table_new):
        raise AssertionError()

    # An exact comparison would normally fail here, as the merging process
    # cannot be made bit-identical due to floating-point math
    # (exacerbated by exponentials)
    for c in table_new.colnames:
        print(c)
        if c == "Name" or c == "RA" or c == "DEC":
            np.testing.assert_equal(
                table_normal[c],
                table_new[c],
                err_msg="column {} is not equal".format(c),
            )
        else:
            np.testing.assert_allclose(
                table_normal[c],
                table_new[c],
                rtol=tolerance,
                equal_nan=True,
                err_msg="column {} is not close enough".format(c),
            )
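
Why do_not_normalize matters when merging, as a tiny numeric illustration: normalizing each subgrid separately rescales its values by a different factor, so merged quantities such as Pmax are no longer on a common scale:

import numpy as np

full = np.array([0.2, 0.5, 0.1, 0.8])  # unnormalized weights, full grid
sub1, sub2 = full[:2], full[2:]

# per-subgrid normalization: each half is rescaled by a different factor
print(sub1 / sub1.sum(), sub2 / sub2.sum())
# unnormalized (do_not_normalize=True): one common scale, safe to merge
print(np.concatenate([sub1, sub2]) / full.sum())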
Example #8
def make_ast_inputs(beast_settings_info, pick_method="flux_bin_method"):
    """
    Make the list of artificial stars to be run through the photometry pipeline

    Parameters
    ----------
    beast_settings_info : string or beast.tools.beast_settings.beast_settings instance
        if string: file name with beast settings
        if class: beast.tools.beast_settings.beast_settings instance

    pick_method : string (default = "flux_bin_method")
        By default, use the flux bin method to select SEDs.
        If set to "random_pick", randomly select SEDs from the model grid.
        If set to "suppl_seds", supplement the existing input ASTs by randomly
        selecting additional SEDs from the list of non-selected models.

    """

    # process beast settings info
    if isinstance(beast_settings_info, str):
        settings = beast_settings.beast_settings(beast_settings_info)
    elif isinstance(beast_settings_info, beast_settings.beast_settings):
        settings = beast_settings_info
    else:
        raise TypeError(
            "beast_settings_info must be string or beast.tools.beast_settings.beast_settings instance"
        )

    # read in the photometry catalog
    obsdata = Observations(settings.obsfile,
                           settings.filters,
                           obs_colnames=settings.obs_colnames)

    # --------------------
    # select SEDs
    # --------------------

    modelsedgrid_filename = "./{0}/{0}_seds.grid.hd5".format(settings.project)
    Nrealize = settings.ast_realization_per_model

    # file names for stars and corresponding SED parameters
    if pick_method == "suppl_seds":
        outfile_seds = "./{0}/{0}_inputAST_seds_suppl.txt".format(
            settings.project)
        outfile_params = "./{0}/{0}_ASTparams_suppl.fits".format(
            settings.project)
    else:
        outfile_seds = "./{0}/{0}_inputAST_seds.txt".format(settings.project)
        outfile_params = "./{0}/{0}_ASTparams.fits".format(settings.project)

    # if the SED file doesn't exist, create SEDs
    if not os.path.isfile(outfile_seds):

        print("Selecting SEDs for ASTs")

        if pick_method == "flux_bin_method":

            N_fluxes = settings.ast_n_flux_bins
            min_N_per_flux = settings.ast_n_per_flux_bin
            bins_outfile = "./{0}/{0}_ASTfluxbins.txt".format(settings.project)

            chosen_seds = pick_models_toothpick_style(
                modelsedgrid_filename,
                settings.filters,
                N_fluxes,
                min_N_per_flux,
                outfile=outfile_seds,
                outfile_params=outfile_params,
                bins_outfile=bins_outfile,
            )

        if pick_method == "random_pick":

            # construct magnitude cuts
            mag_cuts = settings.ast_maglimit
            Nfilters = settings.ast_bands_above_maglimit

            if len(mag_cuts) == 1:
                tmp_cuts = mag_cuts
                min_mags = np.zeros(len(settings.filters))
                for k, filtername in enumerate(obsdata.filters):
                    sfiltername = obsdata.filter_aliases[filtername]
                    sfiltername = sfiltername.replace("rate", "vega")
                    sfiltername = sfiltername.replace("RATE", "VEGA")
                    (keep, ) = np.where(obsdata[sfiltername] < 99.0)
                    min_mags[k] = np.percentile(obsdata[keep][sfiltername],
                                                90.0)

                # max. mags from the gst observation cat.
                mag_cuts = min_mags + tmp_cuts

            N_models = settings.ast_models_selected_per_age

            chosen_seds = pick_models(
                modelsedgrid_filename,
                settings.filters,
                mag_cuts,
                Nfilter=Nfilters,
                N_stars=N_models,
                Nrealize=Nrealize,
                outfile=outfile_seds,
                outfile_params=outfile_params,
            )

        if pick_method == "suppl_seds":

            print("Supplementing ASTs")

            nAST = settings.ast_N_supplement
            existingASTfile = settings.ast_existing_file
            mag_cuts = settings.ast_suppl_maglimit
            color_cuts = settings.ast_suppl_colorlimit

            chosen_seds = supplement_ast(
                modelsedgrid_filename,
                settings.filters,
                nAST=nAST,
                existingASTfile=existingASTfile,
                outASTfile=outfile_seds,
                outASTfile_params=outfile_params,
                mag_cuts=mag_cuts,
                color_cuts=color_cuts,
            )

    # if the SED file does exist, read them in
    else:
        print("Reading existing AST SEDs")
        chosen_seds = Table.read(outfile_seds, format="ascii")

    # --------------------
    # assign positions
    # --------------------

    # if we want ASTs with positions included (rather than just the fluxes from
    # the section above)
    if settings.ast_with_positions:

        print("Assigning positions to artificial stars")

        outfile = "./{0}/{0}_inputAST.txt".format(settings.project)
        if pick_method == "suppl_seds":
            outfile = "./{0}/{0}_inputAST_suppl.txt".format(settings.project)

        # if we're replicating SEDs across source density or background bins
        if settings.ast_density_table is not None:
            if hasattr(settings, "ast_reference_image_hdu_extension"):
                hdu_ext = settings.ast_reference_image_hdu_extension
            else:
                hdu_ext = 1

            make_ast_xy_list.pick_positions_from_map(
                obsdata,
                chosen_seds,
                settings.ast_density_table,
                settings.sd_binmode,
                settings.sd_Nbins,
                settings.sd_binwidth,
                settings.sd_custom,
                settings.ast_realization_per_model,
                outfile=outfile,
                refimage=settings.ast_reference_image,
                refimage_hdu=hdu_ext,
                wcs_origin=1,
                Nrealize=1,
                set_coord_boundary=settings.ast_coord_boundary,
                region_from_filters="all",
                erode_boundary=settings.ast_erode_selection_region,
            )
        # if we're not using SD/background maps, SEDs will be distributed
        # based on catalog sources
        else:
            make_ast_xy_list.pick_positions(
                obsdata,
                outfile_seds,
                outfile,
                settings.ast_pixel_distribution,
                refimage=settings.ast_reference_image,
            )
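
The flux bin method referenced in the docstring selects SEDs so that every flux level is represented. A rough single-band sketch of the idea with made-up data (the real pick_models_toothpick_style works per filter on the model grid):

import numpy as np

rng = np.random.default_rng(1)
model_fluxes = rng.lognormal(mean=0.0, sigma=1.0, size=5000)

N_fluxes, min_N_per_flux = 10, 5
edges = np.logspace(np.log10(model_fluxes.min()),
                    np.log10(model_fluxes.max()), N_fluxes + 1)
chosen = []
for lo, hi in zip(edges[:-1], edges[1:]):
    (in_bin,) = np.where((model_fluxes >= lo) & (model_fluxes < hi))
    if len(in_bin):
        n = min(min_N_per_flux, len(in_bin))
        chosen.extend(rng.choice(in_bin, size=n, replace=False))
print(len(chosen), "models selected")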
Example #9
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8

        if noisefile == old_noisefile:
            print("not reading noisefile - same as last")
            # print(noisefile)
        else:
            print("reading noisefile")
            # read in the noise model
            noisemodel_vals = noisemodel.get_noisemodelcat(noisefile)
            old_noisefile = noisefile

        # read in the observed data
        print("getting the observed data")
        obsdata = Observations(
            obsfile, modelsedgrid.filters, obs_colnames=datamodel.obs_colnames
        )
        # trim the model sedgrid
        #   set n_detected = 0 to disable the trimming of models based on
        #      the ASTs (e.g. extrapolations are ok)
        #   this is needed as the ASTs in the NIR bands do not go faint enough
        trim_grid.trim_models(
            modelsedgrid,
            noisemodel_vals,
            obsdata,
            sed_trimname,
            noisemodel_trimname,
            sigma_fac=3.0,
        )

        new_time = time.perf_counter()
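
The old_noisefile bookkeeping above avoids re-reading an unchanged noise file. The same effect with a one-entry cache (read_file is a hypothetical stand-in for noisemodel.get_noisemodelcat):

import functools

@functools.lru_cache(maxsize=1)
def read_file(path):
    print("reading", path)
    return path.upper()  # placeholder for the real loader

read_file("noise.hd5")   # reads
read_file("noise.hd5")   # cached, nothing printed
read_file("other.hd5")   # different file: evicts the old entry and reads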
Example #10
def make_ast_inputs(flux_bin_method=True):
    """
    Make the list of artificial stars to be run through the photometry pipeline

    Parameters
    ----------
    flux_bin_method : boolean (default=True)
        If True, use the flux bin method to select SEDs.  If False, randomly
        select SEDs from the model grid.

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # read in the photometry catalog
    obsdata = Observations(datamodel.obsfile,
                           datamodel.filters,
                           obs_colnames=datamodel.obs_colnames)

    # --------------------
    # select SEDs
    # --------------------

    Nrealize = datamodel.ast_realization_per_model
    Nfilters = datamodel.ast_bands_above_maglimit

    # file names for stars and corresponding SED parameters
    outfile_seds = "./{0}/{0}_inputAST_seds.txt".format(datamodel.project)
    outfile_params = "./{0}/{0}_ASTparams.fits".format(datamodel.project)
    # model grid file name (needed by both SED selection branches below)
    modelsedgrid_filename = "./{0}/{0}_seds.grid.hd5".format(datamodel.project)

    # if the SED file doesn't exist, create SEDs
    if not os.path.isfile(outfile_seds):

        print("Selecting SEDs for ASTs")

        if flux_bin_method:

            N_fluxes = datamodel.ast_n_flux_bins
            min_N_per_flux = datamodel.ast_n_per_flux_bin
            bins_outfile = "./{0}/{0}_ASTfluxbins.txt".format(
                datamodel.project)

            chosen_seds = pick_models_toothpick_style(
                modelsedgrid_filename,
                datamodel.filters,
                Nfilters,
                N_fluxes,
                min_N_per_flux,
                outfile=outfile_seds,
                outfile_params=outfile_params,
                bins_outfile=bins_outfile,
            )

        else:

            # construct magnitude cuts

            mag_cuts = datamodel.ast_maglimit

            if len(mag_cuts) == 1:
                tmp_cuts = mag_cuts
                min_mags = np.zeros(len(datamodel.filters))
                for k, filtername in enumerate(obsdata.filters):
                    sfiltername = obsdata.data.resolve_alias(filtername)
                    sfiltername = sfiltername.replace("rate", "vega")
                    sfiltername = sfiltername.replace("RATE", "VEGA")
                    (keep, ) = np.where(obsdata[sfiltername] < 99.0)
                    min_mags[k] = np.percentile(obsdata[keep][sfiltername],
                                                90.0)

                # max. mags from the gst observation cat.
                mag_cuts = min_mags + tmp_cuts

            N_models = datamodel.ast_models_selected_per_age

            chosen_seds = pick_models(
                modelsedgrid_filename,
                datamodel.filters,
                mag_cuts,
                Nfilter=Nfilters,
                N_stars=N_models,
                Nrealize=Nrealize,
                outfile=outfile_seds,
                outfile_params=outfile_params,
            )

    # if the SED file does exist, read them in
    else:
        print("Reading existing AST SEDs")
        chosen_seds = Table.read(outfile_seds, format="ascii")

    # --------------------
    # assign positions
    # --------------------

    # if we want ASTs with positions included (rather than just the fluxes from
    # the section above)
    if datamodel.ast_with_positions:

        print("Assigning positions to artificial stars")

        outfile = "./{0}/{0}_inputAST.txt".format(datamodel.project)

        # if we're replicating SEDs across source density or background bins
        if datamodel.ast_density_table is not None:
            make_ast_xy_list.pick_positions_from_map(
                obsdata,
                chosen_seds,
                datamodel.ast_density_table,
                datamodel.ast_N_bins,
                datamodel.ast_realization_per_model,
                outfile=outfile,
                refimage=datamodel.ast_reference_image,
                refimage_hdu=1,
                wcs_origin=1,
                Nrealize=1,
                set_coord_boundary=datamodel.ast_coord_boundary,
                region_from_filters="all",
            )

        # if we're not using SD/background maps, SEDs will be distributed
        # based on catalog sources
        else:
            make_ast_xy_list.pick_positions(
                obsdata,
                outfile,
                datamodel.ast_pixel_distribution,
                refimage=datamodel.ast_reference_image,
            )
Example #11
def generate_files_for_tests(run_beast=True, run_tools=True):
    """
    Use the metal_small example to generate a full set of files for the BEAST
    regression tests.

    Parameters
    ----------
    run_beast : boolean (default=True)
        if True, run the BEAST

    run_tools : boolean (default=True)
        if True, run the code to generate things for tools
    """

    # read in BEAST settings
    settings_orig = beast_settings.beast_settings("beast_settings.txt")
    # also make a version with subgrids
    settings_subgrids = copy.deepcopy(settings_orig)
    settings_subgrids.n_subgrid = 2
    settings_subgrids.project = f"{settings_orig.project}_subgrids"

    # ==========================================
    # run the beast for each set of settings
    # ==========================================

    if run_beast:

        for settings in [settings_orig, settings_subgrids]:

            # -----------------
            # physics model
            # -----------------
            create_physicsmodel.create_physicsmodel(
                settings,
                nsubs=settings.n_subgrid,
                nprocs=1,
            )

            # -----------------
            # ASTs
            # -----------------

            # currently only works for no subgrids
            if settings.n_subgrid == 1:
                make_ast_inputs.make_ast_inputs(settings,
                                                pick_method="flux_bin_method")

            # -----------------
            # obs model
            # -----------------
            create_obsmodel.create_obsmodel(
                settings,
                use_sd=False,
                nsubs=settings.n_subgrid,
                nprocs=1,
                use_rate=True,
            )

            # -----------------
            # trimming
            # -----------------

            # make file names
            file_dict = create_filenames.create_filenames(
                settings, use_sd=False, nsubs=settings.n_subgrid)

            # read in the observed data
            obsdata = Observations(settings.obsfile, settings.filters,
                                   settings.obs_colnames)

            for i in range(settings.n_subgrid):

                # get the modelsedgrid on which to generate the noisemodel
                modelsedgridfile = file_dict["modelsedgrid_files"][i]
                modelsedgrid = SEDGrid(modelsedgridfile)

                # read in the noise model just created
                noisemodel_vals = noisemodel.get_noisemodelcat(
                    file_dict["noise_files"][i])

                # trim the model sedgrid
                sed_trimname = file_dict["modelsedgrid_trim_files"][i]
                noisemodel_trimname = file_dict["noise_trim_files"][i]

                trim_grid.trim_models(
                    modelsedgrid,
                    noisemodel_vals,
                    obsdata,
                    sed_trimname,
                    noisemodel_trimname,
                    sigma_fac=3.0,
                )

            # -----------------
            # fitting
            # -----------------

            run_fitting.run_fitting(
                settings,
                use_sd=False,
                nsubs=settings.n_subgrid,
                nprocs=1,
                pdf2d_param_list=["Av", "M_ini", "logT"],
                pdf_max_nbins=200,
            )

            # -----------------
            # merging
            # -----------------

            # it'll automatically skip for no subgrids
            merge_files.merge_files(settings,
                                    use_sd=False,
                                    nsubs=settings.n_subgrid)

            print("\n\n")

    # ==========================================
    # reference files for assorted tools
    # ==========================================

    if run_tools:

        # -----------------
        # compare_spec_type
        # -----------------

        # the input settings
        input = {
            "spec_ra": [72.67213351],
            "spec_dec": [-67.71720515],
            "spec_type": ["A"],
            "spec_subtype": [0],
            "lumin_class": ["IV"],
            "match_radius": 0.2,
        }

        # run it
        output = compare_spec_type.compare_spec_type(
            settings_orig.obsfile,
            "{0}/{0}_stats.fits".format(settings_orig.project),
            **input,
        )

        # save the inputs and outputs
        asdf.AsdfFile({
            "input": input,
            "output": output
        }).write_to("{0}/{0}_compare_spec_type.asdf".format(
            settings_orig.project))

        # -----------------
        # star_type_probability
        # -----------------

        # input settings
        input = {
            "output_filebase": None,
            "ext_O_star_params": {
                "min_M_ini": 10,
                "min_Av": 0.5,
                "max_Av": 5
            },
        }

        # run it
        output = star_type_probability.star_type_probability(
            "{0}/{0}_pdf1d.fits".format(settings_orig.project),
            "{0}/{0}_pdf2d.fits".format(settings_orig.project),
            **input,
        )

        # save the inputs and outputs
        asdf.AsdfFile({
            "input": input,
            "output": output
        }).write_to("{0}/{0}_star_type_probability.asdf".format(
            settings_orig.project))

    # ==========================================
    # asdf file permissions
    # ==========================================

    # for unknown reasons, asdf currently writes files with permissions set
    # to -rw-------.  This changes it to -rw-r--r-- (like the rest of the
    # BEAST files) so Karl can easily copy them over to the cached file
    # website.

    # list of asdf files
    asdf_files = glob.glob("*/*.asdf")
    # go through each one to change permissions
    for fname in asdf_files:
        os.chmod(fname,
                 stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
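
The stat flag combination above is the symbolic spelling of octal mode 0o644; an equivalent, more compact version:

import glob
import os

# 0o644 == S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH (-rw-r--r--)
for fname in glob.glob("*/*.asdf"):
    os.chmod(fname, 0o644)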
Example #12
def fit_submodel(
    settings,
    photometry_file,
    modelsedgrid_file,
    noise_file,
    pdf_max_nbins,
    stats_file,
    pdf_file,
    pdf2d_file,
    pdf2d_param_list,
    lnp_file,
    grid_info_file=None,
    resume=False,
):
    """
    Code to run the SED fitting

    Parameters
    ----------
    settings : beast.tools.beast_settings.beast_settings instance
        object with the beast settings

    photometry_file : string
        path+name of the photometry file

    modelsedgrid_file : string
        path+name of the physics model grid file

    noise_file : string
        path+name of the noise model file

    pdf_max_nbins : int
        Maximum number of bins to use for the 1D and 2D PDFs

    stats_file : string
        path+name of the file to contain stats output

    pdf_file : string
        path+name of the file to contain 1D PDF output

    pdf2d_file : string
        path+name of the file to contain 2D PDF output

    pdf2d_param_list: list of strings or None
        parameters for which to make 2D PDFs (or None)

    lnp_file : string
        path+name of the file to contain log likelihood output

    grid_info_file : string (default=None)
        path+name for pickle file that contains dictionary with subgrid
        min/max/n_unique (required for a run with subgrids)

    resume : boolean (default=False)
        choose whether to resume existing run or start over


    Returns
    -------
    noisefile : string
        name of the created noise file

    """

    # read in the photometry catalog
    obsdata = Observations(photometry_file,
                           settings.filters,
                           obs_colnames=settings.obs_colnames)

    # check if it's a subgrid run by looking in the file name
    if "gridsub" in modelsedgrid_file:
        subgrid_run = True
        print("loading grid_info_dict from " + grid_info_file)
        with open(grid_info_file, "rb") as p:
            grid_info_dict = pickle.loads(p.read())
    else:
        subgrid_run = False

    # load the SED grid and noise model
    modelsedgrid = SEDGrid(modelsedgrid_file)
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_file)

    if subgrid_run:
        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            modelsedgrid,
            resume=resume,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=500,
            max_nbins=pdf_max_nbins,
            stats_outname=stats_file,
            pdf1d_outname=pdf_file,
            pdf2d_outname=pdf2d_file,
            pdf2d_param_list=pdf2d_param_list,
            grid_info_dict=grid_info_dict,
            lnp_outname=lnp_file,
            do_not_normalize=True,
            surveyname=settings.surveyname,
        )
        print("Done fitting on grid " + modelsedgrid_file)

    else:

        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            modelsedgrid,
            resume=resume,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=500,
            max_nbins=pdf_max_nbins,
            stats_outname=stats_file,
            pdf1d_outname=pdf_file,
            pdf2d_outname=pdf2d_file,
            pdf2d_param_list=pdf2d_param_list,
            lnp_outname=lnp_file,
            surveyname=settings.surveyname,
        )
        print("Done fitting on grid " + modelsedgrid_file)
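
The grid_info_file read above is just a pickled dictionary (pickle.loads(p.read()) is equivalent to pickle.load(p)). A minimal sketch of producing one, with made-up keys and values:

import pickle

# made-up structure; the real dict comes from subgridding_tools.reduce_grid_info
grid_info_dict = {"Av": {"min": 0.0, "max": 10.0, "num_unique": 50}}

with open("grid_info.pkl", "wb") as p:
    pickle.dump(grid_info_dict, p)

with open("grid_info.pkl", "rb") as p:
    assert pickle.load(p) == grid_info_dict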
Example #13
def test_fit_grid():

    # download the needed files
    vega_fname = download_rename("vega.hd5")
    obs_fname = download_rename("b15_4band_det_27_A.fits")
    noise_trim_fname = download_rename(
        "beast_example_phat_noisemodel_trim.grid.hd5")
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")

    # download cached version of fitting results
    stats_fname_cache = download_rename("beast_example_phat_stats.fits")
    pdf1d_fname_cache = download_rename("beast_example_phat_pdf1d.fits")

    ################

    # read in the AST noise model
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_trim_fname)

    # read in the observed data
    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    basefilters = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    obs_colnames = [f.lower() + "_rate" for f in basefilters]

    obsdata = Observations(obs_fname,
                           filters,
                           obs_colnames,
                           vega_fname=vega_fname)
    # output files
    stats_fname = "/tmp/beast_example_phat_stats.fits"
    pdf1d_fname = "/tmp/beast_example_phat_pdf1d.fits"
    lnp_fname = "/tmp/beast_example_phat_lnp.hd5"

    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        seds_trim_fname,
        threshold=-10.0,
        save_every_npts=100,
        lnp_npts=60,
        max_nbins=50,
        stats_outname=stats_fname,
        pdf1d_outname=pdf1d_fname,
        lnp_outname=lnp_fname,
    )

    # check that the stats files are exactly the same
    table_cache = Table.read(stats_fname_cache)
    table_new = Table.read(stats_fname)

    compare_tables(table_cache, table_new)

    # lnp files not checked as they are randomly sparsely sampled
    #   hence will be different every time the fitting is run

    # check that the pdf1d files are exactly the same
    compare_fits(pdf1d_fname_cache, pdf1d_fname)