Code example #1
    def test_create_physicsmodel_no_subgrid(self):
        """
        Test create_physicsmodel.py, assuming no subgrids
        """

        # run create_physicsmodel
        create_physicsmodel.create_physicsmodel(self.settings,
                                                nsubs=self.settings.n_subgrid,
                                                nprocs=1)

        # check that files match
        # - isochrones
        table_cache = Table.read(
            self.iso_fname_cache,
            format="ascii.csv",
            comment="#",
            delimiter=",",
        )
        table_new = Table.read(
            f"./{self.settings.project}/{self.settings.project}_iso.csv",
            format="ascii.csv",
            comment="#",
            delimiter=",",
        )
        compare_tables(table_cache, table_new)
        # - spectra with priors
        compare_hdf5(
            self.priors_fname_cache,
            f"./{self.settings.project}/{self.settings.project}_spec_w_priors.grid.hd5",
        )
        # - SEDs grid
        compare_hdf5(
            self.seds_fname_cache,
            f"./{self.settings.project}/{self.settings.project}_seds.grid.hd5",
        )
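
The compare_tables and compare_hdf5 helpers used above ship with the BEAST test utilities and are not shown here. As a rough, hedged illustration of the cached-vs-new regression pattern they implement, minimal stand-ins might look like this (assuming flat HDF5 files of numeric datasets):

import h5py
import numpy as np

def compare_tables(table_cache, table_new):
    """Check that two astropy Tables have identical columns and values."""
    assert table_cache.colnames == table_new.colnames
    for col in table_cache.colnames:
        if table_cache[col].dtype.kind in "fc":
            # float columns: tolerate tiny roundoff differences
            np.testing.assert_allclose(table_cache[col], table_new[col])
        else:
            np.testing.assert_array_equal(table_cache[col], table_new[col])

def compare_hdf5(fname_cache, fname_new):
    """Check that two HDF5 files contain the same datasets and values."""
    with h5py.File(fname_cache, "r") as fc, h5py.File(fname_new, "r") as fn:
        assert sorted(fc.keys()) == sorted(fn.keys())
        for key in fc:
            np.testing.assert_allclose(np.asarray(fc[key]),
                                       np.asarray(fn[key]))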
Code example #2
    def test_create_physicsmodel_with_subgrid(self):
        """
        Test create_physicsmodel.py, assuming two subgrids
        """

        # run create_physicsmodel
        create_physicsmodel.create_physicsmodel(
            self.settings_sg, nsubs=self.settings_sg.n_subgrid, nprocs=1)

        # check that files match

        # - isochrones
        table_cache = Table.read(
            self.iso_fname_cache,
            format="ascii.csv",
            comment="#",
            delimiter=",",
        )
        table_new = Table.read(
            "beast_metal_small_subgrids/beast_metal_small_subgrids_iso.csv",
            format="ascii.csv",
            comment="#",
            delimiter=",",
        )
        compare_tables(table_cache, table_new)

        # - spectra with priors
        compare_hdf5(
            self.priors_fname_cache,
            "./beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.grid.hd5",
        )
        compare_hdf5(
            self.priors_sub0_fname_cache,
            "beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.gridsub0.hd5",
        )
        compare_hdf5(
            self.priors_sub1_fname_cache,
            "beast_metal_small_subgrids/beast_metal_small_subgrids_spec_w_priors.gridsub1.hd5",
        )

        # - SEDs grid
        compare_hdf5(
            self.seds_sub0_fname_cache,
            "beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub0.hd5",
        )
        compare_hdf5(
            self.seds_sub1_fname_cache,
            "beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub1.hd5",
        )

        # - list of subgrids
        with open("./beast_metal_small_subgrids/subgrid_fnames.txt") as f:
            temp = f.read()
        subgrid_list = [x for x in temp.split("\n") if x != ""]
        expected_list = [
            "beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub0.hd5",
            "beast_metal_small_subgrids/beast_metal_small_subgrids_seds.gridsub1.hd5",
        ]
        assert subgrid_list == expected_list, "subgrid_fnames.txt has incorrect content"
Code example #3
def beast_production_wrapper():
    """
    This does all of the steps for a full production run, and can be used as
    a wrapper to automatically do most steps for multiple fields.
    * make beast settings file
    * make source density map
    * make background density map
    * make physics model (SED grid)
    * make input list for ASTs

    Places for user to manually do things:
    * editing code before use
        - beast_settings_template.py: setting up the file with desired parameters
        - here: list the catalog filter names with the corresponding BEAST names
        - here: choose settings (pixel size, filter, mag range) for the source density map
        - here: choose settings (pixel size, reference image) for the background map

    """

    # get the list of fields
    field_names = ["M31-B17-WEST"]

    # reference image headers
    ast_ref_im = ["./data/M31-B17-WEST_F475W_drz_head.fits"]

    # filter for sorting (one entry per field)
    ref_filter = ["F475W"]

    # filter for checking flags (one entry per field)
    flag_filter = ["F475W"]

    # coordinates for the boundaries of the field
    boundary_coord = [[
        [
            11.3569155882981, 11.3332629372269, 11.1996295883163,
            11.2230444144334
        ],
        [
            42.0402119924196, 41.9280756279533, 41.9439995726193,
            42.0552016945299
        ],
    ]]

    # number of fields
    # n_field = len(field_names)

    # Need to know what the correspondence is between filter names in the
    # catalog and the BEAST filter names.
    #
    # These will be used to automatically determine the filters present in
    # each GST file and fill in the beast settings file.  The order doesn't
    # matter, as long as the order in one list matches the order in the other
    # list.
    #
    gst_filter_names = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    beast_filter_names = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]

    # for b in range(n_field):
    for b in [0]:

        print("********")
        print("field {0} (b={1})".format(field_names[b], b))
        print("********")

        # only create an AST input list if the ASTs don't already exist
        ast_input_file = ("./" + field_names[b] + "_beast/" + field_names[b] +
                          "_beast_inputAST.txt")
        if os.path.isfile(ast_input_file):
            print("AST input file already exists... skipping \n")
            continue

        # -----------------
        # data file names
        # -----------------

        # paths for the data/AST files
        gst_file = "./data/" + field_names[b] + ".st.fits"
        ast_file = "./data/" + field_names[b] + ".gst.fake.fits"

        # region file with catalog stars
        # make_ds9_region_file.region_file_fits(gst_file)
        # make_ds9_region_file.region_file_fits(ast_file)

        # -----------------
        # 1a. make magnitude histograms
        # -----------------

        print("")
        print("making magnitude histograms")
        print("")

        # if not os.path.isfile('./data/'+field_names[b]+'.gst_maghist.pdf'):
        peak_mags = plot_mag_hist.plot_mag_hist(gst_file,
                                                stars_per_bin=70,
                                                max_bins=75)
        # test = plot_mag_hist.plot_mag_hist(ast_file, stars_per_bin=200, max_bins=30)

        # -----------------
        # 1b. make a source density map
        # -----------------

        print("")
        print("making source density map")
        print("")

        # source density map
        sd_map = gst_file.replace(".fits", "_source_den_image.fits")
        if not os.path.isfile(sd_map):
            # if True:
            # - pixel size of 5 arcsec
            # - use ref_filter[b] between vega mags of 15 and peak_mags[ref_filter[b]]-0.5
            sourceden_args = types.SimpleNamespace(
                subcommand="sourceden",
                catfile=gst_file,
                pixsize=5,
                npix=None,
                mag_name=ref_filter[b] + "_VEGA",
                mag_cut=[15, peak_mags[ref_filter[b]] - 0.5],
                flag_name=flag_filter[b] + "_FLAG",
            )
            create_background_density_map.main_make_map(sourceden_args)

        # new file name with the source density column
        # gst_file_sd = gst_file.replace(".fits", "_with_sourceden.fits")

        with fits.open(sd_map) as hdu_sd:
            sd_data = hdu_sd[0].data[hdu_sd[0].data != 0]
            ast_n_bins = np.ceil((np.max(sd_data) - np.min(sd_data)) / 1.0)
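        # ast_n_bins = number of unit-width source density bins spanned by
        # the nonzero pixels of the map; passed to create_beast_settings below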

        # -----------------
        # 4/5. edit photometry/AST catalogs
        # -----------------

        # remove sources that are
        # - in regions without full imaging coverage,
        # - flagged in flag_filter

        # print('')
        # print('editing photometry/AST catalogs')
        # print('')

        # gst_file_cut = gst_file.replace('.fits', '_with_sourceden_cut.fits')
        # ast_file_cut = ast_file.replace('.fits', '_cut.fits')

        # cut_catalogs.cut_catalogs(
        #    gst_file_sd, gst_file_cut,
        #    #ast_file, ast_file_cut,
        #    partial_overlap=True, flagged=True, flag_filter=flag_filter[b],
        #    region_file=True)

        # -----------------
        # 0. make beast settings file
        # -----------------

        print("")
        print("creating beast settings file")
        print("")

        # get the boundaries of the image
        boundary_ra = boundary_coord[b][0]
        boundary_dec = boundary_coord[b][1]
        # make an eroded version for ASTs (10 pix = 0.5")
        boundary_polygon = geometry.Polygon(
            [[float(boundary_ra[i]),
              float(boundary_dec[i])] for i in range(len(boundary_ra))])
        erode_polygon = boundary_polygon.buffer(-0.5 / 3600)
        boundary_ra_erode = [
            str(x) for x in erode_polygon.exterior.coords.xy[0]
        ]
        boundary_dec_erode = [
            str(x) for x in erode_polygon.exterior.coords.xy[1]
        ]

        create_beast_settings(
            gst_file,
            ast_file,
            gst_filter_names,
            beast_filter_names,
            ref_image=ast_ref_im[b],
            ast_n_bins=ast_n_bins,
            boundary_ra=boundary_ra_erode,
            boundary_dec=boundary_dec_erode,
        )
        # load in beast settings
        settings = beast_settings.beast_settings("beast_settings_" +
                                                 field_names[b] + ".txt")

        # -----------------
        # 2. make physics model
        # -----------------

        print("")
        print("making physics model")
        print("")

        model_grid_file = "./{0}_beast/{0}_beast_seds.grid.hd5".format(
            field_names[b])

        # only make the physics model if it doesn't already exist
        if not os.path.isfile(model_grid_file):
            create_physicsmodel.create_physicsmodel(settings,
                                                    nprocs=1,
                                                    nsubs=settings.n_subgrid)

        # -----------------
        # 3. make ASTs
        # -----------------

        if not os.path.isfile(ast_file):
            # if True:
            if not os.path.isfile(ast_input_file):
                # if True:
                print("")
                print("creating artificial stars")
                print("")
                make_ast_inputs.make_ast_inputs(settings, flux_bin_method=True)

            # make a region file of the ASTs
            make_ds9_region_file.region_file_txt(ast_input_file)

            # make histograms of the fluxes
            plot_ast_histogram.plot_ast(ast_input_file,
                                        sed_grid_file=model_grid_file)

            print("\n**** go run ASTs for " + field_names[b] + "! ****\n")
            continue
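
The negative buffer call in step 0 above erodes the field polygon inward by 0.5 arcsec. A self-contained sketch of that shapely behavior, independent of the catalog data:

# negative buffer distances shrink a polygon; positive ones grow it
from shapely import geometry

square = geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
eroded = square.buffer(-0.1)
print(square.area)  # 1.0
print(eroded.area)  # 0.64: a 0.1-wide rim is removed on every side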
Code example #4
def beast_ast_inputs(field_name=None,
                     ref_image=None,
                     filter_ids=None,
                     galaxy=None,
                     supp=0):
    """
    This does all of the steps for generating AST inputs and can be used as
    a wrapper to automatically do most steps for multiple fields.
    * make field's beast_settings file
    * make source density map
    * make background density map
    * split catalog by source density
    * make physics model (SED grid)
    * make input list for ASTs
    * prune input ASTs

    ----
    Inputs:

    field_name (str): name of field
    ref_image (str): path to reference image
    filter_ids (list): list of indexes corresponding to the filters in the
                        observation, referenced to the master list below.
    galaxy (str): name of target galaxy (e.g., 'SMC', 'LMC')
    supp (int): if non-zero, also generate supplemental AST inputs
        (default=0)
    ----

    Places for user to manually do things:
    * editing code before use
        - here: list the catalog filter names with the corresponding BEAST names
        - here: choose settings (pixel size, filter, mag range) for the source density map
        - here: choose settings (pixel size, reference image) for the background map

    """

    # the list of fields
    field_names = [field_name]

    # the path+file for a reference image
    im_path = [ref_image]
    ref_filter = ["F475W"]

    # choose a filter to use for removing artifacts
    # (remove catalog sources with filter_FLAG > 99)
    flag_filter = ["F475W"]

    # number of fields
    n_field = len(field_names)

    # Need to know what the correspondence is between filter names in the
    # catalog and the BEAST filter names.
    #
    # These will be used to automatically determine the filters present in
    # each GST file and fill in the beast settings file.  The order doesn't
    # matter, as long as the order in one list matches the order in the other
    # list.
    #
    gst_filter_names = [
        "F225W",
        "F275W",
        "F336W",
        "F475W",
        "F814W",
        "F110W",
        "F160W",
        "F657N",
    ]
    beast_filter_names = [
        "HST_WFC3_F225W",
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_WFC3_F475W",
        "HST_WFC3_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
        "HST_WFC3_F657N",
    ]

    for b in range(n_field):

        print("********")
        print("field " + field_names[b])
        print("********")

        # -----------------
        # data file names
        # -----------------

        # paths for the data/AST files
        gst_file = "./data/{0}/{0}.vgst.fits".format(field_names[b])
        ast_input_file = "./{0}/{0}_inputAST.txt".format(field_names[b])

        # if no galaxy is manually indicated, try to fetch from gst_file name
        if galaxy is None:
            print("no galaxy specified")
            print("fetching galaxy from field name")
            galaxy_attempt = field_names[b].split("_")[1].split("-")[0]
            print("is this the correct galaxy? : %s" % galaxy_attempt)

            # input() returns the empty string for "enter"
            yes = {'yes', 'y', 'ye', ''}
            no = {'no', 'n'}

            response = 0

            while response == 0:
                choice = input().lower()
                if choice in yes:
                    galaxy = galaxy_attempt
                    response = 1
                elif choice in no:
                    print("please rerun with --galaxy specified")
                    break
                else:
                    sys.stdout.write("Please respond with 'yes' or 'no': ")

        # path for the reference image (if using for the background map)
        im_file = im_path[b]

        # fetch filter ids
        gst_data = Table.read(gst_file)
        filter_cols = [c for c in gst_data.colnames if "VEGA" in c]

        # extract every filter mentioned in the table
        filters = [f.split("_")[0] for f in filter_cols]

        # match with the gst filter list
        filter_ids = [gst_filter_names.index(i) for i in filters]
        filter_ids.sort()

        gst_filter_names = [gst_filter_names[i] for i in filter_ids]
        beast_filter_names = [beast_filter_names[i] for i in filter_ids]
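        # note: this re-derivation overrides any filter_ids passed in, and it
        # also shrinks gst_filter_names/beast_filter_names in place, so with
        # multiple fields the master lists would need to be reset each loop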

        print(beast_filter_names)

        # region file with catalog stars
        # make_region_file(gst_file, ref_filter[b])

        # -----------------
        # 0. make beast settings file
        # -----------------

        print("")
        print("creating beast settings file")
        print("")

        beast_settings_filename = create_beast_settings(gst_file,
                                                        ast_input_file,
                                                        gst_filter_names,
                                                        beast_filter_names,
                                                        galaxy,
                                                        ref_image=im_file,
                                                        supp=supp)

        # load in beast settings to get number of subgrids
        settings = beast_settings.beast_settings(
            beast_settings_filename
            #"beast_settings_" + galaxy + "_asts_" + field_names[b] + ".txt"
        )

        # -----------------
        # 1a. make magnitude histograms
        # -----------------

        print("")
        print("making magnitude histograms")
        print("")

        # if not os.path.isfile('./data/'+field_names[b]+'.gst_maghist.pdf'):
        peak_mags = plot_mag_hist.plot_mag_hist(gst_file,
                                                stars_per_bin=70,
                                                max_bins=75)

        # -----------------
        # 1b. make a source density map
        # -----------------

        print("")
        print("making source density map")
        print("")

        # not currently doing background density bins
        # use_bg_info = True
        use_bg_info = False
        if use_bg_info:
            background_args = types.SimpleNamespace(
                subcommand="background",
                catfile=gst_file,
                erode_boundary=settings.ast_erode_selection_region,
                pixsize=5,
                npix=None,
                reference=im_file,
                mask_radius=10,
                ann_width=20,
                cat_filter=[ref_filter[0], "90"],
            )

            create_background_density_map.main_make_map(background_args)

        # but we are doing source density bins!
        if not os.path.isfile(
                gst_file.replace(".fits", "_source_den_image.fits")):
            print("No sd image file found")
            # - pixel size of 5 arcsec
            # - use ref_filter[0] between vega mags of 17 and peak_mags[ref_filter[0]]-0.5
            sourceden_args = types.SimpleNamespace(
                subcommand="sourceden",
                catfile=gst_file,
                erode_boundary=settings.ast_erode_selection_region,
                pixsize=5,
                npix=None,
                mag_name=ref_filter[0] + "_VEGA",
                mag_cut=[17, peak_mags[ref_filter[0]] - 0.5],
                flag_name=flag_filter[0] + "_FLAG",
            )
            create_background_density_map.main_make_map(sourceden_args)

        # new file name with the source density column
        gst_file_sd = gst_file.replace(".fits", "_with_sourceden.fits")

        # -----------------
        # 2. make physics model
        # -----------------

        print("")
        print("making physics model")
        print("")

        # see which subgrid files already exist
        gs_str = ""
        if settings.n_subgrid > 1:
            gs_str = "sub*"

        # try to fetch the list of SED files (physics models)
        model_grid_files = sorted(
            glob.glob("./{0}/{0}_seds.grid*.hd5".format(field_names[b])))

        # only make the physics model if the grids don't already exist
        if len(model_grid_files) < settings.n_subgrid:
            # directly create physics model grids
            create_physicsmodel.create_physicsmodel(settings,
                                                    nprocs=1,
                                                    nsubs=settings.n_subgrid)

        # fetch the list of SED files again (physics models)
        model_grid_files = sorted(
            glob.glob("./{0}/{0}_seds.grid*.hd5".format(field_names[b])))

        # -------------------
        # 3. make AST inputs
        # -------------------

        print("")
        print("making AST inputs")
        print("")

        # only create an AST input list if the ASTs don't already exist
        if not os.path.isfile(ast_input_file):
            make_ast_inputs.make_ast_inputs(settings,
                                            pick_method="flux_bin_method")

        # compare magnitude histograms of ASTs with catalog
        plot_ast_histogram.plot_ast_histogram(
            ast_file=ast_input_file, sed_grid_file=model_grid_files[0])

        if supp != 0:

            print("")
            print("making supplemental AST inputs")
            print("")

            ast_input_supp_file = "./{0}/{0}_inputAST_suppl.txt".format(
                field_names[b])

            if not os.path.isfile(ast_input_supp_file):
                make_ast_inputs.make_ast_inputs(settings,
                                                pick_method="suppl_seds")

        print("now go check the diagnostic plots!")
Code example #5
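This fragment begins partway through the argument definitions. Judging from the args.* attributes used below, the missing head of the script is roughly the following (the short flag letters for the first three options and the import paths are assumptions):

    # reconstruction assumed to sit at the same level as the fragment below;
    # import paths assumed from the BEAST package layout
    import argparse

    from beast.tools import beast_settings
    from beast.tools.run import (create_obsmodel, create_physicsmodel,
                                 make_ast_inputs)

    parser = argparse.ArgumentParser()
    parser.add_argument("-p",
                        "--physicsmodel",
                        help="Generate the physics model grid",
                        action="store_true")
    parser.add_argument("-a",
                        "--ast",
                        help="Generate input artificial star tests",
                        action="store_true")
    parser.add_argument("-o",
                        "--observationmodel",
                        help="Calculate the observation model",
                        action="store_true")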
                        help="Fit the observed data",
                        action="store_true")
    parser.add_argument("-r",
                        "--resume",
                        help="Resume a fitting run",
                        action="store_true")
    args = parser.parse_args()

    # read in BEAST settings
    settings = beast_settings.beast_settings("beast_settings.txt")

    if args.physicsmodel:

        create_physicsmodel.create_physicsmodel(
            settings,
            nsubs=settings.n_subgrid,
            nprocs=1,
        )

    if args.ast:

        make_ast_inputs.make_ast_inputs(settings, flux_bin_method=False)

    if args.observationmodel:
        print("Generating noise model from ASTs and absflux A matrix")

        create_obsmodel.create_obsmodel(
            settings,
            use_sd=False,
            nsubs=settings.n_subgrid,
            nprocs=1,
        )
Code example #6
def generate_files_for_tests(run_beast=True, run_tools=True):
    """
    Use the metal_small example to generate a full set of files for the BEAST
    regression tests.

    Parameters
    ----------
    run_beast : boolean (default=True)
        if True, run the BEAST

    run_tools : boolean (default=True)
        if True, run the code to generate things for tools
    """

    # read in BEAST settings
    settings_orig = beast_settings.beast_settings("beast_settings.txt")
    # also make a version with subgrids
    settings_subgrids = copy.deepcopy(settings_orig)
    settings_subgrids.n_subgrid = 2
    settings_subgrids.project = f"{settings_orig.project}_subgrids"

    # ==========================================
    # run the beast for each set of settings
    # ==========================================

    if run_beast:

        for settings in [settings_orig, settings_subgrids]:

            # -----------------
            # physics model
            # -----------------
            create_physicsmodel.create_physicsmodel(
                settings,
                nsubs=settings.n_subgrid,
                nprocs=1,
            )

            # -----------------
            # ASTs
            # -----------------

            # currently only works for no subgrids
            if settings.n_subgrid == 1:
                make_ast_inputs.make_ast_inputs(settings,
                                                pick_method="flux_bin_method")

            # -----------------
            # obs model
            # -----------------
            create_obsmodel.create_obsmodel(
                settings,
                use_sd=False,
                nsubs=settings.n_subgrid,
                nprocs=1,
                use_rate=True,
            )

            # -----------------
            # trimming
            # -----------------

            # make file names
            file_dict = create_filenames.create_filenames(
                settings, use_sd=False, nsubs=settings.n_subgrid)

            # read in the observed data
            obsdata = Observations(settings.obsfile, settings.filters,
                                   settings.obs_colnames)

            for i in range(settings.n_subgrid):

                # get the modesedgrid on which to generate the noisemodel
                modelsedgridfile = file_dict["modelsedgrid_files"][i]
                modelsedgrid = SEDGrid(modelsedgridfile)

                # read in the noise model just created
                noisemodel_vals = noisemodel.get_noisemodelcat(
                    file_dict["noise_files"][i])

                # trim the model sedgrid
                sed_trimname = file_dict["modelsedgrid_trim_files"][i]
                noisemodel_trimname = file_dict["noise_trim_files"][i]

                trim_grid.trim_models(
                    modelsedgrid,
                    noisemodel_vals,
                    obsdata,
                    sed_trimname,
                    noisemodel_trimname,
                    sigma_fac=3.0,
                )

            # -----------------
            # fitting
            # -----------------

            run_fitting.run_fitting(
                settings,
                use_sd=False,
                nsubs=settings.n_subgrid,
                nprocs=1,
                pdf2d_param_list=["Av", "M_ini", "logT"],
                pdf_max_nbins=200,
            )

            # -----------------
            # merging
            # -----------------

            # it'll automatically skip for no subgrids
            merge_files.merge_files(settings,
                                    use_sd=False,
                                    nsubs=settings.n_subgrid)

            print("\n\n")

    # ==========================================
    # reference files for assorted tools
    # ==========================================

    if run_tools:

        # -----------------
        # compare_spec_type
        # -----------------

        # the input settings
        input = {
            "spec_ra": [72.67213351],
            "spec_dec": [-67.71720515],
            "spec_type": ["A"],
            "spec_subtype": [0],
            "lumin_class": ["IV"],
            "match_radius": 0.2,
        }

        # run it
        output = compare_spec_type.compare_spec_type(
            settings_orig.obsfile,
            "{0}/{0}_stats.fits".format(settings_orig.project),
            **input,
        )

        # save the inputs and outputs
        asdf.AsdfFile({
            "input": input,
            "output": output
        }).write_to("{0}/{0}_compare_spec_type.asdf".format(
            settings_orig.project))

        # -----------------
        # star_type_probability
        # -----------------

        # input settings
        input = {
            "output_filebase": None,
            "ext_O_star_params": {
                "min_M_ini": 10,
                "min_Av": 0.5,
                "max_Av": 5
            },
        }

        # run it
        output = star_type_probability.star_type_probability(
            "{0}/{0}_pdf1d.fits".format(settings_orig.project),
            "{0}/{0}_pdf2d.fits".format(settings_orig.project),
            **input,
        )

        # save the inputs and outputs
        asdf.AsdfFile({
            "input": input,
            "output": output
        }).write_to("{0}/{0}_star_type_probability.asdf".format(
            settings_orig.project))

    # ==========================================
    # asdf file permissions
    # ==========================================

    # for unknown reasons, asdf currently writes files with permissions set
    # to -rw-------.  This changes it to -rw-r--r-- (like the rest of the
    # BEAST files) so Karl can easily copy them over to the cached file
    # website.

    # list of asdf files
    asdf_files = glob.glob("*/*.asdf")
    # go through each one to change permissions
    for fname in asdf_files:
        os.chmod(fname,
                 stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
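
The stat-flag combination above is just -rw-r--r-- spelled out; an equivalent, more compact version of the same loop:

import glob
import os

# 0o644 == stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
for fname in glob.glob("*/*.asdf"):
    os.chmod(fname, 0o644)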
Code example #7
def beast_production_wrapper():
    """
    This does all of the steps for a full production run, and can be used as
    a wrapper to automatically do most steps for multiple fields.
    * make datamodel.py file
    * make source density map
    * make background density map
    * split catalog by source density
    * make physics model (SED grid)
    * make input list for ASTs
    * make noise model
    * generate batch script to trim models
    * generate batch script to fit models
    * merge stats files back together
    * spatially reorder the results

    Places for user to manually do things:
    * editing code before use
        - datamodel_template.py: setting up the file with desired parameters
        - here: list the catalog filter names with the corresponding BEAST names
        - here: choose settings (pixel size, filter, mag range) for the source density map
        - here: choose settings (pixel size, reference image) for the background map
        - here: choose settings (filter, number per file) for dividing catalog by source density
        - here: choose settings (# files, nice level) for the trimming/fitting batch scripts
    * process the ASTs, as described in BEAST documentation
    * run the trimming scripts
    * run the fitting scripts

    BEWARE: When running the trimming/fitting scripts, ensure that the correct
    datamodel.py file is in use.  Since it gets updated every time this code is
    run, you may unexpectedly be using one from another field.
    """

    # the list of fields
    field_names = ["15275_IC1613"]

    # distance moduli and velocities
    # http://adsabs.harvard.edu/abs/2013AJ....146...86T
    dist_mod = [24.36]
    velocity = [-236]

    # the path+file for a reference image
    im_path = ["../beast_dwarfs/images/15275_IC1613_F555W_drz.fits.gz"]
    ref_filter = ["F555W"]

    # choose a filter to use for removing artifacts
    # (remove catalog sources with filter_FLAG > 99)
    flag_filter = ["F555W"]

    # number of fields
    n_field = len(field_names)

    # Need to know what the correspondence is between filter names in the
    # catalog and the BEAST filter names.
    #
    # These will be used to automatically determine the filters present in
    # each GST file and fill in the datamodel.py file.  The order doesn't
    # matter, as long as the order in one list matches the order in the other
    # list.
    #
    gst_filter_names = ["F275W", "F336W", "F390M", "F555W", "F814W", "F110W", "F160W"]
    beast_filter_names = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_WFC3_F390M",
        "HST_WFC3_F555W",
        "HST_WFC3_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]

    for b in range(n_field):
        # for b in [0]:

        print("********")
        print("field " + field_names[b])
        print("********")

        # -----------------
        # data file names
        # -----------------

        # paths for the data/AST files
        gst_file = "./data/" + field_names[b] + ".gst.fits"
        ast_file = "./data/" + field_names[b] + ".gst.fake.fits"
        # path for the reference image (if using for the background map)
        im_file = im_path[b]

        # region file with catalog stars
        # make_region_file(gst_file, ref_filter[b])

        # -----------------
        # 0. make datamodel file
        # -----------------

        # need to do this first, because otherwise any old version that exists
        # will be imported, and changes made here won't get imported again

        print("")
        print("creating datamodel file")
        print("")

        create_datamodel(
            gst_file,
            ast_file,
            gst_filter_names,
            beast_filter_names,
            dist_mod[b],
            velocity[b],
            ref_image=im_file,
        )

        # load in datamodel to get number of subgrids
        import datamodel

        importlib.reload(datamodel)

        # -----------------
        # 1a. make magnitude histograms
        # -----------------

        print("")
        print("making magnitude histograms")
        print("")

        # if not os.path.isfile('./data/'+field_names[b]+'.gst_maghist.pdf'):
        peak_mags = plot_mag_hist.plot_mag_hist(gst_file, stars_per_bin=70, max_bins=75)
        # test = plot_mag_hist.plot_mag_hist(ast_file, stars_per_bin=200, max_bins=30)

        # -----------------
        # 1b. make a source density map
        # -----------------

        print("")
        print("making source density map")
        print("")

        # not currently doing background density bins
        # use_bg_info = True
        use_bg_info = False
        if use_bg_info:
            background_args = types.SimpleNamespace(
                subcommand="background",
                catfile=gst_file,
                pixsize=5,
                npix=None,
                reference=im_file,
                mask_radius=10,
                ann_width=20,
                cat_filter=[ref_filter[b], "90"],
            )
            create_background_density_map.main_make_map(background_args)

        # but we are doing source density bins!
        if not os.path.isfile(gst_file.replace(".fits", "_source_den_image.fits")):
            # - pixel size of 8 arcsec
            # - use ref_filter[b] between vega mags of 17 and peak_mags[ref_filter[b]]-0.5
            sourceden_args = types.SimpleNamespace(
                subcommand="sourceden",
                catfile=gst_file,
                pixsize=8,
                npix=None,
                mag_name=ref_filter[b] + "_VEGA",
                mag_cut=[17, peak_mags[ref_filter[b]] - 0.5],
            )
            create_background_density_map.main_make_map(sourceden_args)

        # new file name with the source density column
        gst_file_sd = gst_file.replace(".fits", "_with_sourceden.fits")

        # -----------------
        # 2. make physics model
        # -----------------

        print("")
        print("making physics model")
        print("")

        # see which subgrid files already exist
        gs_str = ""
        if datamodel.n_subgrid > 1:
            gs_str = "sub*"

        spec_files = glob.glob(
            "./"
            + field_names[b]
            + "_beast/"
            + field_names[b]
            + "_beast_spec_w_priors.grid"
            + gs_str
            + ".hd5"
        )

        # only make the physics model if the spec grids don't already exist
        if len(spec_files) < datamodel.n_subgrid:
            create_physicsmodel.create_physicsmodel(nprocs=1, nsubs=datamodel.n_subgrid)

        # list of SED files
        model_grid_files = sorted(
            glob.glob(
                "./"
                + field_names[b]
                + "_beast/"
                + field_names[b]
                + "_beast_seds.grid"
                + gs_str
                + ".hd5"
            )
        )

        # -----------------
        # 3. make ASTs
        # -----------------

        # only create an AST input list if the ASTs don't already exist
        ast_input_file = (
            "./" + field_names[b] + "_beast/" + field_names[b] + "_beast_inputAST.txt"
        )

        if not os.path.isfile(ast_file):
            if not os.path.isfile(ast_input_file):
                print("")
                print("creating artificial stars")
                print("")
                make_ast_inputs.make_ast_inputs(flux_bin_method=True)

            split_ast_input_file.split_asts(
                field_names[b] + "_beast", ast_input_file, 2000
            )

            print("\n**** go run ASTs for " + field_names[b] + "! ****\n")
            continue

        # -----------------
        # 4/5. edit photometry/AST catalogs
        # -----------------

        # remove sources that are
        # - in regions without full imaging coverage,
        # - flagged in flag_filter

        print("")
        print("editing photometry/AST catalogs")
        print("")

        # - photometry
        gst_file_cut = gst_file.replace(".fits", "_with_sourceden_cut.fits")
        cut_catalogs.cut_catalogs(
            gst_file_sd,
            gst_file_cut,
            partial_overlap=True,
            flagged=True,
            flag_filter=flag_filter[b],
            region_file=True,
        )

        # - ASTs
        ast_file_cut = ast_file.replace(".fits", "_cut.fits")
        cut_catalogs.cut_catalogs(
            ast_file,
            ast_file_cut,
            partial_overlap=True,
            flagged=True,
            flag_filter=flag_filter[b],
            region_file=True,
        )
        # test = plot_mag_hist.plot_mag_hist(ast_file_cut, stars_per_bin=200, max_bins=30)

        # edit the datamodel.py file to have the correct photometry file name
        # (AST file name is already automatically the cut version)
        create_datamodel(
            gst_file_cut,
            ast_file_cut,
            gst_filter_names,
            beast_filter_names,
            dist_mod[b],
            velocity[b],
            ref_image=im_file,
        )

        # -----------------
        # 6. split observations by source density
        # -----------------

        print("")
        print("splitting observations by source density")
        print("")

        # - photometry

        if len(glob.glob(gst_file_cut.replace(".fits", "*sub*fits"))) == 0:

            # a smaller value for Ns_file will mean more individual files/runs,
            # but each run will take a shorter amount of time

            subdivide_obscat_by_source_density.split_obs_by_source_density(
                gst_file_cut,
                bin_width=1,
                sort_col=ref_filter[b] + "_RATE",
                Ns_file=6250,
            )

        # - ASTs

        # check if any files exist already
        ast_files = sorted(glob.glob(ast_file_cut.replace(".fits", "_SD_*.fits")))

        if len(ast_files) == 0:
            split_asts_by_source_density.split_asts(
                ast_file_cut, gst_file.replace(".fits", "_sourceden_map.hd5")
            )

        # -- at this point, we can run the code to create lists of filenames
        file_dict = create_filenames.create_filenames(
            use_sd=True, nsubs=datamodel.n_subgrid
        )

        # figure out how many files there are
        sd_sub_info = file_dict["sd_sub_info"]
        # - number of SD bins
        temp = set([i[0] for i in sd_sub_info])
        print("** total SD bins: " + str(len(temp)))
        # - the unique sets of SD+sub
        unique_sd_sub = [
            x for i, x in enumerate(sd_sub_info) if i == sd_sub_info.index(x)
        ]
        print("** total SD subfiles: " + str(len(unique_sd_sub)))

        # -----------------
        # 7. make noise models
        # -----------------

        print("")
        print("making noise models")
        print("")

        # create the noise model (this code will check if it exists)
        create_obsmodel.create_obsmodel(
            use_sd=True, nsubs=datamodel.n_subgrid, nprocs=1
        )

        # -----------------
        # 8. make script to trim models
        # -----------------

        print("")
        print("setting up script to trim models")
        print("")

        # save any at-queue commands
        at_list = []

        # iterate through each model grid
        for i in range(datamodel.n_subgrid):

            # gst list
            temp = file_dict["photometry_files"]
            gst_input_list = [x for i, x in enumerate(temp) if i == temp.index(x)]

            # create corresponding files for each of those
            ast_input_list = []
            noise_files = []
            trim_prefix = []

            for j in range(len(gst_input_list)):
                # get the sd/sub number
                curr_sd = unique_sd_sub[j][0]
                curr_sub = unique_sd_sub[j][1]
                subfolder = "SD{0}_sub{1}".format(curr_sd, curr_sub)

                # create file names
                ast_input_list.append(
                    ast_file_cut.replace(".fits", "_SD" + curr_sd + ".fits")
                )
                if datamodel.n_subgrid > 1:
                    noise_files.append(
                        "./"
                        + field_names[b]
                        + "_beast/"
                        + field_names[b]
                        + "_beast_noisemodel_SD"
                        + curr_sd
                        + ".gridsub"
                        + str(i)
                        + ".hd5"
                    )
                    trim_prefix.append(
                        "./"
                        + field_names[b]
                        + "_beast/"
                        + subfolder
                        + "/"
                        + field_names[b]
                        + "_beast_"
                        + subfolder
                        + "_gridsub"
                        + str(i)
                    )
                if datamodel.n_subgrid == 1:
                    noise_files.append(
                        "./"
                        + field_names[b]
                        + "_beast/"
                        + field_names[b]
                        + "_beast_noisemodel_SD"
                        + curr_sd
                        + ".hd5"
                    )
                    trim_prefix.append(
                        "./"
                        + field_names[b]
                        + "_beast/"
                        + field_names[b]
                        + "_beast_"
                        + subfolder
                    )

            # check if the trimmed grids exist before moving on
            if datamodel.n_subgrid > 1:
                trim_files = sorted(
                    glob.glob(
                        "./"
                        + field_names[b]
                        + "_beast/SD*_sub*/"
                        + field_names[b]
                        + "_beast_*_gridsub"
                        + str(i)
                        + "_sed_trim.grid.hd5"
                    )
                )
            if datamodel.n_subgrid == 1:
                trim_files = sorted(
                    glob.glob(
                        "./"
                        + field_names[b]
                        + "_beast/"
                        + field_names[b]
                        + "_beast_*_sub*_sed_trim.grid.hd5"
                    )
                )

            if len(trim_files) < len(gst_input_list):

                job_path = "./" + field_names[b] + "_beast/trim_batch_jobs/"
                if datamodel.n_subgrid > 1:
                    file_prefix = "BEAST_gridsub" + str(i)
                if datamodel.n_subgrid == 1:
                    file_prefix = "BEAST"

                # generate trimming at-queue commands
                setup_batch_beast_trim.generic_batch_trim(
                    model_grid_files[i],
                    noise_files,
                    gst_input_list,
                    ast_input_list,
                    trim_prefix,
                    job_path=job_path,
                    file_prefix=file_prefix,
                    num_subtrim=1,
                    nice=19,
                    prefix="source activate b13",
                )

                at_list.append(
                    "at -f " + job_path + file_prefix + "_batch_trim.joblist now"
                )

        if len(at_list) > 0:
            print("\n**** go run trimming code for " + field_names[b] + "! ****")
            print("Here are the command(s) to run:")
            for cmd in at_list:
                print(cmd)
            return
        else:
            print("all files are trimmed for " + field_names[b])

        # -----------------
        # 9. make script to fit models
        # -----------------

        print("")
        print("setting up script to fit models")
        print("")

        fit_run_info = setup_batch_beast_fit.setup_batch_beast_fit(
            num_percore=1,
            nice=19,
            overwrite_logfile=False,
            prefix="source activate b13",
            use_sd=True,
            nsubs=datamodel.n_subgrid,
            nprocs=1,
        )

        # check if the fits exist before moving on
        tot_remaining = len(fit_run_info["done"]) - np.sum(fit_run_info["done"])
        if tot_remaining > 0:
            print("\n**** go run fitting code for " + field_names[b] + "! ****")
            print(
                "Here are the "
                + str(len(fit_run_info["files_to_run"]))
                + " commands to run:"
            )
            for job_file in fit_run_info["files_to_run"]:
                print("at -f ./" + job_file + " now")
            continue
        else:
            print("all fits are complete for " + field_names[b])

        # -----------------
        # 10. merge stats files from each fit
        # -----------------

        print("")
        print("merging stats files")
        print("")

        merge_files.merge_files(use_sd=True, nsubs=datamodel.n_subgrid)
Code example #8
    parser.add_argument("-f",
                        "--fit",
                        help="Fit the observed data",
                        action="store_true")
    parser.add_argument("-r",
                        "--resume",
                        help="Resume a fitting run",
                        action="store_true")
    args = parser.parse_args()

    # check input parameters, print what is the problem, stop run_beast
    verify_params.verify_input_format(datamodel)

    if args.physicsmodel:

        create_physicsmodel.create_physicsmodel(nsubs=1, nprocs=1)

    if args.ast:

        make_ast_inputs.make_ast_inputs(flux_bin_method=False)

    if args.observationmodel:
        print("Generating noise model from ASTs and absflux A matrix")

        create_obsmodel.create_obsmodel(use_sd=False,
                                        nsubs=1,
                                        nprocs=1,
                                        use_rate=False)

        # in the absence of ASTs, the splinter noise model can be used
        # instead of the toothpick model above
Code example #9
def beast_ast_inputs(field_name=None, ref_image=None, filter_ids=None, galaxy=None):
    """
    This does all of the steps for generating AST inputs and can be used as
    a wrapper to automatically do most steps for multiple fields.
    * make field's beast_settings file
    * make source density map
    * make background density map
    * split catalog by source density
    * make physics model (SED grid)
    * make input list for ASTs
    * prune input ASTs

    ----
    Inputs:

    field_name (str): name of field
    ref_image (str): path to reference image
    filter_ids (list): list of indexes corresponding to the filters in the
                        observation, referenced to the master list below.
    galaxy (str): name of target galaxy (e.g., 'SMC', 'LMC')
    ----

    Places for user to manually do things:
    * editing code before use
        - here: list the catalog filter names with the corresponding BEAST names
        - here: choose settings (pixel size, filter, mag range) for the source density map
        - here: choose settings (pixel size, reference image) for the background map

    """

    # the list of fields
    field_names = [field_name]

    # the path+file for a reference image
    im_path = [ref_image]
    ref_filter = ["F475W"]

    # choose a filter to use for removing artifacts
    # (remove catalog sources with filter_FLAG > 99)
    flag_filter = ["F475W"]

    # number of fields
    n_field = len(field_names)

    # Need to know what the correspondence is between filter names in the
    # catalog and the BEAST filter names.
    #
    # These will be used to automatically determine the filters present in
    # each GST file and fill in the beast settings file.  The order doesn't
    # matter, as long as the order in one list matches the order in the other
    # list.
    #
    gst_filter_names = [
        "F225W",
        "F275W",
        "F336W",
        "F475W",
        "F814W",
        "F110W",
        "F160W",
        "F657N",
    ]
    beast_filter_names = [
        "HST_WFC3_F225W",
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_WFC3_F475W",
        "HST_WFC3_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
        "HST_WFC3_F657N",
    ]

    filter_ids = [int(i) for i in filter_ids]

    gst_filter_names = [gst_filter_names[i] for i in filter_ids]
    beast_filter_names = [beast_filter_names[i] for i in filter_ids]

    for b in range(n_field):

        print("********")
        print("field " + field_names[b])
        print("********")

        # -----------------
        # data file names
        # -----------------

        # paths for the data/AST files
        gst_file = "./data/" + field_names[b] + ".st.fits"
        ast_file = "./data/" + field_names[b] + ".st.fake.fits"
        # path for the reference image (if using for the background map)
        im_file = im_path[b]

        # region file with catalog stars
        # make_region_file(gst_file, ref_filter[b])

        # -----------------
        # 0. make beast settings file
        # -----------------

        print("")
        print("creating beast settings file")
        print("")

        create_beast_settings(
            gst_file,
            ast_file,
            gst_filter_names,
            beast_filter_names,
            galaxy,
            ref_image=im_file,
        )

        # load in beast settings to get number of subgrids
        settings = beast_settings.beast_settings(
            "beast_settings_" + galaxy + "_asts_" + field_names[b] + ".txt"
        )

        # -----------------
        # 1a. make magnitude histograms
        # -----------------

        print("")
        print("making magnitude histograms")
        print("")

        # if not os.path.isfile('./data/'+field_names[b]+'.gst_maghist.pdf'):
        peak_mags = plot_mag_hist.plot_mag_hist(gst_file, stars_per_bin=70, max_bins=75)

        # -----------------
        # 1b. make a source density map
        # -----------------

        print("")
        print("making source density map")
        print("")

        # not currently doing background density bins
        # use_bg_info = True
        use_bg_info = False
        if use_bg_info:
            background_args = types.SimpleNamespace(
                subcommand="background",
                catfile=gst_file,
                pixsize=5,
                npix=None,
                reference=im_file,
                mask_radius=10,
                ann_width=20,
                cat_filter=[ref_filter[0], "90"],
            )
            create_background_density_map.main_make_map(background_args)

        # but we are doing source density bins!
        if not os.path.isfile(gst_file.replace(".fits", "_source_den_image.fits")):
            # - pixel size of 5 arcsec
            # - use ref_filter[0] between vega mags of 17 and peak_mags[ref_filter[0]]-0.5
            sourceden_args = types.SimpleNamespace(
                subcommand="sourceden",
                catfile=gst_file,
                pixsize=5,
                npix=None,
                mag_name=ref_filter[0] + "_VEGA",
                mag_cut=[17, peak_mags[ref_filter[0]] - 0.5],
                flag_name=flag_filter[0] + "_FLAG",
            )
            create_background_density_map.main_make_map(sourceden_args)

        # new file name with the source density column
        gst_file_sd = gst_file.replace(".fits", "_with_sourceden.fits")

        # -----------------
        # 2. make physics model
        # -----------------

        print("")
        print("making physics model")
        print("")

        # see which subgrid files already exist
        gs_str = ""
        if settings.n_subgrid > 1:
            gs_str = "sub*"

        sed_files = glob.glob(
            "./{0}_beast/{0}_beast_seds.grid{1}.hd5".format(field_names[b], gs_str)
        )

        # only make the physics model if the SED grids don't already exist
        if len(sed_files) < settings.n_subgrid:
            # directly create physics model grids
            create_physicsmodel.create_physicsmodel(
                settings, nprocs=1, nsubs=settings.n_subgrid
            )

        # -------------------
        # 3. make AST inputs
        # -------------------

        # only create an AST input list if the ASTs don't already exist
        ast_input_file = "./" + field_names[b] + "/" + field_names[b] + "_inputAST.txt"

        if not os.path.isfile(ast_input_file):
            make_ast_inputs.make_ast_inputs(settings, pick_method="flux_bin_method")

        # list of SED files (physics models)
        model_grid_files = sorted(
            glob.glob("./{0}/{0}_seds.grid*.hd5".format(field_names[b]))
        )

        # --------------------
        # 3.1 "prune" AST inputs
        # --------------------

        # prune input AST by flux (empirically determined)
        ast_input_tab = Table.read(ast_input_file, format="ascii")
        ast_input_tab_pruned = ast_input_tab.copy()

        if "F336W" in gst_filter_names:
            prune_spots = (
                (ast_input_tab_pruned["HST_WFC3_F336W"] > 30.5)
                & (ast_input_tab_pruned["HST_WFC3_F475W"] > 32.5)
                & (ast_input_tab_pruned["HST_WFC3_F814W"] > 29.0)
            )
        else:
            prune_spots = (ast_input_tab_pruned["HST_WFC3_F475W"] > 32.5) & (
                ast_input_tab_pruned["HST_WFC3_F814W"] > 29.0
            )

        ast_input_tab_pruned = ast_input_tab_pruned[~prune_spots]

        # write pruned ast input table to a txt file
        ast_input_file_pruned = (
            "./" + field_names[b] + "/" + field_names[b] + "_inputAST_pruned.txt"
        )
        ast_input_tab_pruned.write(
            ast_input_file_pruned, format="ascii", overwrite=True
        )

        # print out number of pruned ASTs per source density bin as a sanity check
        print("pruned input AST statistics per bin")
        input_ast_bin_stats(settings, ast_input_file_pruned, field_names[b])

        # compare magnitude histograms of pruned ASTs with catalog
        plot_ast_histogram.plot_ast_histogram(
            ast_file=ast_input_file_pruned, sed_grid_file=model_grid_files[0]
        )

        print("now go check the diagnostic plots!")