Example #1
def test_reduce_grid_info():
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")
    sub_fnames = subgridding_tools.split_grid(seds_trim_fname, 3)

    complete_g_info = subgridding_tools.subgrid_info(seds_trim_fname)
    cap_unique = 50
    sub_g_info = subgridding_tools.reduce_grid_info(sub_fnames,
                                                    nprocs=3,
                                                    cap_unique=cap_unique)

    for q in complete_g_info:
        if q not in sub_g_info:
            raise AssertionError()
        if not complete_g_info[q]["min"] == sub_g_info[q]["min"]:
            raise AssertionError()
        if not complete_g_info[q]["max"] == sub_g_info[q]["max"]:
            raise AssertionError()
        num_unique = len(complete_g_info[q]["unique"])
        if num_unique > cap_unique:
            # Can still be larger if one of the sub results during the
            # reduction is larger. This is as intended.
            if not sub_g_info[q]["num_unique"] >= cap_unique:
                raise AssertionError()
        else:
            if not sub_g_info[q]["num_unique"] == num_unique:
                raise AssertionError()
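The assertions above pin down the assumed shape of the dictionaries returned by subgrid_info and reduce_grid_info. A minimal sketch of that shape, with an illustrative quantity name and made-up values (not taken from a real grid):

# Illustrative only: assumed shape of the grid-info dicts, inferred from the
# assertions in the test above. The quantity name "Av" and all values are
# made up for this sketch.
complete_g_info_example = {
    "Av": {"min": 0.0, "max": 10.5, "unique": [0.0, 0.5, 1.0, 2.0, 10.5]},
}
sub_g_info_example = {
    "Av": {"min": 0.0, "max": 10.5, "num_unique": 5},
}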
Example #2
    def test_reduce_grid_info(self):
        """
        Split a cached version of a sed grid and check that the grid info
        reduced from the subgrids (min, max, and number of unique values
        per quantity) matches the info computed from the full grid.
        """
        sub_fnames = subgridding_tools.split_grid(self.seds_trim_fname_cache,
                                                  3)

        complete_g_info = subgridding_tools.subgrid_info(
            self.seds_trim_fname_cache)
        cap_unique = 50
        sub_g_info = subgridding_tools.reduce_grid_info(sub_fnames,
                                                        nprocs=3,
                                                        cap_unique=cap_unique)

        for q in complete_g_info:
            if q not in sub_g_info:
                raise AssertionError()
            if not complete_g_info[q]["min"] == sub_g_info[q]["min"]:
                raise AssertionError()
            if not complete_g_info[q]["max"] == sub_g_info[q]["max"]:
                raise AssertionError()
            num_unique = len(complete_g_info[q]["unique"])
            if num_unique > cap_unique:
                # Can still be larger if one of the sub results during the
                # reduction is larger. This is as intended.
                if not sub_g_info[q]["num_unique"] >= cap_unique:
                    raise AssertionError()
            else:
                if not sub_g_info[q]["num_unique"] == num_unique:
                    raise AssertionError()
Example #3
def split_and_check(grid_fname, num_subgrids):
    complete_g = SEDGrid(grid_fname)
    sub_fnames = subgridding_tools.split_grid(grid_fname, num_subgrids)

    # collect the subgrid data; concatenating it later checks that the
    # total number of grid cells is preserved
    sub_seds = []
    sub_grids = []

    for sub_fname in sub_fnames:
        sub_g = SEDGrid(sub_fname)

        sub_seds.append(sub_g.seds)
        sub_grids.append(sub_g.grid)

        np.testing.assert_equal(complete_g.lamb, sub_g.lamb)
        if not complete_g.grid.colnames == sub_g.grid.colnames:
            raise AssertionError()

    sub_seds_reconstructed = np.concatenate(sub_seds)
    np.testing.assert_equal(sub_seds_reconstructed, complete_g.seds)

    sub_grids_reconstructed = np.concatenate(sub_grids)
    np.testing.assert_equal(sub_grids_reconstructed, complete_g.grid)

    # the split method skips anything that already exists, so if we
    # want to use this function multiple times for the same test
    # grid, we need to do this.
    for f in sub_fnames:
        os.remove(f)
Example #4
def split_and_check(grid_fname, num_subgrids):
    """
    Split a sed grid into subgrids and test the contents of the subgrids
    are as expected and concatenating the subgrid components (seds, grid)
    gives the full sed grid.

    Parameters
    ----------
    grid_fname : str
        filename for the sed grid

    num_subgrids : int
        number of subgrids to split the sed grid into
    """
    complete_g = SEDGrid(grid_fname)
    sub_fnames = subgridding_tools.split_grid(grid_fname, num_subgrids)

    # collect the subgrid data; concatenating it later checks that the
    # total number of grid cells is preserved
    sub_seds = []
    sub_grids = []

    for sub_fname in sub_fnames:
        sub_g = SEDGrid(sub_fname)

        sub_seds.append(sub_g.seds)
        sub_grids.append(sub_g.grid)

        np.testing.assert_equal(complete_g.lamb, sub_g.lamb)
        if not complete_g.grid.colnames == sub_g.grid.colnames:
            raise AssertionError()

    sub_seds_reconstructed = np.concatenate(sub_seds)
    np.testing.assert_equal(sub_seds_reconstructed, complete_g.seds)

    sub_grids_reconstructed = np.concatenate(sub_grids)
    np.testing.assert_equal(sub_grids_reconstructed, complete_g.grid)

    # the split method skips anything that already exists, so if we
    # want to use this function multiple times for the same test
    # grid, we need to do this.
    for f in sub_fnames:
        os.remove(f)
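A minimal driver for split_and_check, reusing the cached PHAT example grid from the other examples on this page; the subgrid counts are illustrative choices, not prescribed by the source:

def test_split_grid():
    # Illustrative driver, assuming the same cached example grid used in the
    # other examples; the subgrid counts are arbitrary for this sketch.
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")
    for num_subgrids in [1, 3, 4]:
        split_and_check(seds_trim_fname, num_subgrids)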
Example #5
def test_reduce_grid_info():
    seds_trim_fname = download_rename('beast_example_phat_seds_trim.grid.hd5')
    sub_fnames = subgridding_tools.split_grid(seds_trim_fname, 3)

    complete_g_info = subgridding_tools.subgrid_info(seds_trim_fname)
    cap_unique = 50
    sub_g_info = subgridding_tools.reduce_grid_info(
        sub_fnames, nprocs=3, cap_unique=cap_unique)

    for q in complete_g_info:
        assert q in sub_g_info
        assert complete_g_info[q]['min'] == sub_g_info[q]['min']
        assert complete_g_info[q]['max'] == sub_g_info[q]['max']
        num_unique = len(complete_g_info[q]['unique'])
        if num_unique > cap_unique:
            # Can still be larger if one of the sub results during the
            # reduction is larger. This is as intended.
            assert sub_g_info[q]['num_unique'] >= cap_unique
        else:
            assert sub_g_info[q]['num_unique'] == num_unique
Example #6
def create_physicsmodel(nsubs=1, nprocs=1, subset=[None, None]):
    """
    Create the physics model grid.  If nsubs > 1, this will make sub-grids.


    Parameters
    ----------
    nsubs : int (default=1)
        number of subgrids to split the physics model into

    nprocs : int (default=1)
        Number of parallel processes to use
        (currently only implemented for subgrids)

    subset : list of two ints (default=[None,None])
        Only process subgrids in the range [start,stop].
        (only relevant if nsubs > 1)

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # filename for the SED grid
    modelsedgrid_filename = "%s/%s_seds.grid.hd5" % (
        datamodel.project,
        datamodel.project,
    )

    # grab the current subgrid slice
    subset_slice = slice(subset[0], subset[1])

    # make sure the project directory exists
    create_project_dir(datamodel.project)

    # download and load the isochrones
    (iso_fname, oiso) = make_iso_table(
        datamodel.project,
        oiso=datamodel.oiso,
        logtmin=datamodel.logt[0],
        logtmax=datamodel.logt[1],
        dlogt=datamodel.logt[2],
        z=datamodel.z,
    )

    # remove the isochrone points with logL=-9.999
    oiso = ezIsoch(oiso.selectWhere("*", "logL > -9"))

    if hasattr(datamodel, "add_spectral_properties_kwargs"):
        extra_kwargs = datamodel.add_spectral_properties_kwargs
    else:
        extra_kwargs = None

    if hasattr(datamodel, "velocity"):
        redshift = (datamodel.velocity / const.c).decompose().value
    else:
        redshift = 0

    # generate the spectral library (no dust extinction)
    (spec_fname, g_spec) = make_spectral_grid(
        datamodel.project,
        oiso,
        osl=datamodel.osl,
        redshift=redshift,
        distance=datamodel.distances,
        distance_unit=datamodel.distance_unit,
        extLaw=datamodel.extLaw,
        add_spectral_properties_kwargs=extra_kwargs,
    )

    # add the stellar priors as weights
    #   also computes the grid weights for the stellar part
    (pspec_fname, g_pspec) = add_stellar_priors(
        datamodel.project,
        g_spec,
        age_prior_model=datamodel.age_prior_model,
        mass_prior_model=datamodel.mass_prior_model,
        met_prior_model=datamodel.met_prior_model,
    )

    # --------------------
    # no subgrids
    # --------------------

    if nsubs == 1:
        # generate the SED grid by integrating the filter response functions
        #   effect of dust extinction applied before filter integration
        #   also computes the dust priors as weights
        make_extinguished_sed_grid(
            datamodel.project,
            g_pspec,
            datamodel.filters,
            extLaw=datamodel.extLaw,
            av=datamodel.avs,
            rv=datamodel.rvs,
            fA=datamodel.fAs,
            rv_prior_model=datamodel.rv_prior_model,
            av_prior_model=datamodel.av_prior_model,
            fA_prior_model=datamodel.fA_prior_model,
            spec_fname=modelsedgrid_filename,
            add_spectral_properties_kwargs=extra_kwargs,
        )

    # --------------------
    # use subgrids
    # --------------------

    if nsubs > 1:
        # Work with the whole grid up to here (otherwise, priors need a
        # rework - they don't like having only a subset of the parameter
        # space, especially when there's only one age for example)

        # Make subgrids, by splitting the spectral grid into equal sized pieces
        custom_sub_pspec = subgridding_tools.split_grid(pspec_fname, nsubs)

        file_prefix = "{0}/{0}_".format(datamodel.project)

        # function to process the subgrids individually
        def gen_subgrid(i, sub_name):
            sub_g_pspec = FileSEDGrid(sub_name)
            sub_seds_fname = "{}seds.gridsub{}.hd5".format(file_prefix, i)

            # generate the SED grid by integrating the filter response functions
            #   effect of dust extinction applied before filter integration
            #   also computes the dust priors as weights
            (sub_seds_fname, sub_g_seds) = make_extinguished_sed_grid(
                datamodel.project,
                sub_g_pspec,
                datamodel.filters,
                extLaw=datamodel.extLaw,
                av=datamodel.avs,
                rv=datamodel.rvs,
                fA=datamodel.fAs,
                rv_prior_model=datamodel.rv_prior_model,
                av_prior_model=datamodel.av_prior_model,
                fA_prior_model=datamodel.fA_prior_model,
                add_spectral_properties_kwargs=extra_kwargs,
                seds_fname=sub_seds_fname,
            )

            return sub_seds_fname

        # run the above function
        par_tuples = [
            (i, sub_name) for i, sub_name in enumerate(custom_sub_pspec)
        ][subset_slice]

        parallel_wrapper(gen_subgrid, par_tuples, nprocs=nprocs)

        # Save a list of subgrid names that we expect to see
        required_names = [
            "{}seds.gridsub{}.hd5".format(file_prefix, i) for i in range(nsubs)
        ]

        outdir = os.path.join(".", datamodel.project)
        subgrid_names_file = os.path.join(outdir, "subgrid_fnames.txt")

        with open(subgrid_names_file, "w") as fname_file:
            for fname in required_names:
                fname_file.write(fname + "\n")
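Since subset is turned into a slice over the enumerated subgrids, a partial run can be requested directly. A hedged call sketch with illustrative parameter values:

# Sketch only: split the physics model into 4 subgrids, use 2 parallel
# processes, and generate just the first two subgrids (subset is applied
# as slice(0, 2) over the enumerated subgrids).
create_physicsmodel(nsubs=4, nprocs=2, subset=[0, 2])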
Example #7
            settings.project,
            oiso,
            osl=settings.osl,
            distance=settings.distances,
            distance_unit=settings.distance_unit,
            extLaw=settings.extLaw,
            add_spectral_properties_kwargs=extra_kwargs,
        )

        # Work with the whole grid up to here (otherwise, priors need a
        # rework - they don't like having only a subset of the parameter
        # space, especially when there's only one age for example)
        (pspec_fname, g_pspec) = add_stellar_priors(settings.project, g_spec)

        # Make subgrids, by splitting the spectral grid into equal sized pieces
        custom_sub_pspec = subgridding_tools.split_grid(
            pspec_fname, args.nsubs)

        file_prefix = "{0}/{0}_".format(settings.project)

        # process the subgrids individually
        def gen_subgrid(i, sub_name):
            sub_g_pspec = FileSEDGrid(sub_name)
            sub_seds_fname = "{}seds.gridsub{}.hd5".format(file_prefix, i)

            (sub_seds_fname, sub_g_seds) = make_extinguished_sed_grid(
                settings.project,
                sub_g_pspec,
                settings.filters,
                extLaw=settings.extLaw,
                av=settings.avs,
                rv=settings.rvs,
Example #8
    def test_merge_pdf1d_stats(self):
        """
        Using cached versions of the observations, sed grid, and noise model,
        split the grids and do the fitting on the subgrids and original
        grid.  Merge the results from the subgrids and compare to the results
        from fitting the full grid.
        """
        ######################################
        # STEP 1: GET SOME DATA TO WORK WITH #
        ######################################

        # read in the observed data
        obsdata = Observations(self.obs_fname_cache, self.settings.filters,
                               self.settings.obs_colnames)

        #########################################################################################
        # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
        #########################################################################################
        num_subgrids = 3

        # Split SED grid
        sub_seds_trim_fnames = subgridding_tools.split_grid(
            self.seds_trim_fname_cache, num_subgrids, overwrite=True)

        # Split noise grid (a standardized function does not exist)
        sub_noise_trim_fnames = []

        noisemodel_vals = noisemodel.get_noisemodelcat(
            self.noise_trim_fname_cache)
        slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                                  num_subgrids)
        for i, slc in enumerate(slices):
            outname = self.noise_trim_fname_cache.replace(
                ".hd5", "sub{}.hd5".format(i))
            with tables.open_file(outname, "w") as outfile:
                outfile.create_array(outfile.root, "bias",
                                     noisemodel_vals["bias"][slc])
                outfile.create_array(outfile.root, "error",
                                     noisemodel_vals["error"][slc])
                outfile.create_array(outfile.root, "completeness",
                                     noisemodel_vals["completeness"][slc])
            sub_noise_trim_fnames.append(outname)

        # Collect information about the parameter ranges, to make the pdf1d bins
        # consistent between subgrids
        grid_info_dict = subgridding_tools.reduce_grid_info(
            sub_seds_trim_fnames,
            sub_noise_trim_fnames,
            nprocs=1,
            cap_unique=100)

        ##################################################
        # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
        ##################################################
        def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
            return [
                base_fname.replace(extension,
                                   "gridsub{}{}".format(i, extension))
                for i in range(num_subgrids)
            ]
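        # For concreteness (derived from the replace call above):
        # make_gridsub_fnames("stats.fits", 3) returns
        #   ["statsgridsub0.fits", "statsgridsub1.fits", "statsgridsub2.fits"]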

        stats_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        pdf1d_fname = tempfile.NamedTemporaryFile(suffix=".fits").name
        lnp_fname = tempfile.NamedTemporaryFile(suffix=".hd5").name

        subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
        subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
        subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname,
                                                 num_subgrids,
                                                 extension=".hd5")

        for i in range(num_subgrids):
            sub_noisemodel_vals = noisemodel.get_noisemodelcat(
                sub_noise_trim_fnames[i])
            fit.summary_table_memory(
                obsdata,
                sub_noisemodel_vals,
                sub_seds_trim_fnames[i],
                threshold=-40.0,
                save_every_npts=100,
                lnp_npts=500,
                stats_outname=subgrid_stats_fnames[i],
                pdf1d_outname=subgrid_pdf1d_fnames[i],
                lnp_outname=subgrid_lnp_fnames[i],
                grid_info_dict=grid_info_dict,
                do_not_normalize=True,
            )
            # The do_not_normalize option is absolutely crucial!

        # Now merge the results
        merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
            subgrid_pdf1d_fnames, subgrid_stats_fnames)

        # Do a full fit also
        normal_stats = tempfile.NamedTemporaryFile(suffix=".fits").name
        normal_pdf1d = tempfile.NamedTemporaryFile(suffix=".fits").name
        normal_lnp = tempfile.NamedTemporaryFile(suffix=".hd5").name
        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            self.seds_trim_fname_cache,
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=normal_stats,
            pdf1d_outname=normal_pdf1d,
            lnp_outname=normal_lnp,
            do_not_normalize=True,
        )
        # Here, we also need to use do_not_normalize, otherwise Pmax will be
        # different by a factor

        # CHECKS
        tolerance = 1e-6
        fits_normal = fits.open(normal_pdf1d)
        fits_new = fits.open(merged_pdf1d_fname)

        if not len(fits_new) == len(fits_normal):
            raise AssertionError()

        # A similar problem to the above will also occur here
        for k in range(1, len(fits_new)):
            qname = fits_new[k].header["EXTNAME"]
            np.testing.assert_allclose(
                fits_new[k].data,
                fits_normal[qname].data,
                rtol=tolerance,
                atol=tolerance,
            )

        table_normal = Table.read(normal_stats)
        table_new = Table.read(merged_stats_fname)

        if not len(table_normal) == len(table_new):
            raise AssertionError()

        # These will normally fail, as the merging process cannot be made
        # bit-correct due to floating point math (exacerbated by exponentials)
        for c in table_new.colnames:
            if c == "Name" or c == "RA" or c == "DEC":
                np.testing.assert_equal(
                    table_normal[c],
                    table_new[c],
                    err_msg="column {} is not equal".format(c),
                )
            else:
                np.testing.assert_allclose(
                    table_normal[c],
                    table_new[c],
                    rtol=tolerance,
                    equal_nan=True,
                    err_msg="column {} is not close enough".format(c),
                )
Example #9
def test_merge_pdf1d_stats():
    ######################################
    # STEP 1: GET SOME DATA TO WORK WITH #
    ######################################
    vega_fname = download_rename("vega.hd5")
    obs_fname = download_rename("b15_4band_det_27_A.fits")
    noise_trim_fname = download_rename(
        "beast_example_phat_noisemodel_trim.grid.hd5")
    seds_trim_fname = download_rename("beast_example_phat_seds_trim.grid.hd5")

    # download cached version of fitting results
    # stats_fname_cache = download_rename('beast_example_phat_stats.fits')
    # pdf1d_fname_cache = download_rename('beast_example_phat_pdf1d.fits')

    # read in the observed data
    filters = [
        "HST_WFC3_F275W",
        "HST_WFC3_F336W",
        "HST_ACS_WFC_F475W",
        "HST_ACS_WFC_F814W",
        "HST_WFC3_F110W",
        "HST_WFC3_F160W",
    ]
    basefilters = ["F275W", "F336W", "F475W", "F814W", "F110W", "F160W"]
    obs_colnames = [f.lower() + "_rate" for f in basefilters]

    obsdata = Observations(obs_fname,
                           filters,
                           obs_colnames,
                           vega_fname=vega_fname)

    #########################################################################################
    # STEP 2: SPLIT THE GRIDS AND GENERATE THE GRID INFO DICT AS IN THE SUBGRIDDING EXAMPLE #
    #########################################################################################
    num_subgrids = 3

    # Split SED grid
    sub_seds_trim_fnames = subgridding_tools.split_grid(seds_trim_fname,
                                                        num_subgrids,
                                                        overwrite=True)

    # Split noise grid (a standardized function does not exist)
    sub_noise_trim_fnames = []

    noisemodel_vals = get_noisemodelcat(noise_trim_fname)
    slices = subgridding_tools.uniform_slices(len(noisemodel_vals["bias"]),
                                              num_subgrids)
    for i, slc in enumerate(slices):
        outname = noise_trim_fname.replace(".hd5", "sub{}.hd5".format(i))
        with tables.open_file(outname, "w") as outfile:
            outfile.create_array(outfile.root, "bias",
                                 noisemodel_vals["bias"][slc])
            outfile.create_array(outfile.root, "error",
                                 noisemodel_vals["error"][slc])
            outfile.create_array(outfile.root, "completeness",
                                 noisemodel_vals["completeness"][slc])
        sub_noise_trim_fnames.append(outname)

    # Collect information about the parameter ranges, to make the pdf1d bins
    # consistent between subgrids
    grid_info_dict = subgridding_tools.reduce_grid_info(sub_seds_trim_fnames,
                                                        sub_noise_trim_fnames,
                                                        nprocs=1,
                                                        cap_unique=100)

    ##################################################
    # STEP 3: GENERATE FILENAMES AND RUN THE FITTING #
    ##################################################
    def make_gridsub_fnames(base_fname, num_subgrids, extension=".fits"):
        return [
            base_fname.replace(extension, "gridsub{}{}".format(i, extension))
            for i in range(num_subgrids)
        ]

    stats_fname = "/tmp/beast_example_phat_stats.fits"
    pdf1d_fname = "/tmp/beast_example_phat_pdf1d.fits"
    lnp_fname = "/tmp/beast_example_phat_lnp.hd5"

    subgrid_pdf1d_fnames = make_gridsub_fnames(pdf1d_fname, num_subgrids)
    subgrid_stats_fnames = make_gridsub_fnames(stats_fname, num_subgrids)
    subgrid_lnp_fnames = make_gridsub_fnames(lnp_fname,
                                             num_subgrids,
                                             extension=".hd5")

    for i in range(num_subgrids):
        sub_noisemodel_vals = get_noisemodelcat(sub_noise_trim_fnames[i])
        fit.summary_table_memory(
            obsdata,
            sub_noisemodel_vals,
            sub_seds_trim_fnames[i],
            threshold=-40.0,
            save_every_npts=100,
            lnp_npts=60,
            stats_outname=subgrid_stats_fnames[i],
            pdf1d_outname=subgrid_pdf1d_fnames[i],
            lnp_outname=subgrid_lnp_fnames[i],
            grid_info_dict=grid_info_dict,
            do_not_normalize=True,
        )
        # The do_not_normalize option is absolutely crucial!

    # Now merge the results
    merged_pdf1d_fname, merged_stats_fname = subgridding_tools.merge_pdf1d_stats(
        subgrid_pdf1d_fnames, subgrid_stats_fnames)

    # Do a full fit also
    normal_stats = "normal_stats.fits"
    normal_pdf1d = "normal_pdf1d.fits"
    normal_lnp = "normal_lnp.hd5"
    fit.summary_table_memory(
        obsdata,
        noisemodel_vals,
        seds_trim_fname,
        threshold=-40.0,
        save_every_npts=100,
        lnp_npts=60,
        stats_outname=normal_stats,
        pdf1d_outname=normal_pdf1d,
        lnp_outname=normal_lnp,
        do_not_normalize=True,
    )
    # Here, we also need to use do_not_normalize, otherwise Pmax will be
    # different by a factor

    # CHECKS
    tolerance = 1e-6
    print("comparing pdf1d")
    # fits_cache = fits.open(pdf1d_fname_cache)
    fits_normal = fits.open(normal_pdf1d)
    fits_new = fits.open(merged_pdf1d_fname)

    if not len(fits_new) == len(fits_normal):
        raise AssertionError()

    # A similar problem to the above will also occur here
    for k in range(1, len(fits_new)):
        qname = fits_new[k].header["EXTNAME"]
        print(qname)
        np.testing.assert_allclose(fits_new[k].data,
                                   fits_normal[qname].data,
                                   rtol=tolerance,
                                   atol=tolerance)

    print("comparing stats")
    # table_cache = Table.read(stats_fname_cache)
    table_normal = Table.read(normal_stats)
    table_new = Table.read(merged_stats_fname)

    if not len(table_normal) == len(table_new):
        raise AssertionError()

    # These will normally fail, as the merging process cannot be made
    # bit-correct due to floating point math (exacerbated by exponentials)
    for c in table_new.colnames:
        print(c)
        if c == "Name" or c == "RA" or c == "DEC":
            np.testing.assert_equal(
                table_normal[c],
                table_new[c],
                err_msg="column {} is not equal".format(c),
            )
        else:
            np.testing.assert_allclose(
                table_normal[c],
                table_new[c],
                rtol=tolerance,
                equal_nan=True,
                err_msg="column {} is not close enough".format(c),
            )
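The noise-grid split in this example relies on subgridding_tools.uniform_slices returning a list of slice objects that partition range(n) into num_subgrids contiguous pieces. A small sketch of that assumed behavior (not the library's actual implementation):

def uniform_slices_sketch(n, num_parts):
    # Assumed behavior: partition range(n) into num_parts contiguous,
    # near-equal slices; the boundary rounding is a guess, not the real code.
    bounds = [round(i * n / num_parts) for i in range(num_parts + 1)]
    return [slice(bounds[i], bounds[i + 1]) for i in range(num_parts)]

# e.g. uniform_slices_sketch(10, 3) -> [slice(0, 3), slice(3, 7), slice(7, 10)]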
Example #10
def create_physicsmodel(beast_settings_info, nsubs=1, nprocs=1, subset=[None, None]):
    """
    Create the physics model grid.  If nsubs > 1, this will make sub-grids.


    Parameters
    ----------
    beast_settings_info : string or beast.tools.beast_settings.beast_settings instance
        if string: file name with beast settings
        if class: beast.tools.beast_settings.beast_settings instance

    nsubs : int (default=1)
        number of subgrids to split the physics model into

    nprocs : int (default=1)
        Number of parallel processes to use
        (currently only implemented for subgrids)

    subset : list of two ints (default=[None,None])
        Only process subgrids in the range [start,stop].
        (only relevant if nsubs > 1)

    """

    # process beast settings info
    if isinstance(beast_settings_info, str):
        settings = beast_settings.beast_settings(beast_settings_info)
    elif isinstance(beast_settings_info, beast_settings.beast_settings):
        settings = beast_settings_info
    else:
        raise TypeError(
            "beast_settings_info must be string or beast.tools.beast_settings.beast_settings instance"
        )

    # filename for the SED grid
    modelsedgrid_filename = "%s/%s_seds.grid.hd5" % (
        settings.project,
        settings.project,
    )

    # grab the current subgrid slice
    subset_slice = slice(subset[0], subset[1])

    # make sure the project directory exists
    create_project_dir(settings.project)

    # download and load the isochrones
    (iso_fname, oiso) = make_iso_table(
        settings.project,
        oiso=settings.oiso,
        logtmin=settings.logt[0],
        logtmax=settings.logt[1],
        dlogt=settings.logt[2],
        z=settings.z,
    )

    if hasattr(settings, "add_spectral_properties_kwargs"):
        extra_kwargs = settings.add_spectral_properties_kwargs
    else:
        extra_kwargs = None

    if hasattr(settings, "velocity"):
        redshift = (settings.velocity / const.c).decompose().value
    else:
        redshift = 0

    # generate the spectral library (no dust extinction)
    (spec_fname, g_spec) = make_spectral_grid(
        settings.project,
        oiso,
        osl=settings.osl,
        redshift=redshift,
        distance=settings.distances,
        distance_unit=settings.distance_unit,
        extLaw=settings.extLaw,
        add_spectral_properties_kwargs=extra_kwargs,
    )

    # add the stellar priors as weights
    #   also computes the grid weights for the stellar part
    (pspec_fname, g_pspec) = add_stellar_priors(
        settings.project,
        g_spec,
        age_prior_model=settings.age_prior_model,
        mass_prior_model=settings.mass_prior_model,
        met_prior_model=settings.met_prior_model,
        distance_prior_model=settings.distance_prior_model,
    )

    # --------------------
    # no subgrids
    # --------------------

    if nsubs == 1:
        # generate the SED grid by integrating the filter response functions
        #   effect of dust extinction applied before filter integration
        #   also computes the dust priors as weights
        make_extinguished_sed_grid(
            settings.project,
            g_pspec,
            settings.filters,
            extLaw=settings.extLaw,
            av=settings.avs,
            rv=settings.rvs,
            fA=settings.fAs,
            rv_prior_model=settings.rv_prior_model,
            av_prior_model=settings.av_prior_model,
            fA_prior_model=settings.fA_prior_model,
            spec_fname=modelsedgrid_filename,
            add_spectral_properties_kwargs=extra_kwargs,
        )

    # --------------------
    # use subgrids
    # --------------------

    if nsubs > 1:
        # Work with the whole grid up to here (otherwise, priors need a
        # rework - they don't like having only a subset of the parameter
        # space, especially when there's only one age for example)

        # Make subgrids, by splitting the spectral grid into equal sized pieces
        custom_sub_pspec = subgridding_tools.split_grid(pspec_fname, nsubs)

        file_prefix = "{0}/{0}_".format(settings.project)

        # function to process the subgrids individually
        def gen_subgrid(i, sub_name):
            sub_g_pspec = SpectralGrid(sub_name)
            sub_seds_fname = "{}seds.gridsub{}.hd5".format(file_prefix, i)

            # generate the SED grid by integrating the filter response functions
            #   effect of dust extinction applied before filter integration
            #   also computes the dust priors as weights
            (sub_seds_fname, sub_g_seds) = make_extinguished_sed_grid(
                settings.project,
                sub_g_pspec,
                settings.filters,
                extLaw=settings.extLaw,
                av=settings.avs,
                rv=settings.rvs,
                fA=settings.fAs,
                rv_prior_model=settings.rv_prior_model,
                av_prior_model=settings.av_prior_model,
                fA_prior_model=settings.fA_prior_model,
                add_spectral_properties_kwargs=extra_kwargs,
                seds_fname=sub_seds_fname,
            )

            return sub_seds_fname

        # run the above function
        par_tuples = [(i, sub_name) for i, sub_name in enumerate(custom_sub_pspec)][
            subset_slice
        ]

        parallel_wrapper(gen_subgrid, par_tuples, nprocs=nprocs)

        # Save a list of subgrid names that we expect to see
        required_names = [
            "{}seds.gridsub{}.hd5".format(file_prefix, i) for i in range(nsubs)
        ]

        outdir = os.path.join(".", settings.project)
        subgrid_names_file = os.path.join(outdir, "subgrid_fnames.txt")

        with open(subgrid_names_file, "w") as fname_file:
            for fname in required_names:
                fname_file.write(fname + "\n")
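A call sketch for this settings-based variant; the settings filename is hypothetical:

# Sketch only: "beast_settings.txt" is a placeholder for a real settings file
# (or a beast_settings instance) accepted by beast_settings.beast_settings.
create_physicsmodel("beast_settings.txt", nsubs=3, nprocs=1)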