Example #1
def t_get_obscat(project,
                 obsfile=datamodel.obsfile,
                 distanceModulus=datamodel.distanceModulus,
                 filters=datamodel.filters,
                 *args,
                 **kwargs):
    """ task that generates a data catalog object with the correct arguments

    Parameters
    ----------
    obsfile: str, optional (default datamodel.obsfile)
        observation file

    distanceModulus: float, optional (default datamodel.distanceModulus)
        distance modulus to correct the data from (in magnitudes)

    filters: sequence(str), optional (default datamodel.filters)
        sequence of filters of the data

    Returns
    -------
    project: str
        project id

    obs: PHATFluxCatalog
        observation catalog
    """
    obs = datamodel.get_obscat(obsfile, distanceModulus, filters, *args,
                               **kwargs)
    return project, obs
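
A minimal usage sketch (assuming a configured BEAST datamodel.py; the project name "beast_example" and the file path are hypothetical):

# sketch: build an observation catalog using the datamodel defaults
project, obs = t_get_obscat("beast_example")
# or override the observation file explicitly
project, obs = t_get_obscat("beast_example", obsfile="data/my_phot.fits")
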
Example #2
def run_beast_production(basename,
                         physicsmodel=False,
                         ast=False,
                         observationmodel=False,
                         trim=False,
                         fitting=False,
                         resume=False,
                         source_density='',
                         sub_source_density=''):
    """
    Wraps the original command-line version of run_beast_production.py so that
    it can be called from within another function or script


    Parameters
    ----------
    basename : string
        name of the gst file (assuming it's located in ./data/)

    For details on the other inputs, see the argparse information at the bottom of the original script
    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)

    # check input parameters, print what is the problem, stop run_beast
    verify_params.verify_input_format(datamodel)

    # update the filenames as needed for production
    # - photometry sub-file
    datamodel.obsfile = basename.replace(
        '.fits',
        '_with_sourceden' + '_SD_' + source_density.replace('_', '-') +
        '_sub' + sub_source_density + '.fits')
    # - stats files
    stats_filebase = "%s/%s"%(datamodel.project,datamodel.project) \
                     + '_sd' + source_density.replace('_','-') \
                     + '_sub' + sub_source_density
    sed_trimname = stats_filebase + '_sed_trim.grid.hd5'
    # - trimmed noise model
    noisemodel_trimname = stats_filebase + '_noisemodel_trim.hd5'
    # - SED grid
    #modelsedgrid_filename = "%s/%s_seds.grid.hd5"%(datamodel.project,
    #                                               datamodel.project)
    modelsedgrid_filename = "METAL_seds.grid.hd5"

    print("***run information***")
    print("  project = " + datamodel.project)
    print("  obsfile = " + datamodel.obsfile)
    print("  astfile = " + datamodel.astfile)
    print("         noisefile = " + datamodel.noisefile)
    print("   trimmed sedfile = " + sed_trimname)
    print("trimmed noisefiles = " + noisemodel_trimname)
    print("    stats filebase = " + stats_filebase)

    # make sure the project directory exists
    pdir = create_project_dir(datamodel.project)

    if physicsmodel:

        # download and load the isochrones
        (iso_fname, oiso) = make_iso_table(datamodel.project,
                                           oiso=datamodel.oiso,
                                           logtmin=datamodel.logt[0],
                                           logtmax=datamodel.logt[1],
                                           dlogt=datamodel.logt[2],
                                           z=datamodel.z)

        if hasattr(datamodel, 'add_spectral_properties_kwargs'):
            extra_kwargs = datamodel.add_spectral_properties_kwargs
        else:
            extra_kwargs = None

        if hasattr(datamodel, 'velocity'):
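            # non-relativistic approximation: redshift = v / c (datamodel.velocity
            # is expected to be an astropy Quantity, so decompose() strips units)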
            redshift = (datamodel.velocity / const.c).decompose().value
        else:
            redshift = 0

        # generate the spectral library (no dust extinction)
        (spec_fname, g_spec) = make_spectral_grid(
            datamodel.project,
            oiso,
            osl=datamodel.osl,
            redshift=redshift,
            distance=datamodel.distances,
            distance_unit=datamodel.distance_unit,
            add_spectral_properties_kwargs=extra_kwargs)

        # add the stellar priors as weights
        #   also computes the grid weights for the stellar part
        (pspec_fname, g_pspec) = add_stellar_priors(datamodel.project, g_spec)

        # generate the SED grid by integrating the filter response functions
        #   effect of dust extinction applied before filter integration
        #   also computes the dust priors as weights
        (seds_fname, g_seds) = make_extinguished_sed_grid(
            datamodel.project,
            g_pspec,
            datamodel.filters,
            extLaw=datamodel.extLaw,
            av=datamodel.avs,
            rv=datamodel.rvs,
            fA=datamodel.fAs,
            rv_prior_model=datamodel.rv_prior_model,
            av_prior_model=datamodel.av_prior_model,
            fA_prior_model=datamodel.fA_prior_model,
            spec_fname=modelsedgrid_filename,
            add_spectral_properties_kwargs=extra_kwargs)

    if ast:

        N_models = datamodel.ast_models_selected_per_age
        Nfilters = datamodel.ast_bands_above_maglimit
        Nrealize = datamodel.ast_realization_per_model
        mag_cuts = datamodel.ast_maglimit
        obsdata = datamodel.get_obscat(basename, datamodel.filters)

        if len(mag_cuts) == 1:
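            # a single entry is interpreted as an offset: find the 90th-percentile
            # magnitude in each band and add the offset to set per-band AST limits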
            tmp_cuts = mag_cuts
            min_mags = np.zeros(len(datamodel.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace('rate', 'vega')
                sfiltername = sfiltername.replace('RATE', 'VEGA')
                keep, = np.where(obsdata[sfiltername] < 99.)
                min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.)

            # max. mags from the gst observation cat.
            mag_cuts = min_mags + tmp_cuts

        outfile = './' + datamodel.project + '/' + datamodel.project + '_inputAST.txt'
        outfile_params = './' + datamodel.project + '/' + datamodel.project + '_ASTparams.fits'
        chosen_seds = pick_models(modelsedgrid_filename,
                                  datamodel.filters,
                                  mag_cuts,
                                  Nfilter=Nfilters,
                                  N_stars=N_models,
                                  Nrealize=Nrealize,
                                  outfile=outfile,
                                  outfile_params=outfile_params)

        if datamodel.ast_with_positions:
            separation = datamodel.ast_pixel_distribution
            filename = datamodel.project + '/' + datamodel.project + '_inputAST.txt'

            if datamodel.ast_reference_image is not None:
                # With reference image, use the background or source density map if available
                if datamodel.ast_density_table is not None:
                    pick_positions_from_map(
                        obsdata,
                        chosen_seds,
                        datamodel.ast_density_table,
                        datamodel.ast_N_bins,
                        datamodel.ast_realization_per_model,
                        outfile=filename,
                        refimage=datamodel.ast_reference_image,
                        refimage_hdu=0,
                        Nrealize=1,
                        set_coord_boundary=datamodel.ast_coord_boundary)
                else:
                    pick_positions(obsdata,
                                   filename,
                                   separation,
                                   refimage=datamodel.ast_reference_image)

            else:
                # Without reference image, we can only use this function
                if datamodel.ast_density_table is None:
                    pick_positions(obsdata, filename, separation)
                else:
                    print(
                        "To use ast_density_table, ast_reference_image must be specified."
                    )

    if observationmodel:
        print('Generating noise model from ASTs and absflux A matrix')

        # get the modelsedgrid on which to generate the noisemodel
        modelsedgrid = FileSEDGrid(modelsedgrid_filename)

        # generate the AST noise model
        noisemodel.make_toothpick_noise_model(
            datamodel.noisefile,
            datamodel.astfile,
            modelsedgrid,
            use_rate=True,
            absflux_a_matrix=datamodel.absflux_a_matrix)

    if trim:
        print('Trimming the model and noise grids')

        # read in the observed data
        obsdata = datamodel.get_obscat(basename, datamodel.filters)

        # get the modelsedgrid on which to generate the noisemodel
        modelsedgrid = FileSEDGrid(modelsedgrid_filename)

        # read in the noise model just created
        noisemodel_vals = noisemodel.get_noisemodelcat(datamodel.noisefile)

        # trim the model sedgrid
        trim_grid.trim_models(modelsedgrid,
                              noisemodel_vals,
                              obsdata,
                              sed_trimname,
                              noisemodel_trimname,
                              sigma_fac=3.)

    if fitting:
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8

        # read in the the AST noise model
        noisemodel_vals = noisemodel.get_noisemodelcat(noisemodel_trimname)

        # read in the observed data
        obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters)

        # output files
        statsfile = stats_filebase + '_stats.fits'
        pdf1dfile = statsfile.replace('stats.fits', 'pdf1d.fits')
        lnpfile = statsfile.replace('stats.fits', 'lnp.hd5')

        fit.summary_table_memory(obsdata,
                                 noisemodel_vals,
                                 sed_trimname,
                                 resume=resume,
                                 threshold=-10.,
                                 save_every_npts=100,
                                 lnp_npts=500,
                                 stats_outname=statsfile,
                                 pdf1d_outname=pdf1dfile,
                                 lnp_outname=lnpfile,
                                 surveyname=datamodel.surveyname)

        new_time = time.perf_counter()
        print('time to fit: ', (new_time - start_time) / 60., ' min')
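
A minimal usage sketch (assuming datamodel.py is configured and the source-density split photometry files exist; all names below are hypothetical):

# sketch: run only the trimming and fitting stages for one source-density bin
run_beast_production("my_field.gst.fits",
                     trim=True,
                     fitting=True,
                     source_density="0_1",
                     sub_source_density="0")
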
Example #3
def make_ast_inputs(flux_bin_method=True):
    """
    Make the list of artificial stars to be run through the photometry pipeline

    Parameters
    ----------
    flux_bin_method : boolean (default=True)
        If True, use the flux bin method to select SEDs.  If False, randomly
        select SEDs from the model grid.

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # read in the photometry catalog
    obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters)

    # --------------------
    # select SEDs
    # --------------------

    Nrealize = datamodel.ast_realization_per_model
    Nfilters = datamodel.ast_bands_above_maglimit

    # file names for stars and corresponding SED parameters
    outfile_seds = "./{0}/{0}_inputAST_seds.txt".format(datamodel.project)
    outfile_params = "./{0}/{0}_ASTparams.fits".format(datamodel.project)

    # if the SED file doesn't exist, create SEDs
    if not os.path.isfile(outfile_seds):

        print("Selecting SEDs for ASTs")

        # physics model grid from which SEDs are selected (used by both branches)
        modelsedgrid_filename = "./{0}/{0}_seds.grid.hd5".format(datamodel.project)

        if flux_bin_method:

            N_fluxes = datamodel.ast_n_flux_bins
            min_N_per_flux = datamodel.ast_n_per_flux_bin
            bins_outfile = "./{0}/{0}_ASTfluxbins.txt".format(datamodel.project)

            chosen_seds = pick_models_toothpick_style(
                modelsedgrid_filename,
                datamodel.filters,
                Nfilters,
                N_fluxes,
                min_N_per_flux,
                outfile=outfile_seds,
                outfile_params=outfile_params,
                bins_outfile=bins_outfile,
            )

        else:

            # construct magnitude cuts

            mag_cuts = datamodel.ast_maglimit

            if len(mag_cuts) == 1:
                tmp_cuts = mag_cuts
                min_mags = np.zeros(len(datamodel.filters))
                for k, filtername in enumerate(obsdata.filters):
                    sfiltername = obsdata.data.resolve_alias(filtername)
                    sfiltername = sfiltername.replace("rate", "vega")
                    sfiltername = sfiltername.replace("RATE", "VEGA")
                    (keep,) = np.where(obsdata[sfiltername] < 99.0)
                    min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.0)

                # max. mags from the gst observation cat.
                mag_cuts = min_mags + tmp_cuts


            N_models = datamodel.ast_models_selected_per_age

            chosen_seds = pick_models(
                modelsedgrid_filename,
                datamodel.filters,
                mag_cuts,
                Nfilter=Nfilters,
                N_stars=N_models,
                Nrealize=Nrealize,
                outfile=outfile_seds,
                outfile_params=outfile_params,
            )

    # if the SED file does exist, read them in
    else:
        print("Reading existing AST SEDs")
        chosen_seds = Table.read(outfile_seds, format="ascii")

    # --------------------
    # assign positions
    # --------------------

    # if we want ASTs with positions included (rather than just the fluxes from
    # the section above)
    if datamodel.ast_with_positions:

        print("Assigning positions to artifical stars")

        outfile = "./{0}/{0}_inputAST.txt".format(datamodel.project)

        # if we're replicating SEDs across source density or background bins
        if datamodel.ast_density_table is not None:
            make_ast_xy_list.pick_positions_from_map(
                obsdata,
                chosen_seds,
                datamodel.ast_density_table,
                datamodel.ast_N_bins,
                datamodel.ast_realization_per_model,
                outfile=outfile,
                refimage=datamodel.ast_reference_image,
                refimage_hdu=1,
                wcs_origin=1,
                Nrealize=1,
                set_coord_boundary=datamodel.ast_coord_boundary,
                region_from_filters="all",
            )

        # if we're not using SD/background maps, SEDs will be distributed
        # based on catalog sources
        else:
            make_ast_xy_list.pick_positions(
                obsdata,
                outfile,
                datamodel.ast_pixel_distribution,
                refimage=datamodel.ast_reference_image,
            )
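
A minimal usage sketch (assuming datamodel.py and the project's SED grid already exist):

# sketch: select ASTs with the flux bin method and write the input AST list
make_ast_inputs(flux_bin_method=True)
# or fall back to random selection from the model grid using magnitude cuts
make_ast_inputs(flux_bin_method=False)
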
Example #4
def fit_submodel(
    photometry_file,
    modelsedgrid_file,
    noise_file,
    stats_file,
    pdf_file,
    pdf2d_file,
    pdf2d_param_list,
    lnp_file,
    grid_info_file=None,
    resume=False,
):
    """
    Code to run the SED fitting

    Parameters
    ----------
    photometry_file : string
        path+name of the photometry file

    modelsedgrid_file : string
        path+name of the physics model grid file

    noise_file : string
        path+name of the noise model file

    stats_file : string
        path+name of the file to contain stats output

    pdf_file : string
        path+name of the file to contain 1D PDF output

    pdf2d_file : string
        path+name of the file to contain 2D PDF output

    pdf2d_param_list: list of strings or None
        parameters for which to make 2D PDFs (or None)

    lnp_file : string
        path+name of the file to contain log likelihood output

    grid_info_file : string (default=None)
        path+name for pickle file that contains dictionary with subgrid
        min/max/n_unique (required for a run with subgrids)

    resume : boolean (default=False)
        choose whether to resume existing run or start over


    Returns
    -------
    None
        results are written to the stats / pdf / lnp files specified above

    """

    # read in the photometry catalog
    obsdata = datamodel.get_obscat(photometry_file, datamodel.filters)

    # check if it's a subgrid run by looking in the file name
    if "gridsub" in modelsedgrid_file:
        subgrid_run = True
        print("loading grid_info_dict from " + grid_info_file)
        with open(grid_info_file, "rb") as p:
            grid_info_dict = pickle.load(p)
    else:
        subgrid_run = False

    # load the SED grid and noise model
    modelsedgrid = FileSEDGrid(modelsedgrid_file)
    noisemodel_vals = noisemodel.get_noisemodelcat(noise_file)

    if subgrid_run:
        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            modelsedgrid,
            resume=resume,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=stats_file,
            pdf1d_outname=pdf_file,
            pdf2d_outname=pdf2d_file,
            pdf2d_param_list=pdf2d_param_list,
            grid_info_dict=grid_info_dict,
            lnp_outname=lnp_file,
            do_not_normalize=True,
            surveyname=datamodel.surveyname,
        )
        print("Done fitting on grid " + modelsedgrid_file)

    else:

        fit.summary_table_memory(
            obsdata,
            noisemodel_vals,
            modelsedgrid,
            resume=resume,
            threshold=-10.0,
            save_every_npts=100,
            lnp_npts=500,
            stats_outname=stats_file,
            pdf1d_outname=pdf_file,
            pdf2d_outname=pdf2d_file,
            pdf2d_param_list=pdf2d_param_list,
            lnp_outname=lnp_file,
            surveyname=datamodel.surveyname,
        )
        print("Done fitting on grid " + modelsedgrid_file)
Example #5
            add_spectral_properties_kwargs=extra_kwargs)

    if args.ast:
        # get the modelsedgrid from which to select the input ASTs
        modelsedgridfile = datamodel.project + '/' + datamodel.project + \
                       '_seds.grid.hd5'
        modelsedgrid = FileSEDGrid(modelsedgridfile)

        N_models = datamodel.ast_models_selected_per_age
        Nfilters = datamodel.ast_bands_above_maglimit
        Nrealize = datamodel.ast_realization_per_model
        mag_cuts = datamodel.ast_maglimit

        if len(mag_cuts) == 1:
            tmp_cuts = mag_cuts
            obsdata = datamodel.get_obscat(datamodel.obsfile,
                                           datamodel.filters)

            min_mags = np.zeros(len(datamodel.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace('rate', 'vega')
                sfiltername = sfiltername.replace('RATE', 'VEGA')
                keep, = np.where(obsdata[sfiltername] < 99.)
                min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.)

            # max. mags from the gst observation cat.
            mag_cuts = min_mags + tmp_cuts

        pick_models(modelsedgrid,
                    mag_cuts,
                    Nfilter=Nfilters,
Example #6
        print("working on " + sed_trimname)

        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8

        if noisefile == old_noisefile:
            print("not reading noisefile - same as last")
            # print(noisefile)
        else:
            print("reading noisefile")
            # read in the noise model
            noisemodel_vals = noisemodel.get_noisemodelcat(noisefile)
            old_noisefile = noisefile

        # read in the observed data
        print("getting the observed data")
        obsdata = datamodel.get_obscat(obsfile, modelsedgrid.filters)

        # trim the model sedgrid
        #   set n_detected = 0 to disable the trimming of models based on
        #      the ASTs (e.g. extrapolations are ok)
        #   this is needed as the ASTs in the NIR bands do not go faint enough
        trim_grid.trim_models(
            modelsedgrid,
            noisemodel_vals,
            obsdata,
            sed_trimname,
            noisemodel_trimname,
            sigma_fac=3.0,
        )

        new_time = time.perf_counter()
    if args.ast:
        # get the modelsedgrid from which to select the input ASTs
        modelsedgridfile = datamodel.project + '/' + datamodel.project + \
                       '_seds.grid.hd5'
        modelsedgrid = FileSEDGrid(modelsedgridfile)

        N_models = datamodel.ast_models_selected_per_age
        Nfilters = datamodel.ast_bands_above_maglimit
        Nrealize = datamodel.ast_realization_per_model
        mag_cuts = datamodel.ast_maglimit

        if len(mag_cuts) == 1:
            tmp_cuts = mag_cuts
            obsdata = datamodel.get_obscat(datamodel.obsfile,
                                           datamodel.distanceModulus,
                                           datamodel.filters)

            min_mags = np.zeros(len(datamodel.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace('rate', 'vega')
                sfiltername = sfiltername.replace('RATE', 'VEGA')
                keep, = np.where(obsdata[sfiltername] < 99.)
                min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.)

            # max. mags from the gst observation cat.
            mag_cuts = min_mags + tmp_cuts

        pick_models(modelsedgrid,
                    mag_cuts,
Example #8
def pick_positions(filename, separation, refimage=None):
    """
    Assigns positions to the fake star list generated by pick_models

    INPUTS:
    -------

    filename:   string
                Name of AST list generated by pick_models
    separation: float
                Minimum pixel separation between AST and star in photometry 
                catalog provided in the datamodel.
    refimage:   Name of the reference image.  If supplied, the method will use the 
                reference image header to convert from RA and DEC to X and Y.

    OUTPUTS:
    --------

    Ascii table that replaces [filename] with a new version of [filename] that contains the necessary
    position columns for running the ASTs through DOLPHOT
    """

    noise = 3.0 #Spreads the ASTs in a circular annulus of 3 pixel width instead of all being 
                #precisely [separation] from an observed star.

    catalog = datamodel.get_obscat(datamodel.obsfile, datamodel.distanceModulus, datamodel.filters)
    colnames = catalog.data.columns

    if 'X' in colnames or 'x' in colnames:
        if 'X' in colnames:
            x_positions = catalog.data[:]['X']
            y_positions = catalog.data[:]['Y']
        if 'x' in colnames:
            x_positions = catalog.data[:]['x']
            y_positions = catalog.data[:]['y']
    else:
        if refimage:
            if 'RA' in colnames or 'ra' in colnames:
                if 'RA' in colnames:
                    ra_positions = catalog.data[:]['RA']
                    dec_positions = catalog.data[:]['DEC']
                if 'ra' in colnames:
                    ra_positions = catalog.data[:]['ra']
                    dec_positions = catalog.data[:]['dec']
            else:
                raise ValueError("Your catalog does not supply X, Y or RA, DEC "
                                 "information for spatial AST distribution")

        else:
            raise ValueError("You must supply a reference image to determine "
                             "the spatial AST distribution.")
        wcs = WCS(refimage)
        x_positions,y_positions = wcs.all_world2pix(ra_positions,dec_positions,0)
 
    astmags = ascii.read(filename)

    n_asts = len(astmags)

    # keep is defined to ensure that no fake stars are put outside of the image boundaries

    keep = (x_positions > np.min(x_positions) + separation + noise) & (x_positions < np.max(x_positions) - separation - noise) & \
           (y_positions > np.min(y_positions) + separation + noise) & (y_positions < np.max(y_positions) - separation - noise)

    x_positions = x_positions[keep]
    y_positions = y_positions[keep]

    ncat = len(x_positions)
    ind = np.random.random(n_asts)*ncat
    ind = ind.astype('int')


    # Here we generate the circular distribution of ASTs surrounding random observed stars
 
    separation = np.random.random(n_asts)*noise + separation
    theta = np.random.random(n_asts) * 2.0 * np.pi
    xvar = separation * np.cos(theta)
    yvar = separation * np.sin(theta)
    
    new_x = x_positions[ind] + xvar
    new_y = y_positions[ind] + yvar
    column1 = 0 * new_x
    column2 = column1 + 1
    column1 = Column(name='zeros', data=column1.astype('int'))
    column2 = Column(name='ones', data=column2.astype('int'))
    column3 = Column(name='X', data=new_x, format='%.2f')
    column4 = Column(name='Y', data=new_y, format='%.2f')
    astmags.add_column(column1, 0)
    astmags.add_column(column2, 1)
    astmags.add_column(column3, 2)
    astmags.add_column(column4, 3)

    ascii.write(astmags, filename, overwrite=True)
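
A minimal usage sketch (assuming datamodel.py is configured and pick_models has already written the AST magnitude list; the file names are hypothetical):

# sketch: add X/Y positions to an existing AST input list, ~10 px from catalog stars
pick_positions("beast_example/beast_example_inputAST.txt",
               separation=10.0,
               refimage="data/my_field_F475W_drz.fits")
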
def make_ast_inputs(flux_bin_method=True):
    """
    Make the list of artificial stars to be run through the photometry pipeline

    Parameters
    ----------
    flux_bin_method : boolean (default=True)
        If True, use the flux bin method to select SEDs.  If False, randomly
        select SEDs from the model grid.

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # construct magnitude cuts

    mag_cuts = datamodel.ast_maglimit
    obsdata = datamodel.get_obscat(datamodel.obsfile, datamodel.filters)

    if len(mag_cuts) == 1:
        tmp_cuts = mag_cuts
        min_mags = np.zeros(len(datamodel.filters))
        for k, filtername in enumerate(obsdata.filters):
            sfiltername = obsdata.data.resolve_alias(filtername)
            sfiltername = sfiltername.replace("rate", "vega")
            sfiltername = sfiltername.replace("RATE", "VEGA")
            keep, = np.where(obsdata[sfiltername] < 99.0)
            min_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.0)

        # max. mags from the gst observation cat.
        mag_cuts = min_mags + tmp_cuts

    # --------------------
    # select SEDs
    # --------------------

    Nrealize = datamodel.ast_realization_per_model
    Nfilters = datamodel.ast_bands_above_maglimit

    # file names for stars and corresponding SED parameters
    outfile = "./" + datamodel.project + "/" + datamodel.project + "_inputAST.txt"
    outfile_params = (
        "./" + datamodel.project + "/" + datamodel.project + "_ASTparams.fits"
    )

    # physics model grid from which SEDs are selected
    modelsedgrid_filename = "./" + datamodel.project + "/" + datamodel.project + "_seds.grid.hd5"

    if flux_bin_method:

        N_fluxes = datamodel.ast_n_flux_bins
        min_N_per_flux = datamodel.ast_n_per_flux_bin
        bins_outfile = (
            "./" + datamodel.project + "/" + datamodel.project + "_ASTfluxbins.txt"
        )

        chosen_seds = pick_models_toothpick_style(
            modelsedgrid_filename,
            datamodel.filters,
            mag_cuts,
            Nfilters,
            N_fluxes,
            min_N_per_flux,
            outfile=outfile,
            outfile_params=outfile_params,
            bins_outfile=bins_outfile,
        )

    else:

        N_models = datamodel.ast_models_selected_per_age

        chosen_seds = pick_models(
            modelsedgrid_filename,
            datamodel.filters,
            mag_cuts,
            Nfilter=Nfilters,
            N_stars=N_models,
            Nrealize=Nrealize,
            outfile=outfile,
            outfile_params=outfile_params,
        )

    # --------------------
    # assign positions
    # --------------------

    if datamodel.ast_with_positions:
        separation = datamodel.ast_pixel_distribution
        filename = datamodel.project + "/" + datamodel.project + "_inputAST.txt"

        if datamodel.ast_reference_image is not None:
            # With reference image, use one of these options
            if datamodel.ast_source_density_table is not None:
                pick_positions_from_map(
                    obsdata,
                    chosen_seds,
                    datamodel.ast_source_density_table,
                    datamodel.ast_N_bins,
                    datamodel.ast_realization_per_model,
                    outfile=filename,
                    refimage=datamodel.ast_reference_image,
                    refimage_hdu=0,
                    Nrealize=1,
                    set_coord_boundary=datamodel.ast_coord_boundary,
                )

            elif datamodel.ast_background_table is not None:
                pick_positions_from_map(
                    obsdata,
                    chosen_seds,
                    datamodel.ast_background_table,
                    datamodel.ast_N_bins,
                    datamodel.ast_realization_per_model,
                    outfile=filename,
                    refimage=datamodel.ast_reference_image,
                    refimage_hdu=0,
                    Nrealize=1,
                    set_coord_boundary=datamodel.ast_coord_boundary,
                )
            else:
                pick_positions(
                    obsdata,
                    filename,
                    separation,
                    refimage=datamodel.ast_reference_image,
                )

        else:
            # Without reference image, we can only use this function
            if (
                datamodel.ast_source_density_table is None
                and datamodel.ast_background_table is None
            ):
                pick_positions(obsdata, filename, separation)
            else:
                print(
                    "To use ast_source_density_table or ast_background_table, ast_reference_image must be specified."
                )