Example #1
def test_filter_column_slice():
    t = gen_test_table()
    q = Query('a > 2')
    assert (q.filter(t, 'b') == t['b'][t['a'] > 2]).all()
    q = Query('a > 2', 'b < 2')
    assert (q.filter(t, 'c') == t['c'][(t['a'] > 2) & (t['b'] < 2)]).all()
    q = Query(None)
    assert (q.filter(t, 'a') == t['a']).all()
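
The test above relies on a `gen_test_table` fixture that is not shown. A minimal sketch of a compatible fixture, assuming only that the table needs integer columns `a`, `b`, and `c` (the exact contents are hypothetical):

import numpy as np
from astropy.table import Table

def gen_test_table():
    # hypothetical stand-in for the real fixture: any table with integer
    # columns 'a', 'b', 'c' makes the queries in the test well-defined
    rng = np.random.RandomState(42)
    return Table({name: rng.randint(0, 5, 20) for name in 'abc'})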
Example #2
def _get_redshifts(filepath, night=None, target_ids=None):
    z1, z2 = load_fits(filepath, [1, 2])
    if not any(col in z2.colnames
               for col in ["SV3_DESI_TARGET", "DESI_TARGET"]):
        return
    # z2.sort(["TARGETID", "NUM_ITER"])
    z2 = unique(z2, "TARGETID", keep="last")
    q = Query() if target_ids is None else QueryMaker.isin(
        "TARGETID", target_ids)
    z1 = q.filter(
        z1,
        ["TARGETID", "Z", "ZERR", "ZWARN", "CHI2", "DELTACHI2", "SPECTYPE"])
    z2_cols = [
        "TARGETID", "TARGET_RA", "TARGET_DEC", "FLUX_R", "FLUX_G", "SHAPE_R",
        "FLUX_IVAR_R", "FLUX_IVAR_G", "EBV", "OBSCONDITIONS", "NIGHT",
        "TILEID", "FIBER", "SV3_SCND_TARGET", "SV3_BGS_TARGET",
        "SV3_DESI_TARGET", "SCND_TARGET", "BGS_TARGET"
    ]
    z2_cols_exist = [col for col in z2_cols if col in z2.colnames]
    z2_cols_not_exist = [col for col in z2_cols if col not in z2.colnames]
    z2 = q.filter(z2, z2_cols_exist)
    for col in z2_cols_not_exist:
        z2[col] = np.int32(-1)
    if night is not None:
        z2["NIGHT"] = night
    if len(z1) and len(z2):
        z = join(z1, z2, "TARGETID")
        if len(z):
            for BAND in ("G", "R"):
                z[f"SIGMA_{BAND}"] = z[f"FLUX_{BAND}"] * np.sqrt(
                    np.abs(z[f"FLUX_IVAR_{BAND}"]))
                z[f"MW_TRANSMISSION_{BAND}"] = mw_xtinct(z["EBV"], BAND)
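            # Note: const = 2.5 / ln(10), so const * np.log(x) == 2.5 * log10(x); the
            # magnitudes below follow the usual 22.5 - 2.5*log10(flux) convention,
            # which assumes fluxes are given in nanomaggies.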
            const = 2.5 / np.log(10)
            for band in "gr":
                BAND = band.upper()
                with np.errstate(divide="ignore", invalid="ignore"):
                    z[f"{band}_mag"] = _fill_not_finite(22.5 - const * np.log(
                        z[f"FLUX_{BAND}"] / z[f"MW_TRANSMISSION_{BAND}"]))
                    z[f"{band}_err"] = _fill_not_finite(
                        const / np.abs(z[f"SIGMA_{BAND}"]))
            return z
Example #3
    def load(self,
             hosts=None,
             has_spec=None,
             cuts=None,
             return_as=None,
             columns=None,
             version=None):
        """
        load object catalogs (aka "base catalogs")

        Parameters
        ----------
        hosts : int, str, list, None, optional
            host names/IDs or a list of host names/IDs or short-hand names like
            "paper1" or "paper1_complete"

        has_spec : bool, optional
            If set to True, load only objects that have spectra

        cuts : easyquery.Query, str, tuple, optional
            To apply to the objects when loaded

        return_as : str, optional
            If set to 'list' (default when `has_spec` is not True), return a list that contains all tables
            If set to 'stacked' (default when `has_spec` is True), return a stacked table
            If set to 'iter', return an iterator for looping over hosts
            If set to 'dict', return a dictionary with host ids being the keys

        columns : list, optional
            If set, only load a subset of columns

        version : int or str, optional
            Set to 'paper1' for paper1 catalogs

        Returns
        -------
        objects : astropy.table.Table, list, or iterator
            (depending on `return_as`)

        Examples
        --------
        >>> import SAGA
        >>> from SAGA import ObjectCuts as C
        >>> saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
        >>> saga_object_catalog = SAGA.ObjectCatalog(saga_database)

        To load all spectra, with some basic cuts applied:
        >>> specs = saga_object_catalog.load(has_spec=True, cuts=C.basic_cut)

        Load the base catalog for a certain host, with some basic cuts applied:
        >>> specs = saga_object_catalog.load(hosts='AnaK', cuts=C.basic_cut)

        Load base catalog for all paper1 hosts, with some basic cuts applied,
        and stored as a list:
        >>> base_tables = saga_object_catalog.load(hosts='paper1', cuts=C.basic_cut, return_as='list')

        Load base catalog for all paper1 hosts, with some basic cuts applied,
        and stored as one single big table:
        >>> bases_table = saga_object_catalog.load(hosts='paper1', cuts=C.basic_cut, return_as='stacked')
        """

        if return_as is None:
            return_as = 'stacked' if has_spec else 'list'
        return_as = return_as.lower()
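        # only the first letter is checked: 's'tacked, 'l'ist, 'i'ter, or 'd'ict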
        if return_as[0] not in 'slid':
            raise ValueError(
                '`return_as` should be "list", "stacked", "iter", or "dict"')

        if version is None:
            base_key = 'base'
        elif str(version).lower() in ('paper1', 'p1', 'v0p1', '0', '0.1'):
            base_key = 'base_v0p1'
        elif version in (1, 2):
            base_key = 'base_v{}'.format(version)
        else:
            raise ValueError('`version` must be None, \'paper1\', 1 or 2.')

        if has_spec and base_key == 'base_v0p1':
            t = self._database['saga_spectra_May2017'].read()

            if hosts is not None:
                host_ids = self._host_catalog.resolve_id(hosts, 'NSA')
                t = Query(
                    (lambda x: np.in1d(x, host_ids), 'HOST_NSAID')).filter(t)

            t = self._annotate_catalog(t)

            if cuts is not None:
                t = Query(cuts).filter(t)

            if return_as[0] != 's':
                if hosts is None:
                    host_ids = np.unique(t['HOST_NSAID'])
                output_iterator = (self._slice_columns(
                    Query('HOST_NSAID == {}'.format(i)).filter(t), columns)
                                   for i in host_ids)
                if return_as[0] == 'i':
                    return output_iterator
                if return_as[0] == 'd':
                    return dict(zip(host_ids, output_iterator))
                return list(output_iterator)

            return self._slice_columns(t, columns)

        else:
            q = Query(cuts)
            if has_spec:
                q = q & C.has_spec
            elif has_spec is not None:
                q = q & (~C.has_spec)

            hosts = self._host_catalog.resolve_id(hosts, 'string')

            need_coord = (columns is None or 'coord' in columns)
            # because skycoord cannot be stacked
            to_add_skycoord = need_coord and return_as[0] != 's'

            output_iterator = (self._slice_columns(
                q.filter(
                    self._annotate_catalog(
                        self._database[base_key, host].read(),
                        to_add_skycoord)), columns,
                (need_coord and not to_add_skycoord)) for host in hosts)

            if return_as[0] == 'i':
                return output_iterator
            if return_as[0] == 's':
                out = vstack(list(output_iterator), 'outer', 'error')
                if need_coord:
                    out = self._slice_columns(add_skycoord(out), columns)
                return out
            if return_as[0] == 'd':
                return dict(zip(hosts, output_iterator))
            return list(output_iterator)
Example #4
    def load(self,
             hosts=None,
             has_spec=None,
             cuts=None,
             iter_hosts=False,
             columns=None):
        """
        load object catalogs (aka "base catalogs")

        Parameters
        ----------
        hosts : int, str, list, None, optional
            host names/IDs or a list of host names/IDs or short-hand names like
            "paper1" or "paper1_complete"

        has_spec : bool, optional
            If set to True, load only objects that have spectra

        cuts : easyquery.Query, str, tuple, optional
            To apply to the objects when loaded

        iter_hosts : bool, optional
            If set to True, return an iterator for looping over hosts

        columns : list, optional
            If set, only load a subset of columns

        Returns
        -------
        objects : astropy.table.Table

        Examples
        --------
        >>> import SAGA
        >>> from SAGA import ObjectCuts as C
        >>> saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
        >>> saga_objects = SAGA.ObjectCatalog(saga_database)

        To load all spectra, with some basic cuts applied:
        >>> specs = saga_objects.load(has_spec=True, cuts=C.basic_cut)

        Load the base catalog for a certain host, with some basic cuts applied:
        >>> specs = saga_objects.load(hosts='AnaK', cuts=C.basic_cut)

        Load base catalog for all paper1 hosts, with some basic cuts applied,
        and stored as a list:
        >>> base_tables = list(saga_objects.load(hosts='paper1', cuts=C.basic_cut, iter_hosts=True))

        Load base catalog for all paper1 hosts, with some basic cuts applied,
        and stored as one single big table:
        >>> bases_table = saga_objects.load(hosts='paper1', cuts=C.basic_cut)
        """
        if has_spec:
            t = self._database['spectra_clean'].read()

            if hosts is not None:
                host_ids = self._hosts.resolve_id(hosts)
                t = Query(
                    (lambda x: np.in1d(x, host_ids), 'HOST_NSAID')).filter(t)

            t = self._add_colors(t)

            if cuts is not None:
                t = Query(cuts).filter(t)

            if iter_hosts:
                if hosts is None:
                    host_ids = np.unique(t['HOST_NSAID'])
                return (Query('HOST_NSAID == {}'.format(i)).filter(t)
                        for i in host_ids)
            else:
                return _slice_columns(t, columns)

        else:
            q = Query(cuts)
            if has_spec is not None:
                q = q & (~C.has_spec)

            hosts = self._hosts.resolve_id(
                'all') if hosts is None else self._hosts.resolve_id(hosts)

            output_iterator = (_slice_columns(
                q.filter(self._add_colors(self._database['base',
                                                         host].read())),
                columns) for host in hosts)

            return output_iterator if iter_hosts else vstack(
                list(output_iterator))
Example #5
def find_redshifts_and_specs(t=None,
                             retrieve_specs=False,
                             exclude_bgs=False,
                             skip_redshifts=False,
                             all_lowz=False,
                             selection=is_lowz_target,
                             zcat_prefix="redrock",
                             **kwargs):
    """
    Takes a table `t` with columns "TILEID" and "NIGHT", and finds all redshifts for LOWZ targets.
    Set `exclude_bgs` to True to exclude targets that overlap with BGS.

    Alternatively, the input table can have a "TARGETID" column,
    in which case the function will find corresponding redshifts.

    Set `retrieve_specs` to True to also obtain the spectra.
    In this case, the returned variables are:
        redshifts, specs_flux, specs_ivar, specs_wl, specs_targetid

    Note that the function will not verify if all requested targets are found.
    It will also not verify if the redshifts table is consistent with specs.
    """
    if t is None:
        t = Table(kwargs)

    filenames_known = "FILENAME" in t.colnames
    targets_known = "TARGETID" in t.colnames

    group_keys = ["FILENAME"] if filenames_known else ["TILEID", "NIGHT"]
    assert all(c in t.colnames for c in group_keys)
    if targets_known:
        t.sort(group_keys + ["TARGETID"])

    if skip_redshifts:
        if not (filenames_known and targets_known):
            raise ValueError(
                "Must have FILENAME and TARGETID in the input table to skip redshift collection."
            )
        if not retrieve_specs:
            raise ValueError("Nothing to do!!")
        redshifts = t
    else:
        q = Query(selection)
        if all_lowz:
            q = q | Query(is_lowz, is_galaxy)
        if exclude_bgs:
            q = Query(q, ~is_bgs_target)

        redshifts = []
        for t1 in t.group_by(group_keys).groups:
            if filenames_known:
                file_iter = [(zcat_prefix,
                              _filename_to_path(t1["FILENAME"][0]))]
            else:
                file_iter = _loop_over_files(t1["TILEID"][0], t1["NIGHT"][0])
            for filetype, filepath in file_iter:
                if filetype == zcat_prefix:
                    data_this = _get_redshifts(
                        filepath, t1["NIGHT"][0],
                        t1["TARGETID"] if targets_known else None)
                    if data_this is not None and not targets_known:
                        data_this = q.filter(data_this)
                    if data_this is not None and len(data_this):
                        data_this["FILENAME"] = os.path.basename(filepath)
                        redshifts.append(data_this)

        redshifts = vstack(redshifts)
        print("Found {} redshifts".format(len(redshifts)))

    redshifts.sort(["FILENAME", "TARGETID"])

    if not retrieve_specs:
        return redshifts

    specs = []
    for redshifts_this in redshifts.group_by(["FILENAME"]).groups:
        filepath = _filename_to_path(redshifts_this["FILENAME"][0].replace(
            zcat_prefix + "-", "coadd-"))
        data_this = _get_specs(filepath, redshifts_this["TARGETID"])
        if data_this is not None:
            wl_idx = np.round((data_this[1] - WAVELENGTHS_START) /
                              WAVELENGTHS_DELTA).astype(np.int64)
            wl_idx, arr_idx = np.unique(wl_idx, return_index=True)
            specs.append((
                data_this[0],
                wl_idx,
                data_this[2][:, arr_idx],
                data_this[3][:, arr_idx],
            ))

    specs_id = np.concatenate([t[0] for t in specs])
    specs_flux = np.zeros((len(specs_id), WAVELENGTHS_LEN), dtype=np.float32)
    specs_ivar = np.zeros_like(specs_flux)
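    # Scatter each file's flux/ivar rows into the dense (n_spectra, WAVELENGTHS_LEN)
    # arrays, using the integer wavelength indices computed above; gaps stay zero.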
    i = 0
    for _, wl_idx, flux, ivar in specs:
        n = len(flux)
        specs_flux[i:i + n, wl_idx] = flux
        specs_ivar[i:i + n, wl_idx] = ivar
        i += n
    specs_wl = np.linspace(
        WAVELENGTHS_START,
        WAVELENGTHS_START + WAVELENGTHS_DELTA * (WAVELENGTHS_LEN - 1),
        WAVELENGTHS_LEN)

    print("Found {} specs".format(len(specs_id)))
    if len(redshifts) != len(specs_id) or not (redshifts["TARGETID"]
                                               == specs_id).all():
        print("WARNING: TARGETID in redshifts does not match those in specs")

    return redshifts, specs_flux, specs_ivar, specs_wl, specs_id
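
A minimal usage sketch for the function above; the TILEID/NIGHT values are hypothetical and assume the corresponding DESI tile files are available on disk:

from astropy.table import Table

# hypothetical tile/night pairs; any table with TILEID and NIGHT columns works
tiles = Table({"TILEID": [80605, 80607], "NIGHT": [20210405, 20210405]})

redshifts, flux, ivar, wl, targetid = find_redshifts_and_specs(tiles, retrieve_specs=True)

# flux and ivar have shape (n_spectra, WAVELENGTHS_LEN) on the common grid wl;
# row i belongs to targetid[i], whose redshift entry can be looked up in redshifts.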
Example #6
def prepare_aat_catalog(
    target_catalog,
    write_to=None,
    verbose=True,
    flux_star_removal_threshold=20.0 * u.arcsec,
    flux_star_r_range=(17, 17.7),
    flux_star_gr_range=(0.1, 0.4),
    sky_fiber_void_radius=10.0 * u.arcsec,
    sky_fiber_needed=100,
    sky_fiber_max=1.1 * u.deg,
    sky_fiber_host_rvir_threshold=0.7 * u.deg,
    sky_fiber_radial_adjustment=2.0,
    targeting_score_threshold=900,
    seed=123,
):
    """
    Prepare AAT target catalog.

    If the host's radius is less than `sky_fiber_host_rvir_threshold`,
    all sky fibers will be distributed between `sky_fiber_max` and the host's radius.

    Otherwise, first fill the annulus between `sky_fiber_max` and the host's radius,
    then distribute the rest within the host (preferring the outer region,
    as controlled by `sky_fiber_radial_adjustment`).

    Format needed:
    # TargetName(unique for header) RA(h m s) Dec(d m s) TargetType(Program,Fiducial,Sky) Priority(9 is highest) Magnitude 0 Notes
    1237648721248518305 14 42 17.79 -0 12 05.95 P 2 22.03 0 magcol=fiber2mag_r, model_r=20.69
    1237648721786045341 14 48 37.16 +0 21 33.81 P 1 21.56 0 magcol=fiber2mag_r, model_r=20.55
    """
    # pylint: disable=no-member

    if 'TARGETING_SCORE' not in target_catalog.colnames:
        raise KeyError(
            '`target_catalog` does not have column "TARGETING_SCORE". '
            'Have you run `compile_target_list` or `assign_targeting_score`?')

    if not isinstance(flux_star_removal_threshold, u.Quantity):
        flux_star_removal_threshold = flux_star_removal_threshold * u.arcsec

    if not isinstance(sky_fiber_void_radius, u.Quantity):
        sky_fiber_void_radius = sky_fiber_void_radius * u.arcsec

    if not isinstance(sky_fiber_max, u.Quantity):
        sky_fiber_max = sky_fiber_max * u.deg

    if not isinstance(sky_fiber_host_rvir_threshold, u.Quantity):
        sky_fiber_host_rvir_threshold = sky_fiber_host_rvir_threshold * u.deg

    host_ra = target_catalog['HOST_RA'][0] * u.deg
    host_dec = target_catalog['HOST_DEC'][0] * u.deg
    host_dist = target_catalog['HOST_DIST'][0]
    host_rvir = np.arcsin(0.3 / host_dist) * u.rad

    annulus_actual = (sky_fiber_max**2.0 - host_rvir**2.0)
    annulus_wanted = (sky_fiber_max**2.0 - sky_fiber_host_rvir_threshold**2.0)

    if annulus_actual < 0:
        raise ValueError(
            '`sky_fiber_max` too small, this host is larger than that!')

    if annulus_wanted < 0:
        raise ValueError(
            '`sky_fiber_max` must be larger than `sky_fiber_host_rvir_threshold`!'
        )

    def _gen_dist_rand(seed_this, size):
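        # Inverse-transform sampling: sqrt(U * (R_out^2 - R_in^2) + R_in^2) gives radii
        # uniformly distributed in area over the annulus [host_rvir, sky_fiber_max].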
        U = np.random.RandomState(seed_this).rand(size)
        return np.sqrt(U * annulus_actual + host_rvir**2.0)

    if annulus_actual < annulus_wanted:

        def gen_dist_rand(seed_this, size):
            size_out = int(np.around(size * annulus_actual / annulus_wanted))
            size_in = size - size_out
            dist_rand_out = _gen_dist_rand(seed_this, size_out)
            index = (1.0 / (sky_fiber_radial_adjustment + 2.0))
            dist_rand_in = (np.random.RandomState(seed_this + 1).rand(size_in)
                            **index) * host_rvir
            return np.concatenate(
                [dist_rand_out.to_value("deg"),
                 dist_rand_in.to_value("deg")]) * u.deg
    else:
        gen_dist_rand = _gen_dist_rand

    n_needed = sky_fiber_needed
    ra_sky = []
    dec_sky = []
    base_sc = SkyCoord(target_catalog['RA'], target_catalog['DEC'], unit='deg')
    while n_needed > 0:
        n_rand = int(np.ceil(n_needed * 1.1))
        dist_rand = gen_dist_rand(seed, n_rand)
        theta_rand = np.random.RandomState(seed + 1).rand(n_rand) * (2.0 *
                                                                     np.pi)
        ra_rand = np.remainder(host_ra + dist_rand * np.cos(theta_rand),
                               360.0 * u.deg)
        dec_rand = host_dec + dist_rand * np.sin(theta_rand)
        ok_mask = (dec_rand >= -90.0 * u.deg) & (dec_rand <= 90.0 * u.deg)
        ra_rand = ra_rand[ok_mask]
        dec_rand = dec_rand[ok_mask]
        sky_sc = SkyCoord(ra_rand, dec_rand)
        sep = sky_sc.match_to_catalog_sky(base_sc)[1]
        ok_mask = (sep > sky_fiber_void_radius)
        n_needed -= np.count_nonzero(ok_mask)
        ra_sky.append(ra_rand[ok_mask].to_value("deg"))
        dec_sky.append(dec_rand[ok_mask].to_value("deg"))
        seed += np.random.RandomState(seed + 2).randint(100, 200)
        del ra_rand, dec_rand, sky_sc, sep, ok_mask
    del base_sc
    ra_sky = np.concatenate(ra_sky)[:sky_fiber_needed]
    dec_sky = np.concatenate(dec_sky)[:sky_fiber_needed]

    is_target = Query('TARGETING_SCORE >= 0',
                      'TARGETING_SCORE < {}'.format(targeting_score_threshold))
    is_des = Query((lambda s: s == 'des', 'survey'))
    is_star = Query('morphology_info == 0', is_des) | Query(
        ~is_des, ~Query('is_galaxy'))
    is_flux_star = Query(is_star, 'r_mag >= {}'.format(flux_star_r_range[0]),
                         'r_mag < {}'.format(flux_star_r_range[1]))
    is_flux_star &= Query('gr >= {}'.format(flux_star_gr_range[0]),
                          'gr < {}'.format(flux_star_gr_range[1]))

    target_catalog = (is_target | is_flux_star).filter(target_catalog)
    target_catalog['Priority'] = target_catalog['TARGETING_SCORE'] // 100
    target_catalog['Priority'][Query('Priority < 1').mask(target_catalog)] = 1
    target_catalog['Priority'][Query('Priority > 8').mask(target_catalog)] = 8
    target_catalog['Priority'] = 9 - target_catalog['Priority']
    target_catalog['Priority'][is_flux_star.mask(target_catalog)] = 9

    flux_star_indices = np.flatnonzero(is_flux_star.mask(target_catalog))
    flux_star_sc = SkyCoord(*target_catalog[['RA', 'DEC'
                                             ]][flux_star_indices].itercols(),
                            unit='deg')
    target_sc = SkyCoord(*is_target.filter(target_catalog)[['RA',
                                                            'DEC']].itercols(),
                         unit='deg')
    sep = flux_star_sc.match_to_catalog_sky(target_sc)[1]
    target_catalog['Priority'][flux_star_indices[
        sep < flux_star_removal_threshold]] = 0
    target_catalog = Query('Priority > 0').filter(target_catalog)
    n_flux_star = Query('Priority == 9').count(target_catalog)
    del flux_star_indices, flux_star_sc, target_sc, sep

    target_catalog['TargetType'] = 'P'
    target_catalog['0'] = 0
    target_catalog['Notes'] = 'targets'
    target_catalog['Notes'][is_flux_star.mask(target_catalog)] = 'flux'

    target_catalog.rename_column('DEC', 'Dec')
    target_catalog.rename_column('OBJID', 'TargetName')
    target_catalog.rename_column('r_mag', 'Magnitude')

    target_catalog.sort(['TARGETING_SCORE', 'Magnitude'])
    target_catalog = target_catalog[[
        'TargetName', 'RA', 'Dec', 'TargetType', 'Priority', 'Magnitude', '0',
        'Notes'
    ]]

    sky_catalog = Table({
        'TargetName': np.arange(len(ra_sky)),
        'RA': ra_sky,
        'Dec': dec_sky,
        'TargetType': np.repeat('S', len(ra_sky)),
        'Priority': np.repeat(9, len(ra_sky)),
        'Magnitude': np.repeat(99.0, len(ra_sky)),
        '0': np.repeat(0, len(ra_sky)),
        'Notes': np.repeat('sky', len(ra_sky)),
    })

    target_catalog = vstack([target_catalog, sky_catalog])

    if verbose:
        print('# of flux stars =', n_flux_star)
        print('# of sky fibers =', len(sky_catalog))
        for rank in range(1, 10):
            print('# of Priority={} targets ='.format(rank),
                  Query('Priority == {}'.format(rank)).count(target_catalog))

    if write_to:
        if verbose:
            print('Writing to {}'.format(write_to))

        target_catalog.write(
            write_to,
            delimiter=' ',
            quotechar='"',
            format='ascii.fast_commented_header',
            overwrite=True,
            formats={
                'RA':
                lambda x: Angle(x, 'deg').wrap_at(360 * u.deg).to_string(
                    'hr', sep=' ', precision=2),  # pylint: disable=E1101
                'Dec':
                lambda x: Angle(x, 'deg').to_string(
                    'deg', sep=' ', precision=2),
                'Magnitude':
                '%.2f',
            })

        with open(write_to) as fh:
            content = fh.read()

        with open(write_to, 'w') as fh:
            fh.write(content.replace('"', ''))

    return target_catalog
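
A usage sketch for `prepare_aat_catalog`, mirroring the `prepare_mmt_catalog` example below; the host ID and output path are placeholders, and `build_target_catalogs` is assumed to produce catalogs with the columns this function expects:

import SAGA
from SAGA.targets import prepare_aat_catalog

saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
saga_targets = SAGA.TargetSelection(saga_database, gmm_parameters='gmm_parameters_no_outlier')

for host_id, target_catalog in saga_targets.build_target_catalogs([161174], return_as='dict').items():
    prepare_aat_catalog(target_catalog, write_to='/path/to/aat_nsa{}.fld'.format(host_id))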
Example #7
def prepare_mmt_catalog(target_catalog,
                        write_to=None,
                        flux_star_removal_threshold=20.0,
                        verbose=True):
    """
    Prepare MMT target catalog.

    Parameters
    ----------
    target_catalog : astropy.table.Table
        Need to have `TARGETING_SCORE` column.
        You can use `TargetSelection.build_target_catalogs` to generate `target_catalog`
    write_to : str, optional
        If set, it will write the catalog in MMT format to `write_to`.
    flux_star_removal_threshold : float, optional
        In arcseconds
    verbose : bool, optional

    Returns
    -------
    mmt_target_catalog : astropy.table.Table

    Examples
    --------
    >>> import SAGA
    >>> from SAGA.targets import prepare_mmt_catalog
    >>> saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
    >>> saga_targets = SAGA.TargetSelection(saga_database, gmm_parameters='gmm_parameters_no_outlier')
    >>> mmt18_hosts = [161174, 52773, 163956, 69028, 144953, 165082, 165707, 145729, 165980, 147606]
    >>> for host_id, target_catalog in saga_targets.build_target_catalogs(mmt18_hosts, return_as='dict').items():
    >>>     print('Working host NSA', host_id)
    >>>     SAGA.targets.prepare_mmt_catalog(target_catalog, '/home/yymao/Downloads/mmt_nsa{}.cat'.format(host_id))
    >>>     print()

    Notes
    -----
    See https://www.cfa.harvard.edu/mmti/hectospec/hecto_software_manual.htm#4.1.1 for required format

    """

    if 'TARGETING_SCORE' not in target_catalog.colnames:
        raise KeyError(
            '`target_catalog` does not have column "TARGETING_SCORE". '
            'Have you run `compile_target_list` or `assign_targeting_score`?')

    is_target = Query('TARGETING_SCORE >= 0', 'TARGETING_SCORE < 900')

    is_star = Query('PHOTPTYPE == 6')
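    # PHOTPTYPE == 6 is the SDSS photometric classification for point sources (stars);
    # the PSF-magnitude cuts below select guide stars and flux-calibration star candidates.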
    is_guide_star = is_star & Query('PSFMAG_R >= 14', 'PSFMAG_R < 15')
    is_flux_star = is_star & Query('PSFMAG_R >= 17', 'PSFMAG_R < 18')
    is_flux_star &= Query('PSFMAG_U - PSFMAG_G >= 0.6',
                          'PSFMAG_U - PSFMAG_G < 1.2')
    is_flux_star &= Query('PSFMAG_G - PSFMAG_R >= 0',
                          'PSFMAG_G - PSFMAG_R < 0.6')
    is_flux_star &= Query(
        '(PSFMAG_G - PSFMAG_R) > 0.75 * (PSFMAG_U - PSFMAG_G) - 0.45')

    target_catalog = (is_target | is_guide_star
                      | is_flux_star).filter(target_catalog)

    target_catalog['rank'] = target_catalog['TARGETING_SCORE'] // 100
    target_catalog['rank'][Query('rank < 2').mask(
        target_catalog)] = 2  # regular targets start at rank 2
    target_catalog['rank'][is_flux_star.mask(target_catalog)] = 1
    target_catalog['rank'][is_guide_star.mask(
        target_catalog)] = 99  # set to 99 for sorting

    flux_star_indices = np.flatnonzero(is_flux_star.mask(target_catalog))
    flux_star_sc = SkyCoord(*target_catalog[['RA', 'DEC'
                                             ]][flux_star_indices].itercols(),
                            unit='deg')
    target_sc = SkyCoord(*is_target.filter(target_catalog)[['RA',
                                                            'DEC']].itercols(),
                         unit='deg')
    sep = flux_star_sc.match_to_catalog_sky(target_sc)[1]
    target_catalog['rank'][flux_star_indices[
        sep.arcsec < flux_star_removal_threshold]] = 0
    target_catalog = Query('rank > 0').filter(target_catalog)

    if verbose:
        print('# of guide stars     =', is_guide_star.count(target_catalog))
        print('# of flux stars      =', is_flux_star.count(target_catalog))
        print('# of rank>1 targets  =', is_target.count(target_catalog))
        for rank in range(1, 9):
            print('# of rank={} targets ='.format(rank),
                  Query('rank == {}'.format(rank)).count(target_catalog))

    target_catalog['type'] = 'TARGET'
    target_catalog['type'][is_guide_star.mask(target_catalog)] = 'guide'

    target_catalog.rename_column('RA', 'ra')
    target_catalog.rename_column('DEC', 'dec')
    target_catalog.rename_column('OBJID', 'object')
    target_catalog.rename_column('r_mag', 'mag')

    target_catalog.sort(['rank', 'TARGETING_SCORE', 'mag'])
    target_catalog = target_catalog[[
        'ra', 'dec', 'object', 'rank', 'type', 'mag'
    ]]

    if write_to:
        if verbose:
            print('Writing to {}'.format(write_to))

        if not write_to.endswith('.cat'):
            print('Warning: filename should end with \'.cat\'')

        with open(write_to, 'w') as fh:
            fh.write('\t'.join(target_catalog.colnames) + '\n')
            # the MMT format is odd and *requires* "---"'s in the second header line
            fh.write('\t'.join(('-' * len(s)
                                for s in target_catalog.colnames)) + '\n')
            target_catalog.write(
                fh,
                delimiter='\t',
                format='ascii.fast_no_header',
                formats={
                    'ra':
                    lambda x: Angle(x, 'deg').wrap_at(360 * u.deg).to_string(
                        'hr', sep=':', precision=3),  # pylint: disable=E1101
                    'dec':
                    lambda x: Angle(x, 'deg').to_string(
                        'deg', sep=':', precision=3),
                    'mag':
                    '%.2f',
                    'rank':
                    lambda x: '' if x == 99 else '{:d}'.format(x),
                })

    return target_catalog