Example #1
import numpy as np
from astropy.table import Table


def prepare_fit_range(indata, bounds=None):
    """`indata` must be binned on a common velocity grid.

    If no range is passed, the full data range is fitted.
    """
    mcols = {}
    if bounds is None:
        # default to the same column that the range cut below filters on
        bounds = indata['Velocity'].min(), indata['Velocity'].max()
    bounds = np.array(bounds)
    idx = np.where((indata['Velocity'] > bounds.min())
                   & (indata['Velocity'] < bounds.max()))
    fitdata = indata[idx]
    mcols["Velocity"] = fitdata["Velocity"]
    for col in fitdata.colnames:
        if not col.endswith('flux'):  # optionally also filter with col.startswith("Si")
            continue
        if col[:-5] not in fikdict:  # strip the " flux" suffix; fikdict is a module-level lookup
            continue
        species = " ".join(col.split(" ")[:-1])
        mflcol = Table.MaskedColumn(fitdata[col],
                                    name=col,
                                    mask=fitdata[species + " mask"].data)
        mercol = Table.MaskedColumn(fitdata[species + " errs"],
                                    mask=fitdata[species + " mask"].data)
        mcols[col] = mflcol
        mcols[species + " errs"] = mercol
    fittable = Table(mcols)
    return fittable
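A minimal usage sketch (hypothetical data; the `fikdict` here is a stand-in for the module-level lookup the function consults):

vel = np.linspace(-5000., 5000., 11)
fikdict = {"Si II": 0.7}  # hypothetical stand-in for the real lookup table
indata = Table({"Velocity": vel,
                "Si II flux": np.ones_like(vel),
                "Si II errs": np.full_like(vel, 0.05),
                "Si II mask": np.zeros(len(vel), dtype=bool)})
fittable = prepare_fit_range(indata, bounds=(-2000., 2000.))
print(fittable.colnames)  # ['Velocity', 'Si II flux', 'Si II errs']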
Example #2
def mask_nan_values(table):
    """
    Mask all NaN values contained in columns of dtype float.
    Useful after reading a FITS table.

    :param table: astropy.table.Table, modified in place
    :return: None
    """
    for col_name in table.colnames:
        col = table[col_name]
        if not isinstance(col, Column):
            continue
        if np.issubdtype(col.dtype, np.floating) and np.any(np.isnan(col)):
            table[col_name] = Table.MaskedColumn(col.data,
                                                 mask=np.isnan(col.data),
                                                 unit=col.unit)
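A quick sketch of the effect on hypothetical data: NaNs in float columns become masked entries, while string columns are left alone.

import numpy as np
from astropy.table import Table, Column

t = Table({'flux': [1.0, np.nan, 3.0], 'name': ['a', 'b', 'c']})
mask_nan_values(t)
print(t['flux'].mask)  # [False  True False]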
# File can be downloaded (in CSV format) from https://docs.google.com/spreadsheets/d/18w3dbA3lga8So5sCUml7xUFyOPjfTEZ-vXSzobYprhY/edit?usp=sharing
# You *may* need to remove commas first (astropy.table may not like them).

import os
import json
from astropy.table import Table

if os.getcwd().split('/')[-1] == 'python':
    relDir = '../'
else:
    relDir = './'
fileIn = os.path.join(relDir, "data/data.csv")
tabIn = Table.read(fileIn)

jsonOut = []
for row in tabIn:
    entry = {}
    for col in tabIn.colnames:
        if isinstance(tabIn[col], Table.MaskedColumn):
            if not tabIn[col].mask[row.index]:
                entry[col] = row[col]
        else:
            entry[col] = row[col]
    jsonOut.append(entry)
with open(os.path.join(relDir, 'data/data.json'), 'w') as fJson:
    json.dump(jsonOut, fJson, indent=2)

# Wrap the JSON in a data(...) callback to produce JSONP.
with open(os.path.join(relDir, 'data/data.json'), 'r') as fIn:
    lines = fIn.readlines()
lines[0] = 'data(' + lines[0]
lines[-1] = lines[-1] + ');'
with open(os.path.join(relDir, 'data/data.jsonp'), 'w') as fOut:
    fOut.writelines(lines)
import os
import numpy as np
from astropy.table import Table


def main(mtlz_path,
         mtlz_name,
         correlation_cut=0.2,
         summary_column_subset=True):

    maskname = mtlz_name.split('_')[2]
    print(maskname)
    tab = Table.read(os.path.join(mtlz_path, mtlz_name), format='ascii.csv')

    ## Make appropriate cuts
    if 'SDSS_only' in tab.colnames:
        if isinstance(tab['SDSS_only'][0], (bool, np.bool_)):  # np.bool no longer exists in modern numpy
            boolcut = [(not row) for row in tab['SDSS_only']]
        else:
            boolcut = [row.lower() == 'false' for row in tab['SDSS_only']]
        tab = tab[boolcut]
    if 'cor' in tab.colnames:
        tab = tab[tab['cor'] >= correlation_cut]
    if 'ID' in tab.colnames:
        tab = tab[['GAL' in id for id in tab['ID']]]
        outnames = []
        for name in tab['ID']:
            outnames.append(name.replace('GAL', '{}-'.format(maskname)))

        tab.remove_column('ID')
        tab.add_column(Table.Column(name='TARGETID', data=outnames))

    ## Make sure the names look good
    # ra_colname_bool = np.any([col.strip(' \t\r').upper() == 'RA' for col in tab.colnames])
    # if not ra_colname_bool:
    if 'RA' not in tab.colnames:
        tab.rename_column('RA_targeted', 'RA')
        tab.rename_column('DEC_targeted', 'DEC')

    # sdss_colname_bool = np.any([col.strip(' \t\r').upper() == 'SDSS_SDSS12' for col in tab.colnames])
    # if sdss_colname_bool:
    if 'sdss_SDSS12' in tab.colnames:
        newcol = []
        for ii in range(len(tab)):
            newcol.append('SDSS' + str(tab['sdss_SDSS12'][ii]))
        tab.add_column(Table.Column(data=newcol, name='SDSS12_OBJID'))
        tab.remove_column('sdss_SDSS12')
    else:
        tab.add_column(
            Table.MaskedColumn(
                data=[''] * len(tab),
                name='SDSS12_OBJID'))  # ,mask=np.ones(len(tab)).astype(bool)))
    if 'sdss_zsp' in tab.colnames:
        tab.rename_column('sdss_zsp', 'SDSS_zsp')
    else:
        tab.add_column(
            Table.MaskedColumn(
                data=np.zeros(len(tab)),
                name='SDSS_zsp'))  #,mask=np.ones(len(tab)).astype(bool)))
    if 'z_est_bary' in tab.colnames:
        tab.add_column(Table.Column(data=tab['z_est_bary'].copy(), name='z'))
    if 'Proj_R_asec' in tab.colnames:
        tab.rename_column('Proj_R_asec', 'R [asec]')
    if 'velocity' in tab.colnames:
        tab.rename_column('velocity', 'v [km/s]')
    if 'FIBNAME' in tab.colnames:
        tab.rename_column('FIBNAME', 'FIBERNUM')
    ## Load up in the right order
    if summary_column_subset:
        tab = tab[[
            'TARGETID', 'FIBERNUM', 'RA', 'DEC', 'SDSS12_OBJID', 'z',
            'R [asec]', 'v [km/s]', 'SDSS_zsp'
        ]]

    if 'description' in tab.meta:
        tab.meta['DESCRP'] = tab.meta.pop('description')

    return tab.filled()
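A hedged call sketch (path and filename hypothetical; note that maskname is parsed from the third underscore-separated field of mtlz_name):

summary = main('/path/to/mtlz', 'mtlz_r1_maskA_full.csv', correlation_cut=0.3)
summary.write('maskA_summary.fits', overwrite=True)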
import os
import sys
from numpy import where, ones, zeros
from astropy.table import Table

args = sys.argv[1:]  # assumed: the original snippet defines args elsewhere (e.g. via optparse)

if os.path.exists(args[2]):
    os.remove(args[2])

FREQ = '118-129'
OFFSET = 0.347
CAT = 'tgss'
SNR_THRESH = 0.0
MATCH_THRESH = 2.0 * OFFSET

t = Table.read(args[0])
t['p'] = where(t['sigma_ips_%s' % CAT] > SNR_THRESH, ones(len(t)), zeros(len(t)))
t2 = Table.read(args[1])
# add new columns to t2
t2["p_match1"] = ones(len(t2))
t2["name_match2"] = Table.MaskedColumn([""]*len(t2), mask=[True]*len(t2), dtype='S24')
t2["p_match2"] = zeros(len(t2))
n_suspicious = 0
n_undetected = 0
hi_lr = 0
lo_snr = 0
n_100 = 0
n_99 = 0
n_95 = 0
n_nomatch = 0
for g in range(t['GroupID'].min(), t['GroupID'].max() + 1):  # + 1 so the final group is included
    #group_gt_thresh = (t['GroupID'] == g) & (t['sigma_ips_%s' % CAT] > SNR_THRESH)
    group_gt_thresh = (t['GroupID'] == g)
    seps = t[group_gt_thresh]['SepArcM_%s' % CAT]
    source_names = t[group_gt_thresh]['Source_name_%s' % CAT]
    if len(seps) < 2:
        continue  # assumed: skip singleton groups; the original snippet is truncated here
Example #6
def slFiles2dataTables(slNames):
    """Reads in data from sourcelists, returns some or all of the following data columns in a pair of properly
    formatted astropy.table objects:

    * X Position
    * Y Position
    * Right Ascension
    * Declination
    * Flux (Inner Aperture)
    * Flux (Outer Aperture)
    * Magnitude (Inner Aperture)
    * Magnitude (Outer Aperture)
    * Flag Value

    .. note::
        'X position' and 'Y position' data columns will always be returned. However, not every sourcelist or
        catalog file is guaranteed to have any of the seven remaining data columns in the above list.

    Parameters
    ----------
    slNames : list
        A list containing the reference sourcelist filename and the comparison sourcelist filename, in that
        order.

    Returns
    -------
    refData : astropy.table object
        data from the reference sourcelist

    compData : astropy.table object
        data from the comparison sourcelist
    """
    if slNames[0].endswith(".ecsv"):
        refData_in = Table.read(slNames[0], format='ascii.ecsv')
    else:
        try:
            refData_in = Table.read(slNames[0], format='ascii.daophot')
        except Exception:
            refData_in = Table.read(slNames[0], format='ascii')
    if slNames[1].endswith(".ecsv"):
        compData_in = Table.read(slNames[1], format='ascii.ecsv')
    else:
        try:
            compData_in = Table.read(slNames[1], format='ascii.daophot')
        except Exception:
            compData_in = Table.read(slNames[1], format='ascii')
    titleSwapDict_dao1 = {"X": "X-Center", "Y": "Y-Center", "RA": "RA", "DEC": "DEC", "FLUX1": "n/a",
                          "FLUX2": "Flux(0.15)", "MAGNITUDE1": "MagAp(0.05)", "MAGNITUDE2": "MagAp(0.15)",
                          "MERR1": "MagErr(0.05)", "MERR2": "MagErr(0.15)", "MSKY": "MSky(0.15)",
                          "STDEV": "Stdev(0.15)", "FLAGS": "Flags", "ID": "ID", "CI": "CI"}
    titleSwapDict_dao2 = {"X": "X-Center", "Y": "Y-Center", "RA": "RA", "DEC": "DEC", "FLUX1": "n/a",
                          "FLUX2": "Flux(0.45)", "MAGNITUDE1": "MagAp(0.15)", "MAGNITUDE2": "MagAp(0.45)",
                          "MERR1": "MagErr(0.15)", "MERR2": "MagErr(0.45)", "MSKY": "MSky(0.45)",
                          "STDEV": "Stdev(0.45)", "FLAGS": "Flags", "ID": "ID", "CI": "CI"}
    titleSwapDict_dao3 = {"X": "X-Center", "Y": "Y-Center", "RA": "RA", "DEC": "DEC", "FLUX1": "n/a",
                          "FLUX2": "Flux(0.125)", "MAGNITUDE1": "MagAp(0.03)", "MAGNITUDE2": "MagAp(0.125)",
                          "MERR1": "MagErr(0.03)", "MERR2": "MagErr(0.125)", "MSKY": "MSky(0.125)",
                          "STDEV": "Stdev(0.125)", "FLAGS": "Flags", "ID": "ID", "CI": "CI"}
    titleSwapDict_point = {"X": "X-Center", "Y": "Y-Center", "RA": "RA", "DEC": "DEC", "FLUX1": "n/a",
                           "FLUX2": "FluxAp2", "MAGNITUDE1": "MagAp1", "MAGNITUDE2": "MagAp2",
                           "MERR1": "MagErrAp1", "MERR2": "MagErrAp2", "MSKY": "MSkyAp2",
                           "STDEV": "StdevAp2", "FLAGS": "Flags", "ID": "ID",
                           "CI": "CI"}
    titleSwapDict_segment = {"X": "X-Centroid", "Y": "Y-Centroid", "RA": "RA", "DEC": "DEC", "FLUX1": "n/a",
                             "FLUX2": "FluxAp2", "MAGNITUDE1": "MagAp1", "MAGNITUDE2": "MagAp2",
                             "MERR1": "MagErrAp1", "MERR2": "MagErrAp2", "MSKY": "n/a", "STDEV": "n/a",
                             "FLAGS": "Flags", "ID": "ID", "CI": "CI"}
    titleSwapDict_daoTemp = {"X": "XCENTER", "Y": "YCENTER", "RA": "n/a", "DEC": "n/a", "FLUX1": "FLUX1",
                             "FLUX2": "FLUX2", "MAGNITUDE1": "MAG1", "MAGNITUDE2": "MAG2", "FLAGS": "n/a",
                             "ID": "ID", "MERR1": "MERR1", "MERR2": "MERR2", "MSKY": "MSKY", "STDEV": "STDEV"}
    titleSwapDict_sourceX = {"X": "X_IMAGE", "Y": "Y_IMAGE", "RA": "RA", "DEC": "DEC", "FLUX1": "FLUX_APER1",
                             "FLUX2": "FLUX_APER2", "MAGNITUDE1": "MAG_APER1", "MAGNITUDE2": "MAG_APER2",
                             "FLAGS": "FLAGS", "ID": "NUMBER"}
    titleSwapDict_cooNew = {"X": "col1", "Y": "col2", "RA": "n/a", "DEC": "n/a", "FLUX1": "n/a",
                            "FLUX2": "n/a", "MAGNITUDE1": "n/a", "MAGNITUDE2": "n/a", "FLAGS": "n/a",
                            "ID": "col7"}
    # titleSwapDict_cooOld = {"X": "XCENTER", "Y": "YCENTER", "RA": "n/a", "DEC": "n/a", "FLUX1": "n/a",
    #                         "FLUX2": "n/a", "MAGNITUDE1": "n/a", "MAGNITUDE2": "n/a", "FLAGS": "n/a",
    #                         "ID": "ID"}

    titleSwapDict_cooOld2 = {"X": "XCENTER", "Y": "YCENTER", "RA": "RA", "DEC": "DEC", "FLUX1": "FLUX_0.05",
                             "FLUX2": "FLUX_0.15", "MAGNITUDE1": "MAG_0.05", "MAGNITUDE2": "MAG_0.15",
                             "FLAGS": "n/a", "ID": "ID", "MERR1": "MERR_0.05", "MERR2": "MERR_0.15",
                             "MSKY": "MSKY", "STDEV": "STDEV"}

    titleSwapDict_daorep = {"X": "X", "Y": "Y", "RA": "n/a", "DEC": "n/a", "FLUX1": "flux_0",
                            "FLUX2": "flux_1", "MAGNITUDE1": "mag_0", "MAGNITUDE2": "mag_1", "FLAGS": "n/a",
                            "ID": "n/a"}
    ctr = 1
    for dataTable in [refData_in, compData_in]:
        if "X-Center" in list(dataTable.keys()):
            if (("MagAp(0.05)" in list(dataTable.keys())) and (
                    "MagAp(0.15)" in list(dataTable.keys()))):  # ACS/WFC, WFC3/UVIS
                log.info("titleSwapDict_dao1")
                titleSwapDict = titleSwapDict_dao1
            elif (("MagAp(0.15)" in list(dataTable.keys())) and ("MagAp(0.45)" in list(dataTable.keys()))):  # WFC3/IR
                log.info("titleSwapDict_dao2")
                titleSwapDict = titleSwapDict_dao2
            elif (("MagAp(0.03)" in list(dataTable.keys())) and ("MagAp(0.125)" in list(dataTable.keys()))):  # ACS/HRC
                log.info("titleSwapDict_dao3")
                titleSwapDict = titleSwapDict_dao3
            elif "MagAp1" in list(dataTable.keys()):
                log.info("titleSwapDict_point")
                titleSwapDict = titleSwapDict_point
            else:
                sys.exit("ERROR: Unrecognized format. Exiting...")
        elif ("XCENTER" in list(dataTable.keys()) and "FLUX1" in list(dataTable.keys())):
            log.info("titleSwapDict_daoTemp")
            titleSwapDict = titleSwapDict_daoTemp
        elif "X_IMAGE" in list(dataTable.keys()):
            log.info("titleSwapDict_sourceX")
            titleSwapDict = titleSwapDict_sourceX
        elif "col1" in list(dataTable.keys()):
            log.info("titleSwapDict_cooNew")
            titleSwapDict = titleSwapDict_cooNew
        elif "XCENTER" in list(dataTable.keys()):
            log.info("titleSwapDict_cooOld2")
            titleSwapDict = titleSwapDict_cooOld2
        elif "X" in list(dataTable.keys()):
            log.info("titleSwapDict_daorep")
            titleSwapDict = titleSwapDict_daorep
        elif "X-Centroid" in list(dataTable.keys()) and "MagAp1" in list(dataTable.keys()):
            log.info("titleSwapDict_segment")
            titleSwapDict = titleSwapDict_segment
        else:
            sys.exit("ERROR: Unrecognized format. Exiting...")
        outTable = Table()
        for swapKey in list(titleSwapDict.keys()):
            if titleSwapDict[swapKey] != "n/a":
                try:
                    col2add = Table.Column(name=swapKey, data=dataTable[titleSwapDict[swapKey]])
                except TypeError:
                    col2add = Table.MaskedColumn(name=swapKey, data=dataTable[titleSwapDict[swapKey]])
                outTable.add_column(col2add)
        if ctr == 1:
            refData = outTable
        if ctr == 2:
            compData = outTable
        ctr += 1

    return (refData, compData)
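Hedged usage (filenames hypothetical; each file's format is inferred from its extension and its column names):

refData, compData = slFiles2dataTables(['ref_catalog.ecsv', 'comp_catalog.ecsv'])
print(refData.colnames)  # a subset of ['X', 'Y', 'RA', 'DEC', 'FLUX1', 'FLUX2', ...]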
Example #7
def write_table(origtable, tablename=None, tabletype=None, joinsymb='|', overwrite=True, verbose=False,
                comma_replacement=';', write_empty=False, use_specprod=True):
    """
    Workflow function to write exposure, processing, and unprocessed tables. It allows for multi-valued table
    cells, which are joined into strings using joinsymb. It writes to a temp file before moving the fully
    written file to the name given by tablename (or the default for tables of type tabletype).

    Args:
        origtable, Table. Either exposure table or processing table.
        tablename, str. Full pathname of where the table should be saved, including the extension. The table is
                        first saved to *.temp.{ext} and then moved to *.{ext}. If None, the default for
                        tabletype is looked up.
        tabletype, str. Used if tablename is None to get the default name for the type of table.
        joinsymb, str. The symbol used to join values in a list/array when saving. Should not be a comma.
        overwrite, bool. Whether to overwrite the file on disk if it already exists. Default is currently True.
        verbose, bool. Whether to give verbose amounts of information (True) or succinct/no outputs (False).
                       Default is False.
        write_empty, bool. Whether to write an empty table to disk. The default is False. Warning: code is less
                           robust to column datatypes on read/write if the table is empty. May cause issues if
                           this is set to True.
        comma_replacement, str. Symbol written in place of commas in scalar string columns when saving, since
                                commas conflict with the CSV format and symbols like ';' are problematic on the
                                command line (e.g. in BADAMPS).
        use_specprod, bool. If True and tablename is not specified and tabletype is exposure table, this looks
                            for the table in the SPECPROD rather than the exptab repository. Default is True.
    Returns:
        Nothing.
    """
    log = get_logger()
    if tablename is None and tabletype is None:
        log.error("Pathname or type of table is required to save the table")
        return

    if tabletype is not None:
        tabletype = standardize_tabletype(tabletype)

    if tablename is None:
        tablename = translate_type_to_pathname(tabletype, use_specprod=use_specprod)

    if not write_empty and len(origtable) == 0:
        log.warning(f'NOT writing zero length table to {tablename}')
        return
        
    if verbose:
        log.info("In write table", tablename,'\n', tabletype)
        log.info(origtable[0:2])
    basename, ext = os.path.splitext(tablename)

    temp_name = f'{basename}.temp{ext}'
    if verbose:
        log.info(f"{ext} {temp_name}")
    table = origtable.copy()

    if ext in ['.csv', '.ecsv']:
        if verbose:
            log.info("Given table: ", table.info)
        # replace_cols = {}

        for nam in table.colnames:
            ndim = table[nam].ndim
            if ndim > 1 or isinstance(table[nam][0], (list, np.ndarray)) or table[nam].dtype.kind == 'O':
                if verbose:
                    log.info(f'{nam} is {ndim} dimensions, changing to string')
                col = [ensure_scalar(row, joinsymb=joinsymb) for row in table[nam]]
                # replace_cols[nam] = Table.Column(name=nam,data=col)
                if isinstance(table[nam], Table.MaskedColumn):
                    col = Table.MaskedColumn(name=nam, data=col)
                else:
                    col = Table.Column(name=nam, data=col)
                table.replace_column(nam, col)
            elif isinstance(table[nam][0], (str, np.str_)):  # np.str was removed from numpy
                col = [row.replace(',', comma_replacement) for row in table[nam]]
                if isinstance(table[nam], Table.MaskedColumn):
                    col = Table.MaskedColumn(name=nam, data=col)
                else:
                    col = Table.Column(name=nam, data=col)
                table.replace_column(nam, col)

        # for nam, col in replace_cols.items():
        #     t.replace_column(nam,col)

        if verbose and np.any([c.ndim > 1 or isinstance(c[0], (list, np.ndarray))
                               for c in table.itercols()]):
            log.warning("A column was still more than one dimensional")
            log.info(table.info())

        table.write(temp_name, format=f'ascii{ext}', overwrite=overwrite)
    else:
        table.write(temp_name, overwrite=True)

    os.rename(temp_name, tablename)
    if verbose:
        log.info("Written table: ", table.info)
Example #8
def combine_vvv_2mass(vvvpsf_file,
                      twomass_file,
                      out_dir=dirconfig.cross_vvv_2mass,
                      max_error=1.00):
    """
    This function add 2MASS sources to the VVV-PSF catalog

    :param twomass_file: string
    :param vvvpsf_file: string
    :param out_dir: string
    :param max_error: number
    :return:
    """

    # Check if files exist
    if files_exist(twomass_file, vvvpsf_file):
        print('Combining: ', vvvpsf_file, twomass_file)

    # Read catalogs
    twomass_table = read_fits_table(twomass_file)
    vvvpsf_table = read_fits_table(vvvpsf_file)

    # Check if tile match
    if twomass_table.meta['TILE'] != vvvpsf_table.meta['TILE']:
        raise ValueError('Files do not correspond to the same tile')

    # Cross-match
    c2mass = SkyCoord(twomass_table['RAJ2000'],
                      twomass_table['DEJ2000'],
                      unit='deg')
    cvvv = SkyCoord(vvvpsf_table['ra'], vvvpsf_table['dec'], unit='deg')
    idx, d2d, d3d = c2mass.match_to_catalog_sky(cvvv)
    unpaired = d2d > max_error * u.arcsec  # True where the nearest VVV-PSF source is farther than max_error

    # In this case repeated sources are not removed (otherwise they will be included in the output catalog)

    unpaired_2mass_sources = twomass_table[unpaired]

    # Create a new table to store combined data
    unp_table = Table()

    # Add unpaired 2MASS sources to new_catalog
    unp_table['ra'] = unpaired_2mass_sources['RAJ2000']
    unp_table['dec'] = unpaired_2mass_sources['DEJ2000']
    unp_table['l'] = unpaired_2mass_sources['l']
    unp_table['b'] = unpaired_2mass_sources['b']
    unp_table['mag_J'] = unpaired_2mass_sources['J_vista']
    unp_table['eJ'] = unpaired_2mass_sources['e_Jmag']
    unp_table['mag_H'] = unpaired_2mass_sources['H_vista']
    unp_table['eH'] = unpaired_2mass_sources['e_Hmag']
    unp_table['mag_Ks'] = unpaired_2mass_sources['Ks_vista']
    unp_table['eKs'] = unpaired_2mass_sources['e_Kmag']
    unp_table['H-Ks'] = unpaired_2mass_sources[
        'H_vista'] - unpaired_2mass_sources['Ks_vista']
    unp_table['J-Ks'] = unpaired_2mass_sources[
        'J_vista'] - unpaired_2mass_sources['Ks_vista']
    unp_table['J-H'] = unpaired_2mass_sources[
        'J_vista'] - unpaired_2mass_sources['H_vista']
    unp_table['catalog'] = [
        '2MASS' for _ in range(len(unpaired_2mass_sources))
    ]
    unp_table['id'] = unpaired_2mass_sources['id']

    # Aux catalog for VVV-PSF sources
    aux_table = Table()

    # Add VVV-PSF sources to new_catalog
    aux_table['ra'] = vvvpsf_table['ra']
    aux_table['dec'] = vvvpsf_table['dec']
    aux_table['l'] = vvvpsf_table['l']
    aux_table['b'] = vvvpsf_table['b']
    aux_table['mag_Z'] = Table.MaskedColumn(vvvpsf_table['mag_Z'].data,
                                            mask=np.isnan(
                                                vvvpsf_table['mag_Z'].data))
    aux_table['er_Z'] = Table.MaskedColumn(vvvpsf_table['er_Z'].data,
                                           mask=np.isnan(
                                               vvvpsf_table['er_Z'].data))
    aux_table['mag_Y'] = Table.MaskedColumn(vvvpsf_table['mag_Y'].data,
                                            mask=np.isnan(
                                                vvvpsf_table['mag_Y'].data))
    aux_table['er_Y'] = Table.MaskedColumn(vvvpsf_table['er_Y'].data,
                                           mask=np.isnan(
                                               vvvpsf_table['er_Y'].data))
    aux_table['mag_J'] = vvvpsf_table['mag_J']
    aux_table['eJ'] = vvvpsf_table['er_J']
    aux_table['mag_H'] = vvvpsf_table['mag_H']
    aux_table['eH'] = vvvpsf_table['er_H']
    aux_table['mag_Ks'] = vvvpsf_table['mag_Ks']
    aux_table['eKs'] = vvvpsf_table['er_Ks']
    aux_table['H-Ks'] = vvvpsf_table['H-Ks']
    aux_table['J-Ks'] = vvvpsf_table['J-Ks']
    aux_table['J-H'] = vvvpsf_table['J-H']
    aux_table['catalog'] = ['PSF-VVV' for _ in range(len(vvvpsf_table))]
    aux_table['id'] = vvvpsf_table['id']

    output_table = vstack([unp_table, aux_table])

    # Add metadata to the new file
    date_time = datetime.utcnow()
    tile = vvvpsf_table.meta['TILE']
    catype = vvvpsf_table.meta['CATYPE'] + '-' + twomass_table.meta['CATYPE']
    output_table.meta = {
        'TILE': tile,
        'F2MASS': twomass_file,
        'N2MASS': len(unp_table),
        'FVVV': vvvpsf_file,
        'NVVV': len(vvvpsf_table),
        'STAGE': 'combine_vvv_2mass',
        'CATYPE': catype,
        'CDATE': date_time.strftime('%Y-%m-%d'),
        'CTIME': date_time.strftime('%H:%M:%S'),
        'AUTHOR': 'Jorge Anais'
    }

    # Write output table
    fname = f't{tile:03d}_{catype}.fits'
    output_file = path.join(out_dir, fname)
    write_fits_table(output_table, output_file)
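Hedged usage (catalog paths hypothetical; both inputs must carry the same TILE metadata):

combine_vvv_2mass('vvv_psf_t001.fits', '2mass_t001.fits', out_dir='/tmp', max_error=1.0)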