import numpy as np
from astropy.table import Column
# (compute_galactic_coordinates below is a project-local helper)


def add_extra_info(table):
    # Change Pierre's XYZ to the one used in Gammapy at the moment
    # This was checked to be correct in https://github.com/gammasky/cta-dc/issues/17
    table['galactocentric_x'] = Column(table['POS_Y'].data, unit='kpc', description='Galactocentric X', format='%0.5f')
    table['galactocentric_y'] = Column(-table['POS_X'].data, unit='kpc', description='Galactocentric Y', format='%0.5f')
    table['galactocentric_z'] = Column(table['POS_Z'].data, unit='kpc', description='Galactocentric Z', format='%0.5f')
    table.remove_columns(['POS_X', 'POS_Y', 'POS_Z'])

    table.rename_column('Radius', 'size_physical')
    table.rename_column('size', 'sigma')

    r = np.sqrt(table['galactocentric_x'] ** 2 + table['galactocentric_y'] ** 2)
    table['galactocentric_r'] = Column(r, unit='kpc', description='Galactocentric radius in the xy plane')

    distance, glon, glat = compute_galactic_coordinates(
        x=table['galactocentric_x'].quantity,
        y=table['galactocentric_y'].quantity,
        z=table['galactocentric_z'].quantity,
    )

    table['distance'] = Column(distance, unit='kpc', description='Distance from Earth')
    table['distance'].format = '%.5f'

    table['GLON'] = Column(glon, unit='deg', description='Galactic longitude')
    table['GLON'].format = '%.5f'
    table['GLAT'] = Column(glat, unit='deg', description='Galactic latitude')
    table['GLAT'].format = '%.5f'
    table['skip'] = Column(0, description='Skip flag: 1 = skip, 0 = keep')
    return table
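A minimal sketch of the ``Column`` metadata pattern used above (``unit``, ``description``, ``format``); the values here are made up for illustration:

from astropy.table import Table, Column
import numpy as np

t = Table()
t['galactocentric_x'] = Column(np.array([0.125, 8.5]), unit='kpc',
                               description='Galactocentric X', format='%0.5f')
print(t['galactocentric_x'].info)  # reports dtype, unit and description
print(t)                           # values render with 5 decimal places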
Example #2
from astropy.table import Table, Column
from astropy.tests.helper import catch_warnings


def test_preserve_serialized_compatibility_mode(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    with catch_warnings() as w:
        t1.write(test_file,
                 path='the_table',
                 serialize_meta=True,
                 overwrite=True,
                 compatibility_mode=True)

    assert str(w[0].message).startswith(
        "compatibility mode for writing is deprecated")

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
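The warning above is the point of the test: ``compatibility_mode`` is deprecated. The non-deprecated equivalent of the write/read roundtrip, as a sketch against the same ``t1``:

t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].description == t2['a'].description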
Example #4
import h5py
from astropy.io.misc.hdf5 import meta_path
from astropy.table import Table, Column


def test_preserve_serialized(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta

    # Check that the meta table is fixed-width bytes (see #11299)
    with h5py.File(test_file, 'r') as h5:
        meta_lines = h5[meta_path('the_table')]
        assert meta_lines.dtype.kind == 'S'
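``meta_path`` comes from ``astropy.io.misc.hdf5`` and maps the table path to the path of its serialized-metadata dataset. A quick inspection sketch against the file written above:

import h5py
from astropy.io.misc.hdf5 import meta_path

with h5py.File(test_file, 'r') as h5:
    print(list(h5.keys()))                 # the table dataset plus its meta dataset
    print(h5[meta_path('the_table')][:])   # serialized meta as fixed-width bytes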
Example #5
def test_metadata_very_large(tmpdir):
    """Test that very large metadata entries work"""

    test_file = tmpdir.join('test.parquet')

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.meta["meta_big"] = "0" * (2**16 + 1)
    t1.meta["meta_biggerstill"] = "0" * (2**18)

    t1.write(test_file, overwrite=True)

    t2 = Table.read(test_file)

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
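The two oversized strings deliberately straddle the 64 KiB (2**16) mark that trips up some metadata stores; for parquet the writer just has to keep them intact. A direct check against the table read back above:

assert len(t2.meta['meta_big']) == 2**16 + 1
assert len(t2.meta['meta_biggerstill']) == 2**18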
Example #6
def getStarsWithAngles(paths, hdu_index=0):
    star_lists = []
    for path in paths:
        image = getImage(path, hdu_index)
        stars = getStars(image)
        columns = ['id', 'xcentroid', 'ycentroid', 'flux']
        for c in columns[1:]:
            stars[c].format = get_rounded_format()
        stars.keep_columns(columns)
        stars.sort('flux')
        stars.reverse()
        stars = stars[:numberOfStars()]
        angles = getStarAngles(stars)
        stars['angles'] = Column(angles, description='Angles')
        stars['angles'].format = get_rounded_format()
        #stars.sort('angles')
        star_lists.append(stars)
    return star_lists
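``getImage``, ``getStars``, ``get_rounded_format`` and ``numberOfStars`` are project-local helpers. A rough stand-in for the detection step, assuming photutils is available (file name and thresholds are illustrative):

from astropy.io import fits
from astropy.stats import sigma_clipped_stats
from photutils.detection import DAOStarFinder

image = fits.getdata('field.fits')             # hypothetical input image
mean, median, std = sigma_clipped_stats(image, sigma=3.0)
finder = DAOStarFinder(fwhm=3.0, threshold=5.0 * std)
stars = finder(image - median)                 # Table with id/xcentroid/ycentroid/flux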
Example #7
def test_preserve_serialized(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
Example #9
def test_preserve_serialized(tmpdir):
    """Test that writing/reading preserves unit/format/description."""

    test_file = tmpdir.join('test.parquet')

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, overwrite=True)

    t2 = Table.read(test_file)

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
Example #10
def test_preserve_serialized_old_meta_format(tmpdir):
    """Test the old meta format

    Only for some files created prior to v4.0, in compatibility mode.
    """
    test_file = get_pkg_data_filename('data/old_meta_example.hdf5')

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
Example #11
def test_metadata_very_large(tmpdir):
    """Test that very large metadata entries work"""
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
    t1.meta["meta_biggerstill"] = "0" * (2 ** 18)

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
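A single HDF5 attribute is limited to roughly 64 KiB, which is presumably why 2**16 + 1 is the interesting size here; with ``serialize_meta=True`` the metadata is written as its own dataset (see Example #4), so large values survive. Verifying the roundtrip directly:

assert t2.meta['meta_big'] == '0' * (2**16 + 1)
assert t2.meta['meta_biggerstill'] == '0' * (2**18)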
Example #12
def coneSearch(VOService, position, radius):
    """
    Returns table from a VO cone search.

    Parameters
    ----------
    VOService : str
        Name of VO service to query (must be one of 'WENSS' or 'NVSS')
    position : list of floats
        A list specifying a new position as [RA, Dec] in either makesourcedb
        format (e.g., ['12:23:43.21', '+22.34.21.2']) or in degrees (e.g.,
        [123.2312, 23.3422])
    radius : float or str
        Radius of the cone search region, in degrees (if float) or as a
        'value unit' string (e.g., '30 arcsec')
    """
    import pyvo as vo

    log = logging.getLogger('LSMTool.Load')

    # Define allowed cone-search databases. These are the ones we know how to
    # convert to makesourcedb-formatted sky models.
    columnMapping = {
        'nvss':{'NVSS':'name', 'RAJ2000':'ra', 'DEJ2000':'dec', 'S1.4':'i',
            'MajAxis':'majoraxis', 'MinAxis':'minoraxis', 'referencefrequency':1.4e9},
        'wenss':{'Name':'name', 'RAJ2000':'ra', 'DEJ2000':'dec', 'Sint':'i',
            'MajAxis':'majoraxis', 'MinAxis':'minoraxis', 'PA':'orientation',
            'referencefrequency':325e6}
        }

    if VOService.lower() in allowedVOServices:
        url = allowedVOServices[VOService.lower()]
    else:
        raise ValueError('VO query service not known. Allowed services are: '
            '{0}'.format(allowedVOServices.keys()))

    # Get raw VO catalog
    log.debug('Querying VO service...')
    try:
        position = [RA2Angle(position[0])[0].value, Dec2Angle(position[1])[0].value]
    except TypeError:
        raise ValueError('VO query position not understood.')
    try:
        radius = Angle(radius, unit='degree').value
    except TypeError:
        raise ValueError('VO query radius not understood.')
    VOcatalog = vo.conesearch(url, position, radius=radius)

    log.debug('Creating table...')
    try:
        table = Table.read(VOcatalog.votable)
    except IndexError:
        # Empty query result
        log.error('No sources found. Sky model is empty.')
        table = makeEmptyTable()
        return table

    # Remove unneeded columns
    colsToRemove = []
    for colName in table.colnames:
        if colName not in columnMapping[VOService.lower()]:
            colsToRemove.append(colName)
        elif columnMapping[VOService.lower()][colName] not in allowedColumnNames:
            colsToRemove.append(colName)
    for colName in colsToRemove:
        table.remove_column(colName)

    # Rename columns to match makesourcedb conventions
    for colName in table.colnames:
        if colName != allowedColumnNames[columnMapping[VOService.lower()][colName]]:
            table.rename_column(colName, allowedColumnNames[columnMapping[
                VOService.lower()][colName]])

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))
    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')
    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))
    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')
    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    # Make sure Name is a str column
    NameRaw = table['Name'].data.tolist()
    NameCol = Column(name='Name', data=NameRaw, dtype='{}100'.format(numpy_type))
    table.remove_column('Name')
    table.add_column(NameCol, index=0)

    # Convert flux and axis values to floats
    for name in ['I', 'MajorAxis', 'MinorAxis', 'Orientation']:
        if name in table.colnames:
            indx = table.index_column(name)
            intRaw = table[name].data.tolist()
            floatCol = Column(name=name, data=intRaw, dtype='float')
            table.remove_column(name)
            table.add_column(floatCol, index=indx)


    # Add source-type column
    types = ['POINT'] * len(table)
    if 'majoraxis' in columnMapping[VOService.lower()].values():
        for i, maj in enumerate(table[allowedColumnNames['majoraxis']]):
            if maj > 0.0:
                types[i] = 'GAUSSIAN'
    col = Column(name='Type', data=types, dtype='{}100'.format(numpy_type))
    table.add_column(col, index=1)

    # Add reference-frequency column
    refFreq = columnMapping[VOService.lower()]['referencefrequency']
    col = Column(name='ReferenceFrequency', data=np.array([refFreq]*len(table), dtype=float))
    table.add_column(col)

    # Set column units and default values
    def fluxformat(val):
        return '{0:0.3f}'.format(val)
    for i, colName in enumerate(table.colnames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        if colName == 'I':
            table.columns[colName].unit = 'mJy'
            table.columns[colName].convert_unit_to('Jy')
            table.columns[colName].format = fluxformat
        else:
            table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and allowedColumnDefaults[colName.lower()] is not None:
            fillVal = allowedColumnDefaults[colName.lower()]
            if colName == 'SpectralIndex':
                while len(fillVal) < 1:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".
                format(colName, fillVal))
            table.columns[colName].fill_value = fillVal

    return table
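A minimal direct use of the same ``pyvo`` call, with a hypothetical cone-search URL standing in for the lookup in ``allowedVOServices``:

import pyvo as vo
from astropy.table import Table

url = 'http://example.org/scs'   # hypothetical cone-search endpoint
result = vo.conesearch(url, [123.2312, 23.3422], radius=0.5)
table = Table.read(result.votable)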
Example #13
def createTable(outlines, metaDict, colNames, colDefaults):
    """
    Creates an astropy table from inputs.

    Parameters
    ----------
    outlines : list of str
        Input lines
    metaDict : dict
        Input meta data
    colNames : list of str
        Input column names
    colDefaults : list
        Input column default values

    Returns
    -------
    table : astropy.table.Table object

    """
    # Before loading table into an astropy Table object, set lengths of Name,
    # Patch, and Type columns to 100 characters
    log = logging.getLogger('LSMTool.Load')

    converters = {}
    nameCol = 'col{0}'.format(colNames.index('Name')+1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type')+1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    if 'Patch' in colNames:
        patchCol = 'col{0}'.format(colNames.index('Patch')+1)
        converters[patchCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]

    log.debug('Creating table...')
    table = Table.read('\n'.join(outlines), guess=False, format='ascii.no_header', delimiter=',',
        names=colNames, comment='#', data_start=0, converters=converters)

    # Convert spectral index values from strings to arrays.
    if 'SpectralIndex' in table.keys():
        log.debug('Converting spectral indices...')
        specOld = table['SpectralIndex'].data.tolist()
        specVec = []
        maskVec = []
        maxLen = 0
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    maxLen = 1
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    if len(specEntry) > maxLen:
                        maxLen = len(specEntry)
            except Exception:
                pass
        log.debug('Maximum number of spectral-index terms in model: {0}'.format(maxLen))
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    specEntry = [float(l)]
                    specMask = [False]
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    specMask = [False] * len(specEntry)
                while len(specEntry) < maxLen:
                    specEntry.append(0.0)
                    specMask.append(True)
                specVec.append(specEntry)
                maskVec.append(specMask)
            except Exception:
                specVec.append([0.0]*maxLen)
                maskVec.append([True]*maxLen)
        specCol = MaskedColumn(name='SpectralIndex', data=np.array(specVec, dtype=float))
        specCol.mask = maskVec
        specIndx = table.keys().index('SpectralIndex')
        table.remove_column('SpectralIndex')
        table.add_column(specCol, index=specIndx)

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))
    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')
    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))
    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')
    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    def fluxformat(val):
        return '{0:0.3f}'.format(val)
    table.columns['I'].format = fluxformat

    # Set column units and default values
    for i, colName in enumerate(colNames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and colDefaults[i] is not None:
            fillVal = colDefaults[i]
            if colName == 'SpectralIndex':
                while len(fillVal) < maxLen:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".
                format(colName, fillVal))
            table.columns[colName].fill_value = fillVal
    table.meta = metaDict

    return table
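The ``fill_value`` set at the end only takes effect when the masked column is filled; a standalone sketch of that mechanism:

import numpy as np
from astropy.table import MaskedColumn

col = MaskedColumn(name='SpectralIndex',
                   data=np.array([[-0.8, 0.0], [-0.7, 0.1]]),
                   mask=[[False, True], [False, False]])
col.fill_value = 0.0
print(col.filled())   # masked entries replaced by 0.0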
Example #14
def coneSearch(VOService, position, radius):
    """
    Returns table from a VO cone search.

    Parameters
    ----------
    VOService : str
        Name of VO service to query (must be one of 'WENSS' or 'NVSS')
    position : list of floats
        A list specifying a new position as [RA, Dec] in either makesourcedb
        format (e.g., ['12:23:43.21', '+22.34.21.2']) or in degrees (e.g.,
        [123.2312, 23.3422])
    radius : float or str
        Radius of the cone search region, in degrees (if float) or as a
        'value unit' string (e.g., '30 arcsec')
    """
    import pyvo as vo

    log = logging.getLogger('LSMTool.Load')

    # Define allowed cone-search databases. These are the ones we know how to
    # convert to makesourcedb-formatted sky models.
    columnMapping = {
        'nvss': {
            'NVSS': 'name',
            'RAJ2000': 'ra',
            'DEJ2000': 'dec',
            'S1.4': 'i',
            'MajAxis': 'majoraxis',
            'MinAxis': 'minoraxis',
            'referencefrequency': 1.4e9
        },
        'wenss': {
            'Name': 'name',
            'RAJ2000': 'ra',
            'DEJ2000': 'dec',
            'Sint': 'i',
            'MajAxis': 'majoraxis',
            'MinAxis': 'minoraxis',
            'PA': 'orientation',
            'referencefrequency': 325e6
        }
    }

    if VOService.lower() in allowedVOServices:
        url = allowedVOServices[VOService.lower()]
    else:
        raise ValueError('VO query service not known. Allowed services are: '
                         '{0}'.format(allowedVOServices.keys()))

    # Get raw VO catalog
    log.debug('Querying VO service...')
    try:
        position = [
            RA2Angle(position[0])[0].value,
            Dec2Angle(position[1])[0].value
        ]
    except TypeError:
        raise ValueError('VO query position not understood.')
    try:
        radius = Angle(radius, unit='degree').value
    except TypeError:
        raise ValueError('VO query radius not understood.')
    VOcatalog = vo.conesearch(url, position, radius=radius)

    log.debug('Creating table...')
    try:
        table = Table.read(VOcatalog.votable)
    except IndexError:
        # Empty query result
        log.error('No sources found. Sky model is empty.')
        table = makeEmptyTable()
        return table

    # Remove unneeded columns
    colsToRemove = []
    for colName in table.colnames:
        if colName not in columnMapping[VOService.lower()]:
            colsToRemove.append(colName)
        elif columnMapping[
                VOService.lower()][colName] not in allowedColumnNames:
            colsToRemove.append(colName)
    for colName in colsToRemove:
        table.remove_column(colName)

    # Rename columns to match makesourcedb conventions
    for colName in table.colnames:
        if colName != allowedColumnNames[columnMapping[VOService.lower()]
                                         [colName]]:
            table.rename_column(
                colName,
                allowedColumnNames[columnMapping[VOService.lower()][colName]])

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))

    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')

    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))

    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')

    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    # Make sure Name is a str column
    NameRaw = table['Name'].data.tolist()
    NameCol = Column(name='Name',
                     data=NameRaw,
                     dtype='{}100'.format(numpy_type))
    table.remove_column('Name')
    table.add_column(NameCol, index=0)

    # Convert flux and axis values to floats
    for name in ['I', 'MajorAxis', 'MinorAxis', 'Orientation']:
        if name in table.colnames:
            indx = table.index_column(name)
            intRaw = table[name].data.tolist()
            floatCol = Column(name=name, data=intRaw, dtype='float')
            table.remove_column(name)
            table.add_column(floatCol, index=indx)

    # Add source-type column
    types = ['POINT'] * len(table)
    if 'majoraxis' in columnMapping[VOService.lower()].values():
        for i, maj in enumerate(table[allowedColumnNames['majoraxis']]):
            if maj > 0.0:
                types[i] = 'GAUSSIAN'
    col = Column(name='Type', data=types, dtype='{}100'.format(numpy_type))
    table.add_column(col, index=1)

    # Add reference-frequency column
    refFreq = columnMapping[VOService.lower()]['referencefrequency']
    col = Column(name='ReferenceFrequency',
                 data=np.array([refFreq] * len(table), dtype=float))
    table.add_column(col)

    # Set column units and default values
    def fluxformat(val):
        return '{0:0.3f}'.format(val)

    for i, colName in enumerate(table.colnames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        if colName == 'I':
            table.columns[colName].unit = 'mJy'
            table.columns[colName].convert_unit_to('Jy')
            table.columns[colName].format = fluxformat
        else:
            table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName], 'filled') and allowedColumnDefaults[
                colName.lower()] is not None:
            fillVal = allowedColumnDefaults[colName.lower()]
            if colName == 'SpectralIndex':
                while len(fillVal) < 1:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".format(
                colName, fillVal))
            table.columns[colName].fill_value = fillVal

    return table
Example #15
    def write(self, lines):
        """
        Writes the Header of the CDS table, aka ReadMe, which
        also contains the Byte-By-Byte description of the table.
        """
        from astropy.coordinates import SkyCoord

        # list to store indices of columns that are modified.
        to_pop = []

        # For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
        # or whose values are objects of these classes.
        for i, col in enumerate(self.cols):
            # If col is a ``Column`` object but its values are ``SkyCoord`` objects,
            # convert the whole column to ``SkyCoord`` object, which helps in applying
            # SkyCoord methods directly.
            if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
                try:
                    col = SkyCoord(col)
                except (ValueError, TypeError):
                    # If only the first value of the column is a ``SkyCoord`` object,
                    # the column cannot be converted to a ``SkyCoord`` object.
                    # These columns are converted to ``Column`` object and then converted
                    # to string valued column.
                    if not isinstance(col, Column):
                        col = Column(col)
                    col = Column([str(val) for val in col])
                    self.cols[i] = col
                    continue

            # Replace single ``SkyCoord`` column by its coordinate components.
            if isinstance(col, SkyCoord):
                # If coordinates are given in RA/DEC, divide each of them into hour/deg,
                # minute/arcminute, second/arcsecond columns.
                if 'ra' in col.representation_component_names.keys():
                    ra_col, dec_col = col.ra.hms, col.dec.dms
                    coords = [
                        ra_col.h, ra_col.m, ra_col.s, dec_col.d, dec_col.m,
                        dec_col.s
                    ]
                    names = ['RAh', 'RAm', 'RAs', 'DEd', 'DEm', 'DEs']
                    coord_units = [
                        u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec
                    ]
                    coord_descrip = [
                        'Right Ascension (hour)', 'Right Ascension (minute)',
                        'Right Ascension (second)', 'Declination (degree)',
                        'Declination (arcmin)', 'Declination (arcsec)'
                    ]
                    for coord, name, coord_unit, descrip in zip(
                            coords, names, coord_units, coord_descrip):
                        # Have Sign of Declination only in the DEd column.
                        if name in ['DEm', 'DEs']:
                            coord_col = Column(list(np.abs(coord)),
                                               name=name,
                                               unit=coord_unit,
                                               description=descrip)
                        else:
                            coord_col = Column(list(coord),
                                               name=name,
                                               unit=coord_unit,
                                               description=descrip)
                        # Set default number of digits after decimal point for the
                        # second values.
                        if name in ['RAs', 'DEs']:
                            coord_col.format = '.12f'
                        self.cols.append(coord_col)

                # For all other coordinate types, simply divide into two columns
                # for latitude and longitude resp., with the unit kept as is.
                else:
                    # Galactic coordinates.
                    if col.name == 'galactic':
                        lon_col = Column(
                            col.l,
                            name='GLON',
                            description='Galactic Longitude',
                            unit=col.representation_component_units['l'],
                            format='.12f')
                        lat_col = Column(
                            col.b,
                            name='GLAT',
                            description='Galactic Latitude',
                            unit=col.representation_component_units['b'],
                            format='.12f')
                        self.cols.append(lon_col)
                        self.cols.append(lat_col)

                    # Ecliptic coordinates, can be any of various available.
                    elif 'ecliptic' in col.name:
                        lon_col = Column(
                            col.lon,
                            name='ELON',
                            description='Ecliptic Longitude (' + col.name +
                            ')',
                            unit=col.representation_component_units['lon'],
                            format='.12f')
                        lat_col = Column(
                            col.lat,
                            name='ELAT',
                            description='Ecliptic Latitude (' + col.name + ')',
                            unit=col.representation_component_units['lat'],
                            format='.12f')
                        self.cols.append(lon_col)
                        self.cols.append(lat_col)

                    # Convert all other ``SkyCoord`` columns that are not in the above three
                    # representations to string valued columns.
                    else:
                        self.cols.append(Column(col.to_string()))

                to_pop.append(i)  # Delete original ``SkyCoord`` column.

            # Convert all other ``mixin`` columns to ``Column`` objects.
            # Parsing these may still lead to errors!
            elif not isinstance(col, Column):
                col = Column(col)
                # If column values are ``object`` types, convert them to string.
                if np.issubdtype(col.dtype, np.dtype(object).type):
                    col = Column([str(val) for val in col])
                self.cols[i] = col

        # Delete original ``SkyCoord`` columns, if there were any. Pop in
        # reverse order so that earlier indices stay valid.
        for i in sorted(to_pop, reverse=True):
            self.cols.pop(i)

        # Check for any left over extra coordinate columns.
        if any(x in self.colnames for x in ['RAh', 'DEd', 'ELON', 'GLAT']):
            # If there were any ``SkyCoord`` columns after the first one, the division
            # into their component columns would have been skipped. This is done in
            # order to not replace the data in the component columns already obtained.
            # Explicit renaming of the extra coordinate component columns by appending some
            # suffix to their name, so as to distinguish them, is not implemented.
            # Such extra ``SkyCoord`` columns are converted to string valued columns,
            # together with issuance of a warning.
            for i, col in enumerate(self.cols):
                if isinstance(col, SkyCoord):
                    self.cols[i] = Column(col.to_string())
                    message = 'Table already has coordinate system in CDS/MRT-style columns.' \
                              + f' So column {i} is being skipped with designation' \
                              + ' of an `Unknown` string valued column.'
                    warnings.warn(message, UserWarning)

        # Get Byte-By-Byte description and fill the template
        bbb_template = Template('\n'.join(BYTE_BY_BYTE_TEMPLATE))
        byte_by_byte = bbb_template.substitute({
            'file':
            'table.dat',
            'bytebybyte':
            self.write_byte_by_byte()
        })

        # Fill up the full ReadMe
        rm_template = Template('\n'.join(MRT_TEMPLATE))
        readme_filled = rm_template.substitute({'bytebybyte': byte_by_byte})
        lines.append(readme_filled)
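The RA/Dec splitting above is plain ``SkyCoord`` attribute access; a standalone sketch of the same decomposition on made-up coordinates:

import numpy as np
from astropy.coordinates import SkyCoord
from astropy.table import Column

c = SkyCoord(ra=[10.5, 180.0], dec=[-5.25, 42.0], unit='deg')
ra_hms, dec_dms = c.ra.hms, c.dec.dms
rah = Column(list(ra_hms.h), name='RAh', unit='h',
             description='Right Ascension (hour)')
des = Column(list(np.abs(dec_dms.s)), name='DEs', unit='arcsec',
             description='Declination (arcsec)')
des.format = '.12f'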
Example #16
def findUncertainties(thisFilter='r', nside=64, tMax=730,
                      dbFil='minion_1016_sqlite.db', crowdError=0.2,
                      seeingCol='FWHMeff', cleanNpz=True, doPlots=False,
                      wrapGalacs=True, selectStrip=True):
    # (a hiRes=True argument is commented out upstream; see the hiRes block below)

    """Catalogs the uncertainties for a given database, returns the
    file path"""

    # doPlots switches on plotting. This makes things quite a bit
    # slower for coarse healpix, and I haven't worked out how to
    # specify the output plot directory yet. Recommend default to
    # False.

    # plot functions
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    opsdb = db.OpsimDatabase(dbFil)
    outDir = 'crowding_test_2017-07-25'
    resultsDb = db.ResultsDb(outDir=outDir)

    # slicer, etc.
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sql = 'filter="%s" and night < %i' % (thisFilter, tMax)
    plotDict={'colorMax':27.}

    # initialise the entire bundle list
    bundleList = []

    # set up for higher-resolution spatial maps DOESN'T WORK ON LAPTOP
    #if hiRes:
    #    mafMap = maps.StellarDensityMap(nside=128)
    #else:
    #    mafMap = maps.StellarDensityMap(nside=64)

    # if passed a single number, turn the crowdErr into a list
    if not isinstance(crowdError, list):
        crowdVals = [np.copy(crowdError)]
    else:
        crowdVals = crowdError[:]

    # build up the bundle list. Build up a list of crowding values and
    # their column names. HARDCODED for the moment, can make the input list
    # an argument if desired.
    crowdStem = '%sCrowd' % (thisFilter)
    lCrowdCols = []
    # loop through the crowding values
    #crowdVals = [0.2, 0.1, 0.05]
    for errCrowd in crowdVals:  
        crowdName = '%s%.3f' % (crowdStem,errCrowd)
        lCrowdCols.append(crowdName) # to pass later
        metricThis = metrics.CrowdingMetric(crowding_error=errCrowd, \
                                                seeingCol='FWHMeff')
        bundleThis = metricBundles.MetricBundle(metricThis, slicer, sql, \
                                                    plotDict=plotDict, \
                                                    fileRoot=crowdName, \
                                                    runName=crowdName, \
                                                    plotFuncs=plotFuncs)

        bundleList.append(bundleThis)

    #metric = metrics.CrowdingMetric(crowding_error=crowdError, \
    #    seeingCol=seeingCol)
    #bundle = metricBundles.MetricBundle(metric,\
    #                                        slicer,sql, plotDict=plotDict, \
    #                                        plotFuncs=plotFuncs)
    #bundleList.append(bundle)
    
    # ... then the m5col
    metricCoadd = metrics.Coaddm5Metric()
    bundleCoadd = metricBundles.MetricBundle(metricCoadd,\
                                                 slicer,sql,plotDict=plotDict, \
                                                 plotFuncs=plotFuncs)
    bundleList.append(bundleCoadd)
    
    # Let's also pass through some useful statistics
    # per-HEALPIX. We'll want to bring across the output metric names
    # as well so that we can conveniently access them later.
    # some convenient plot functions
    statsCols = ['FWHMeff', 'fiveSigmaDepth', 'airmass']
    metricNames = [ 'MedianMetric', 'RobustRmsMetric', 'MinMetric', 'MaxMetric']
    statsNames = {}
    sKeyTail = '_%s_and_night_lt_%i_HEAL' % (thisFilter, tMax)

    # may as well get good plot dicts too...
    plotDicts = {}
    plotDicts['FWHMeff_MedianMetric'] = {'colorMax':2.}
    plotDicts['fiveSigmaDepth_MedianMetric'] = {'colorMax':26.}
    plotDicts['airmass_MedianMetric'] = {'colorMax':2.5}
    plotDicts['FWHMeff_RobustRmsMetric'] = {'colorMax':1.}
    plotDicts['fiveSigmaDepth_RobustRmsMetric'] = {'colorMax':2.}
    plotDicts['airmass_RobustRmsMetric'] = {'colorMax':1.}

    # initialize the minmax values for the moment
    plotDicts['FWHMeff_MinMetric'] = {'colorMax':3}
    plotDicts['FWHMeff_MaxMetric'] = {'colorMax':3}

    # ensure they all have xMax as well
    for sKey in plotDicts.keys():
        plotDicts[sKey]['xMax'] = plotDicts[sKey]['colorMax']

    for colName in statsCols:
        for metricName in metricNames:

            # lift out the appropriate plotdict
            plotDict = {}
            sDict = '%s_%s' % (colName, metricName)
            if sDict in plotDicts.keys():
                plotDict = plotDicts[sDict]

            thisMetric = getattr(metrics, metricName)
            metricObj = thisMetric(col=colName)
            bundleObj = metricBundles.MetricBundle(metricObj,slicer,sql, \
                                                       plotDict=plotDict, \
                                                       plotFuncs=plotFuncs)

            bundleList.append(bundleObj)

            # construct the output table column name and the key for
            # the bundle object
            tableCol = '%s%s_%s' % (thisFilter, colName, metricName)
            statsNames[tableCol] = 'opsim_%s_%s%s' \
                % (metricName.split('Metric')[0], colName, sKeyTail)
        
            # as a debug, see if this is actually working...
            # print tableCol, statsNames[tableCol], statsNames[tableCol]

    # try the number of visits
    col2Count = 'fiveSigmaDepth'
    metricN = metrics.CountMetric(col=col2Count)
    bundleN = metricBundles.MetricBundle(metricN, slicer, sql, \
                                             plotFuncs=plotFuncs)
    bundleList.append(bundleN)
    countCol = '%sCount' % (thisFilter)
    statsNames[countCol] = 'opsim_Count_%s%s' % (col2Count, sKeyTail)

    # convert to the bundledict...
    bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
    bgroup = metricBundles.MetricBundleGroup(bundleDict, \
                                                 opsdb, outDir=outDir, \
                                                 resultsDb=resultsDb)

    # ... and run...
    bgroup.runAll()

    # ... also plot...
    if doPlots:
        bgroup.plotAll()

    # now produce the table for this run
    nameDepth = 'opsim_CoaddM5_%s_and_night_lt_%i_HEAL' \
        % (thisFilter, tMax)
    nameCrowd = 'opsim_Crowding_To_Precision_%s_and_night_lt_%i_HEAL' \
        % (thisFilter, tMax)

    npix = bgroup.bundleDict[nameDepth].metricValues.size
    nsideFound = hp.npix2nside(npix)
    ra, dec = healpyUtils.hpid2RaDec(nside, np.arange(npix))
    cc = SkyCoord(ra=np.copy(ra), dec=np.copy(dec), frame='fk5', unit='deg')

    # boolean mask for nonzero entries
    bVal = ~bgroup.bundleDict[nameDepth].metricValues.mask

    # Generate the table
    tVals = Table()
    tVals['HEALPIX'] = np.arange(npix)
    tVals['RA'] = cc.ra.degree
    tVals['DE'] = cc.dec.degree
    tVals['l'] = cc.galactic.l.degree
    tVals['b'] = cc.galactic.b.degree

    # wrap Galactics?
    if wrapGalacs:
        bBig = tVals['l'] > 180.
        tVals['l'][bBig] -= 360.

    sCoadd = '%sCoadd' % (thisFilter)
    sCrowd = '%sCrowd' % (thisFilter)

    tVals[sCoadd] = Column(bgroup.bundleDict[nameDepth].metricValues, \
                               dtype='float')

    # REPLACE the single-crowding with the set of columns, like so:
    #tVals[sCrowd] = Column(bgroup.bundleDict[nameCrowd].metricValues, \
    #                           dtype='float')

    for colCrowd in lCrowdCols:
        tVals[colCrowd] = Column(bgroup.bundleDict[colCrowd].metricValues, \
                                     dtype='float', format='%.3f')

    # enforce rounding. Three decimal places ought to be sufficient
    # for most purposes. See if the Table constructor follows this
    # through. (DOESN'T SEEM TO WORK when writing to fits anyway...)
    tVals[sCoadd].format='%.3f'
    #tVals[sCrowd].format='%.2f'  # (may only get reported to 1 d.p. anyway)

    #tVals['%sCrowdBri' % (thisFilter)] = \
    #    np.asarray(tVals[sCrowd] < tVals[sCoadd], 'int')

    # add the mask as a boolean
    tVals['%sGood' % (thisFilter)] = \
        np.asarray(bVal, 'int')

    # now add all the summary statistics for which we asked. Try
    # specifying the datatype
    for sStat in statsNames.keys():
        tVals[sStat] = Column(\
            bgroup.bundleDict[statsNames[sStat]].metricValues, \
                dtype='float')

    tVals[countCol] = Column(bgroup.bundleDict[statsNames[countCol]].metricValues, dtype='int')

    # cut down by mask
    #tVals = tVals[bVal]

    # Set metadata and write to disk. Add comments later.
    tVals.meta['nsideFound'] = nsideFound
    tVals.meta['tMax'] = tMax
    tVals.meta['crowdError'] = crowdVals
    tVals.meta['countedCol'] = col2Count[:]

    # Can select only within strip to cut down on space requirements
    sSel=''
    if selectStrip:
        bMin = -30.
        bMax = +25.
        lMin = -150.
        lMax = 80.
        sSel = '_nrPlane'

        bStrip = (tVals['b'] >= bMin) & \
            (tVals['b'] <= bMax) & \
            (tVals['l'] >= lMin) & \
            (tVals['l'] <= lMax)

        tVals = tVals[bStrip]

        tVals.meta['sel_lMin'] = lMin
        tVals.meta['sel_lMax'] = lMax
        tVals.meta['sel_bMin'] = bMin
        tVals.meta['sel_bMax'] = bMax

    # metadata
    tVals.meta['selectStrip'] = selectStrip

    # generate output path
    pathTab = '%s/table_uncty_%s_%s_nside%i_tmax%i%s.fits' % \
        (outDir, dbFil.split('_sqlite')[0], thisFilter, nside, tMax, sSel)

    # save the table
    tVals.write(pathTab, overwrite=True)

    # give this method the capability to remove the npz file (useful
    # if we want to go to very high spatial resolution for some
    # reason):
    if cleanNpz:
        for pathNp in glob.glob('%s/*.npz' % (outDir)):
            os.remove(pathNp)

    return pathTab
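Most of the table assembly above is plain ``astropy.table``; a trimmed sketch of that part, with made-up values standing in for the MAF metric output:

import numpy as np
from astropy.table import Table, Column

tVals = Table()
tVals['HEALPIX'] = np.arange(4)
tVals['rCoadd'] = Column(np.array([26.1, 25.9, 26.3, 25.7]), dtype='float')
tVals['rCoadd'].format = '%.3f'
tVals.meta['tMax'] = 730
tVals.write('table_uncty_example.fits', overwrite=True)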
Example #17
def createTable(outlines, metaDict, colNames, colDefaults):
    """
    Creates an astropy table from inputs.

    Parameters
    ----------
    outlines : list of str
        Input lines
    metaDict : dict
        Input meta data
    colNames : list of str
        Input column names
    colDefaults : list
        Input column default values

    Returns
    -------
    table : astropy.table.Table object

    """
    # Before loading table into an astropy Table object, set lengths of Name,
    # Patch, and Type columns to 100 characters
    log = logging.getLogger('LSMTool.Load')

    converters = {}
    nameCol = 'col{0}'.format(colNames.index('Name') + 1)
    converters[nameCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    typeCol = 'col{0}'.format(colNames.index('Type') + 1)
    converters[typeCol] = [ascii.convert_numpy('{}100'.format(numpy_type))]
    if 'Patch' in colNames:
        patchCol = 'col{0}'.format(colNames.index('Patch') + 1)
        converters[patchCol] = [
            ascii.convert_numpy('{}100'.format(numpy_type))
        ]

    log.debug('Creating table...')
    table = Table.read('\n'.join(outlines),
                       guess=False,
                       format='ascii.no_header',
                       delimiter=',',
                       names=colNames,
                       comment='#',
                       data_start=0,
                       converters=converters)

    # Convert spectral index values from strings to arrays.
    if 'SpectralIndex' in table.keys():
        log.debug('Converting spectral indices...')
        specOld = table['SpectralIndex'].data.tolist()
        specVec = []
        maskVec = []
        maxLen = 0
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    maxLen = 1
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    if len(specEntry) > maxLen:
                        maxLen = len(specEntry)
            except Exception:
                pass
        log.debug(
            'Maximum number of spectral-index terms in model: {0}'.format(
                maxLen))
        for l in specOld:
            try:
                if type(l) is float or type(l) is int:
                    specEntry = [float(l)]
                    specMask = [False]
                else:
                    specEntry = [float(f) for f in l.split(';')]
                    specMask = [False] * len(specEntry)
                while len(specEntry) < maxLen:
                    specEntry.append(0.0)
                    specMask.append(True)
                specVec.append(specEntry)
                maskVec.append(specMask)
            except Exception:
                specVec.append([0.0] * maxLen)
                maskVec.append([True] * maxLen)
        specCol = MaskedColumn(name='SpectralIndex',
                               data=np.array(specVec, dtype=float))
        specCol.mask = maskVec
        specIndx = table.keys().index('SpectralIndex')
        table.remove_column('SpectralIndex')
        table.add_column(specCol, index=specIndx)

    # Convert RA and Dec to Angle objects
    log.debug('Converting RA...')
    RARaw = table['Ra'].data.tolist()
    RACol = Column(name='Ra', data=RA2Angle(RARaw))

    def raformat(val):
        return Angle(val, unit='degree').to_string(unit='hourangle', sep=':')

    RACol.format = raformat
    RAIndx = table.keys().index('Ra')
    table.remove_column('Ra')
    table.add_column(RACol, index=RAIndx)

    log.debug('Converting Dec...')
    DecRaw = table['Dec'].data.tolist()
    DecCol = Column(name='Dec', data=Dec2Angle(DecRaw))

    def decformat(val):
        return Angle(val, unit='degree').to_string(unit='degree', sep='.')

    DecCol.format = decformat
    DecIndx = table.keys().index('Dec')
    table.remove_column('Dec')
    table.add_column(DecCol, index=DecIndx)

    def fluxformat(val):
        return '{0:0.3f}'.format(val)

    table.columns['I'].format = fluxformat

    # Set column units and default values
    for i, colName in enumerate(colNames):
        log.debug("Setting units for column '{0}' to {1}".format(
            colName, allowedColumnUnits[colName.lower()]))
        table.columns[colName].unit = allowedColumnUnits[colName.lower()]

        if hasattr(table.columns[colName],
                   'filled') and colDefaults[i] is not None:
            fillVal = colDefaults[i]
            if colName == 'SpectralIndex':
                while len(fillVal) < maxLen:
                    fillVal.append(0.0)
            log.debug("Setting default value for column '{0}' to {1}".format(
                colName, fillVal))
            table.columns[colName].fill_value = fillVal
    table.meta = metaDict

    return table
Example #18
    def write(self, lines):
        """
        Writes the Header of the MRT table, aka ReadMe, which
        also contains the Byte-By-Byte description of the table.
        """
        from astropy.coordinates import SkyCoord

        # Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
        coord_systems = {
            'galactic': ('GLAT', 'GLON', 'b', 'l'),
            'ecliptic':
            ('ELAT', 'ELON', 'lat', 'lon'),  # 'geocentric*ecliptic'
            'heliographic':
            ('HLAT', 'HLON', 'lat', 'lon'),  # '_carrington|stonyhurst'
            'helioprojective': ('HPLT', 'HPLN', 'Ty', 'Tx')
        }
        eqtnames = ['RAh', 'RAm', 'RAs', 'DEd', 'DEm', 'DEs']

        # list to store indices of columns that are modified.
        to_pop = []

        # For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
        # or whose values are objects of these classes.
        for i, col in enumerate(self.cols):
            # If col is a ``Column`` object but its values are ``SkyCoord`` objects,
            # convert the whole column to ``SkyCoord`` object, which helps in applying
            # SkyCoord methods directly.
            if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
                try:
                    col = SkyCoord(col)
                except (ValueError, TypeError):
                    # If only the first value of the column is a ``SkyCoord`` object,
                    # the column cannot be converted to a ``SkyCoord`` object.
                    # These columns are converted to ``Column`` object and then converted
                    # to string valued column.
                    if not isinstance(col, Column):
                        col = Column(col)
                    col = Column([str(val) for val in col])
                    self.cols[i] = col
                    continue

            # Replace single ``SkyCoord`` column by its coordinate components if no coordinate
            # columns of the corresponding type exist yet.
            if isinstance(col, SkyCoord):
                # If coordinates are given in RA/DEC, divide each of them into hour/deg,
                # minute/arcminute, second/arcsecond columns.
                if ('ra' in col.representation_component_names.keys()
                        and len(set(eqtnames) - set(self.colnames)) == 6):
                    ra_c, dec_c = col.ra.hms, col.dec.dms
                    coords = [
                        ra_c.h.round().astype('i1'),
                        ra_c.m.round().astype('i1'), ra_c.s,
                        dec_c.d.round().astype('i1'),
                        dec_c.m.round().astype('i1'), dec_c.s
                    ]
                    coord_units = [
                        u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec
                    ]
                    coord_descrip = [
                        'Right Ascension (hour)', 'Right Ascension (minute)',
                        'Right Ascension (second)', 'Declination (degree)',
                        'Declination (arcmin)', 'Declination (arcsec)'
                    ]
                    for coord, name, coord_unit, descrip in zip(
                            coords, eqtnames, coord_units, coord_descrip):
                        # Have Sign of Declination only in the DEd column.
                        if name in ['DEm', 'DEs']:
                            coord_col = Column(list(np.abs(coord)),
                                               name=name,
                                               unit=coord_unit,
                                               description=descrip)
                        else:
                            coord_col = Column(list(coord),
                                               name=name,
                                               unit=coord_unit,
                                               description=descrip)
                        # Set default number of digits after decimal point for the
                        # second values, and deg-min to (signed) 2-digit zero-padded integer.
                        if name == 'RAs':
                            coord_col.format = '013.10f'
                        elif name == 'DEs':
                            coord_col.format = '012.9f'
                        elif name == 'RAh':
                            coord_col.format = '2d'
                        elif name == 'DEd':
                            coord_col.format = '+03d'
                        elif name.startswith(('RA', 'DE')):
                            coord_col.format = '02d'
                        self.cols.append(coord_col)
                    to_pop.append(i)  # Delete original ``SkyCoord`` column.

                # For all other coordinate types, simply divide into two columns
                # for latitude and longitude resp., with the unit kept as is.

                else:
                    frminfo = ''
                    for frame, latlon in coord_systems.items():
                        if frame in col.name and len(
                                set(latlon[:2]) - set(self.colnames)) == 2:
                            if frame != col.name:
                                frminfo = f' ({col.name})'
                            lon_col = Column(
                                getattr(col, latlon[3]),
                                name=latlon[1],
                                description=
                                f'{frame.capitalize()} Longitude{frminfo}',
                                unit=col.representation_component_units[
                                    latlon[3]],
                                format='.12f')
                            lat_col = Column(
                                getattr(col, latlon[2]),
                                name=latlon[0],
                                description=
                                f'{frame.capitalize()} Latitude{frminfo}',
                                unit=col.representation_component_units[
                                    latlon[2]],
                                format='+.12f')
                            self.cols.append(lon_col)
                            self.cols.append(lat_col)
                            to_pop.append(
                                i)  # Delete original ``SkyCoord`` column.

                # Convert all other ``SkyCoord`` columns that are not in the above three
                # representations to string valued columns. Those could either be types not
                # supported yet (e.g. 'helioprojective'), or already present and converted.
                # If there were any extra ``SkyCoord`` columns of one kind after the first one,
                # then their decomposition into their component columns has been skipped.
                # This is done in order to not create duplicate component columns.
                # Explicit renaming of the extra coordinate component columns by appending some
                # suffix to their name, so as to distinguish them, is not yet implemented.
                if i not in to_pop:
                    warnings.warn(
                        f"Coordinate system of type '{col.name}' already stored in table "
                        f"as CDS/MRT-syle columns or of unrecognized type. So column {i} "
                        f"is being skipped with designation of a string valued column "
                        f"`{self.colnames[i]}`.", UserWarning)
                    self.cols.append(
                        Column(col.to_string(), name=self.colnames[i]))
                    to_pop.append(i)  # Delete original ``SkyCoord`` column.

            # Convert all other ``mixin`` columns to ``Column`` objects.
            # Parsing these may still lead to errors!
            elif not isinstance(col, Column):
                col = Column(col)
                # If column values are ``object`` types, convert them to string.
                if np.issubdtype(col.dtype, np.dtype(object).type):
                    col = Column([str(val) for val in col])
                self.cols[i] = col

        # Delete original ``SkyCoord`` columns, if there were any.
        for i in to_pop[::-1]:
            self.cols.pop(i)

        # Check for any left over extra coordinate columns.
        if any(x in self.colnames for x in ['RAh', 'DEd', 'ELON', 'GLAT']):
            # At this point any extra ``SkyCoord`` columns should have been converted to string
            # valued columns, together with issuance of a warning, by the coordinate parser above.
            # This test is just left here as a safeguard.
            for i, col in enumerate(self.cols):
                if isinstance(col, SkyCoord):
                    self.cols[i] = Column(col.to_string(),
                                          name=self.colnames[i])
                    message = (
                        'Table already has coordinate system in CDS/MRT-style columns. '
                        f'So column {i} should have been replaced already with '
                        f'a string valued column `{self.colnames[i]}`.')
                    raise core.InconsistentTableError(message)

        # Get Byte-By-Byte description and fill the template
        bbb_template = Template('\n'.join(BYTE_BY_BYTE_TEMPLATE))
        byte_by_byte = bbb_template.substitute({
            'file':
            'table.dat',
            'bytebybyte':
            self.write_byte_by_byte()
        })

        # Fill up the full ReadMe
        rm_template = Template('\n'.join(MRT_TEMPLATE))
        readme_filled = rm_template.substitute({'bytebybyte': byte_by_byte})
        lines.append(readme_filled)
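This header writer backs astropy's MRT output; end to end it is reached through ``Table.write`` with the ``ascii.mrt`` format (available in recent astropy versions). A small sketch:

import sys
from astropy.coordinates import SkyCoord
from astropy.table import Table

t = Table()
t['name'] = ['src1', 'src2']
t['coord'] = SkyCoord(ra=[10.5, 180.0], dec=[-5.25, 42.0], unit='deg')
t.write(sys.stdout, format='ascii.mrt')   # emits the ReadMe header plus data rows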