Example #1
import six
from casacore.tables import table, maketabdesc, makecoldesc, makearrcoldesc


def addImagingColumns(msname, ack=True):
    """ Add the columns to an MS needed for the casa imager.

    It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
    It also sets the CHANNEL_SELECTION keyword needed for the older casa
    imagers.

    A column is not added if it already exists.
    """
    # numpy is needed
    import numpy as np
    # Open the MS
    t = table(msname, readonly=False, ack=False)
    cnames = t.colnames()
    # Get the description of the DATA column.
    try:
        cdesc = t.getcoldesc('DATA')
    except Exception:
        raise ValueError('Column DATA does not exist')
    # Determine if the DATA storage specification is tiled.
    hasTiled = False
    try:
        dminfo = t.getdminfo("DATA")
        if dminfo['TYPE'][:5] == 'Tiled':
            hasTiled = True
    except Exception:
        hasTiled = False
    # Use TiledShapeStMan if needed.
    if not hasTiled:
        dminfo = {'TYPE': 'TiledShapeStMan',
                  'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
    # Add the columns (if not existing). Use the description of the DATA column.
    if 'MODEL_DATA' in cnames:
        six.print_("Column MODEL_DATA not added; it already exists")
    else:
        dminfo['NAME'] = 'modeldata'
        cdesc['comment'] = 'The model data column'
        t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
        if ack:
            six.print_("added column MODEL_DATA")
    if 'CORRECTED_DATA' in cnames:
        six.print_("Column CORRECTED_DATA not added; it already exists")
    else:
        dminfo['NAME'] = 'correcteddata'
        cdesc['comment'] = 'The corrected data column'
        t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
        if ack:
            six.print_("'added column CORRECTED_DATA")
    if 'IMAGING_WEIGHT' in cnames:
        six.print_("Column IMAGING_WEIGHT not added; it already exists")
    else:
        # Add IMAGING_WEIGHT which is 1-dim and has type float.
        # It needs a shape, otherwise the CASA imager complains.
        shp = []
        if 'shape' in cdesc:
            shp = cdesc['shape']
        if len(shp) > 0:
            shp = [shp[0]]  # use nchan from shape
        else:
            shp = [t.getcell('DATA', 0).shape[0]]  # use nchan from actual data
        cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
                            valuetype='float')
        dminfo = {'TYPE': 'TiledShapeStMan',
                  'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
        dminfo['NAME'] = 'imagingweight'
        t.addcols(maketabdesc(cd), dminfo)
        if ack:
            six.print_("added column IMAGING_WEIGHT")
    # Add or overwrite keyword CHANNEL_SELECTION.
    if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
        t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
    # Define the CHANNEL_SELECTION keyword containing the channels of
    # all spectral windows.
    tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
    nchans = tspw.getcol('NUM_CHAN')
    chans = [[0, nch] for nch in nchans]
    t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
    if ack:
        six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
    # Flush the table to make sure it is written.
    t.flush()
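
A minimal usage sketch (the MS name 'my.ms' is hypothetical): it calls the function defined above and then reopens the MS to check that the new columns are present.

from casacore.tables import table

addImagingColumns('my.ms')
t = table('my.ms', ack=False)
print(t.colnames())   # should now include MODEL_DATA, CORRECTED_DATA and IMAGING_WEIGHT
t.close()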
Example #2
import numpy as np
from casacore.tables import table, makecoldesc


def msconcat(names, newname, concatTime=False):
    """Virtually concatenate multiple MeasurementSets.

    Multiple MeasurementSets are concatenated into a single MeasurementSet.
    The concatenation is done in an entirely or almost entirely virtual way,
    so hardly any data are copied. This makes the command very fast, and
    hardly any extra disk space is needed.

    The MSs can be concatenated in time or frequency (spectral windows).
    If concatenated in time, no indices need to be updated and the
    concatenation is done in a single step.

    If spectral windows are concatenated, the data-description-ids and
    spectral-window-ids in the resulting MS and its subtables are updated
    to make them unique.
    The spectral concatenation is done in two steps and results in two MSs:

    1. The input MSs are virtually concatenated resulting in the
       MeasurementSet `<newname>_CONCAT`.
    2. The MeasurementSet <newname> is created. It references all columns
       in `<newname>_CONCAT` with the exception of the DATA_DESC_ID column.
       This column is copied and updated to make the ids correct.
       Furthermore the MS contains a copy of all subtables (with the exception
       of SORTED_TABLE), where the DATA_DESCRIPTION and SPECTRAL_WINDOW
       subtables are the concatenation of those subtables in the input MSs.
       The ids in the resulting subtables are updated.

    The FEED, FREQ_OFFSET, SOURCE, and SYSCAL subtables also have a
    SPECTRAL_WINDOW_ID column. Currently these subtables are not concatenated
    nor are their ids updated.

    `names`
      A sequence containing the names of the MeasurementSets to concatenate.
    `newname`
      The name of the resulting MeasurementSet. A MeasurementSet with this
      name followed by `_CONCAT` will also be created (and must be kept).
    `concatTime`
      False (the default) means concatenation in frequency; the spectral window
      ids will be adjusted as explained above. True means concatenation in time.

    """

    if len(names) == 0:
        raise ValueError('No input MSs given')
    # Concatenation in time is straightforward.
    if concatTime:
        t = table(names[0])
        if 'SYSCAL' in t.fieldnames():
            tn = table(names, concatsubtables='SYSCAL')
        else:
            tn = table(names)
        t.close()
        tn.rename(newname)
        return
    # First concatenate the given tables as another table.
    # The SPECTRAL_WINDOW and DATA_DESCRIPTION subtables are concatenated
    # and changed later.
    # Those subtables cannot be concatenated here, because the deep copy of
    # them fails due to the rename of the main table.
    tn = table(names)
    tdesc = tn.getdesc()
    tn.rename(newname + '_CONCAT')
    tn.flush()
    # Now create a table where all columns forward to the concatenated table,
    # but create a stored column for the data description id, because it has
    # to be changed.
    # The new column is filled at the end.
    tnew = table(newname, tdesc, nrow=tn.nrows(), dminfo={
        '1': {'TYPE': 'ForwardColumnEngine',
              'NAME': 'ForwardData',
              'COLUMNS': tn.colnames(),
              'SPEC': {'FORWARDTABLE': tn.name()}}})
    # Remove the DATA_DESC_ID column and recreate it in a stored way.
    tnew.removecols('DATA_DESC_ID')
    tnew.addcols(makecoldesc('DATA_DESC_ID', tdesc['DATA_DESC_ID']),
                 dminfo={'TYPE': 'IncrementalStMan',
                         'NAME': 'DDID',
                         'SPEC': {}})
    # Copy the table keywords.
    keywords = tn.getkeywords()
    tnew.putkeywords(keywords)
    # Copy all column keywords.
    for col in tn.colnames():
        tnew.putcolkeywords(col, tn.getcolkeywords(col))
    # Make a deep copy of all subtables (except SORTED_TABLE).
    for key in keywords:
        if key != 'SORTED_TABLE':
            val = keywords[key]
            if isinstance(val, str):
                tsub = table(val, ack=False)
                tsubn = tsub.copy(newname + '/' + key, deep=True)
                tnew.putkeyword(key, tsubn)
    tnew.flush()
    # Now we have to take care that the subbands are numbered correctly.
    # The DATA_DESCRIPTION and SPECTRAL_WINDOW subtables are concatenated.
    # The ddid in the main table and spwid in DD subtable have to be updated.
    tnewdd = table(tnew.getkeyword('DATA_DESCRIPTION'),
                   readonly=False, ack=False)
    tnewspw = table(tnew.getkeyword('SPECTRAL_WINDOW'),
                    readonly=False, ack=False)
    nrdd = 0
    nrspw = 0
    nrmain = 0
    for name in names:
        t = table(name, ack=False)
        tdd = table(t.getkeyword('DATA_DESCRIPTION'), ack=False)
        tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
        # The first table already has its subtable copied.
        # Append the subtables of the other ones.
        if nrdd > 0:
            tnewdd.addrows(tdd.nrows())
            for i in range(tdd.nrows()):
                tnewdd[nrdd + i] = tdd[i]  # copy row i
            tnewspw.addrows(tspw.nrows())
            for i in range(tspw.nrows()):
                tnewspw[nrspw + i] = tspw[i]
        tnewdd.putcol('SPECTRAL_WINDOW_ID',
                      tdd.getcol('SPECTRAL_WINDOW_ID') + nrspw,
                      nrdd, tdd.nrows())
        tnew.putcol('DATA_DESC_ID',
                    t.getcol('DATA_DESC_ID') + nrdd,
                    nrmain, t.nrows())
        nrdd += tdd.nrows()
        nrspw += tspw.nrows()
        nrmain += t.nrows()
    # Overwrite keyword CHANNEL_SELECTION.
    if 'MODEL_DATA' in tnew.colnames():
        if 'CHANNEL_SELECTION' in tnew.colkeywordnames('MODEL_DATA'):
            tnew.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
            # Define the CHANNEL_SELECTION keyword containing the channels of
            # all spectral windows.
            tspw = table(tnew.getkeyword('SPECTRAL_WINDOW'), ack=False)
            nchans = tspw.getcol('NUM_CHAN')
            chans = [[0, nch] for nch in nchans]
            tnew.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION',
                               np.int32(chans))
    # Future work:
    #   If SOURCE subtables have to be concatenated, the FIELD and DOPPLER
    #   subtables have to be dealt with as well.
    #   The FEED table can be concatenated; the FEED_ID can stay the same,
    #   but spwid has to be updated.
    #   The FREQ_OFFSET table is stand-alone, thus can simply be concatenated
    #   and have spwid updated.
    #   The SYSCAL table can be very large, so it might be better to virtually
    #   concatenate it instead of making a copy (just like the main table).
    # Flush the table and subtables.
    tnew.flush(True)
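
A minimal usage sketch, assuming two hypothetical single-band MSs: they are concatenated in frequency (the default), which also creates 'combined.ms_CONCAT' that must be kept next to the result.

from casacore.tables import table

msconcat(['band1.ms', 'band2.ms'], 'combined.ms')
t = table('combined.ms', ack=False)
print(t.nrows())      # rows of both inputs; DATA_DESC_ID has been renumbered
t.close()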
Example #3
import numpy as np
from six import string_types
from casacore.tables import table, makecoldesc


def msconcat(names, newname, concatTime=False):
    """Virtually concatenate multiple MeasurementSets.

    Multiple MeasurementSets are concatenated into a single MeasurementSet.
    The concatenation is done in an entirely or almost entirely virtual way,
    so hardly any data are copied. This makes the command very fast, and
    hardly any extra disk space is needed.

    The MSs can be concatenated in time or frequency (spectral windows).
    If concatenated in time, no indices need to be updated and the
    concatenation is done in a single step.

    If spectral windows are concatenated, the data-description-ids and
    spectral-window-ids in the resulting MS and its subtables are updated
    to make them unique.
    The spectral concatenation is done in two steps and results in two MSs:

    1. The input MSs are virtually concatenated resulting in the
       MeasurementSet `<newname>_CONCAT`.
    2. The MeasurementSet <newname> is created. It references all columns
       in `<newname>_CONCAT` with the exception of the DATA_DESC_ID column.
       This column is copied and updated to make the ids correct.
       Furthermore the MS contains a copy of all subtables (with the exception
       of SORTED_TABLE), where the DATA_DESCRIPTION and SPECTRAL_WINDOW
       subtables are the concatenation of those subtables in the input MSs.
       The ids in the resulting subtables are updated.

    The FEED, FREQ_OFFSET, SOURCE, and SYSCAL subtables also have a
    SPECTRAL_WINDOW_ID column. Currently these subtables are not concatenated
    nor are their ids updated.

    `names`
      A sequence containing the names of the MeasurementSets to concatenate.
    `newname`
      The name of the resulting MeasurementSet. A MeasurementSet with this
      name followed by `_CONCAT` will also be created (and must be kept).
    `concatTime`
      False (the default) means concatenation in frequency; the spectral window
      ids will be adjusted as explained above. True means concatenation in time.

    """

    if len(names) == 0:
        raise ValueError('No input MSs given')
    # Concatenation in time is straightforward.
    if concatTime:
        t = table(names[0])
        if 'SYSCAL' in t.fieldnames():
            tn = table(names, concatsubtables='SYSCAL')
        else:
            tn = table(names)
        t.close()
        tn.rename(newname)
        return
    # First concatenate the given tables as another table.
    # The SPECTRAL_WINDOW and DATA_DESCRIPTION subtables are concatenated
    # and changed later.
    # Those subtables cannot be concatenated here, because the deep copy of
    # them fails due to the rename of the main table.
    tn = table(names)
    tdesc = tn.getdesc()
    tn.rename(newname + '_CONCAT')
    tn.flush()
    # Now create a table where all columns forward to the concatenated table,
    # but create a stored column for the data description id, because it has
    # to be changed.
    # The new column is filled at the end.
    tnew = table(newname, tdesc, nrow=tn.nrows(), dminfo={
        '1': {'TYPE': 'ForwardColumnEngine',
              'NAME': 'ForwardData',
              'COLUMNS': tn.colnames(),
              'SPEC': {'FORWARDTABLE': tn.name()}}})
    # Remove the DATA_DESC_ID column and recreate it in a stored way.
    tnew.removecols('DATA_DESC_ID')
    tnew.addcols(makecoldesc('DATA_DESC_ID', tdesc['DATA_DESC_ID']),
                 dminfo={'TYPE': 'IncrementalStMan',
                         'NAME': 'DDID',
                         'SPEC': {}})
    # Copy the table keywords.
    keywords = tn.getkeywords()
    tnew.putkeywords(keywords)
    # Copy all column keywords.
    for col in tn.colnames():
        tnew.putcolkeywords(col, tn.getcolkeywords(col))
    # Make a deep copy of all subtables (except SORTED_TABLE).
    for key in keywords:
        if key != 'SORTED_TABLE':
            val = keywords[key]
            if isinstance(val, string_types):
                tsub = table(val, ack=False)
                tsubn = tsub.copy(newname + '/' + key, deep=True)
                tnew.putkeyword(key, tsubn)
    tnew.flush()
    # Now we have to take care that the subbands are numbered correctly.
    # The DATA_DESCRIPTION and SPECTRAL_WINDOW subtables are concatenated.
    # The ddid in the main table and spwid in DD subtable have to be updated.
    tnewdd = table(tnew.getkeyword('DATA_DESCRIPTION'),
                   readonly=False, ack=False)
    tnewspw = table(tnew.getkeyword('SPECTRAL_WINDOW'),
                    readonly=False, ack=False)
    nrdd = 0
    nrspw = 0
    nrmain = 0
    for name in names:
        t = table(name, ack=False)
        tdd = table(t.getkeyword('DATA_DESCRIPTION'), ack=False)
        tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
        # The first table already has its subtable copied.
        # Append the subtables of the other ones.
        if nrdd > 0:
            tnewdd.addrows(tdd.nrows())
            for i in range(tdd.nrows()):
                tnewdd[nrdd + i] = tdd[i]  # copy row i
            tnewspw.addrows(tspw.nrows())
            for i in range(tspw.nrows()):
                tnewspw[nrspw + i] = tspw[i]
        tnewdd.putcol('SPECTRAL_WINDOW_ID',
                      tdd.getcol('SPECTRAL_WINDOW_ID') + nrspw,
                      nrdd, tdd.nrows())
        tnew.putcol('DATA_DESC_ID',
                    t.getcol('DATA_DESC_ID') + nrdd,
                    nrmain, t.nrows())
        nrdd += tdd.nrows()
        nrspw += tspw.nrows()
        nrmain += t.nrows()
    # Overwrite keyword CHANNEL_SELECTION.
    if 'MODEL_DATA' in tnew.colnames():
        if 'CHANNEL_SELECTION' in tnew.colkeywordnames('MODEL_DATA'):
            tnew.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
            # Define the CHANNEL_SELECTION keyword containing the channels of
            # all spectral windows.
            tspw = table(tnew.getkeyword('SPECTRAL_WINDOW'), ack=False)
            nchans = tspw.getcol('NUM_CHAN')
            chans = [[0, nch] for nch in nchans]
            tnew.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION',
                               np.int32(chans))
    # Future work:
    #   If SOURCE subtables have to be concatenated, the FIELD and DOPPLER
    #   subtables have to be dealt with as well.
    #   The FEED table can be concatenated; the FEED_ID can stay the same,
    #   but spwid has to be updated.
    #   The FREQ_OFFSET table is stand-alone, thus can simply be concatenated
    #   and have spwid updated.
    #   The SYSCAL table can be very large, so it might be better to virtually
    #   concatenate it instead of making a copy (just like the main table).
    # Flush the table and subtables.
    tnew.flush(True)
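
For completeness, a sketch of concatenation in time with the same kind of hypothetical inputs: with concatTime=True no ids are changed and only the virtually concatenated MS is written. Since the result only references the inputs, the input MSs must stay on disk.

msconcat(['night1.ms', 'night2.ms'], 'allnights.ms', concatTime=True)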