Example #1
    def copy_data(self, inms):
        # Create the CORRECTED_DATA column if needed and copy DATA into it
        t = pt.table(inms, readonly=False, ack=True)
        data = t.getcol('DATA')
        pt.addImagingColumns(inms, ack=True)
        t.putcol('CORRECTED_DATA', data)
        t.close()
Example #2
    def copy_data_invert(self, inms):
        # Inverse of copy_data: copy CORRECTED_DATA back into DATA
        t = pt.table(inms, readonly=False, ack=True)
        data = t.getcol('CORRECTED_DATA')
        pt.addImagingColumns(inms, ack=True)
        t.putcol('DATA', data)
        t.close()
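One way to sanity-check such a copy is a TaQL query comparing the two columns. A minimal sketch, assuming pt is pyrap.tables and 'example.ms' stands in for a real Measurement Set:

import pyrap.tables as pt

inms = 'example.ms'  # hypothetical path, for illustration only

t = pt.table(inms)
# Select rows where any visibility in CORRECTED_DATA differs from DATA;
# an empty result means the copy succeeded.
nbad = pt.taql('SELECT FROM $t WHERE any(DATA != CORRECTED_DATA)').nrows()
t.close()
print('mismatching rows:', nbad)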
Example #3
def check_imaging_weight(mslist_name):

    # returns a boolean that says whether it did something
    result = False
    report('Checking for IMAGING_WEIGHT in input MSs')
    with open(mslist_name) as f:
        mslist = [s.strip() for s in f]
    for ms in mslist:
        t = pt.table(ms)
        try:
            dummy = t.getcoldesc('IMAGING_WEIGHT')
        except RuntimeError:
            dummy = None
        t.close()
        if dummy is not None:
            warn('Table ' + ms + ' already has imaging weights')
        else:
            pt.addImagingColumns(ms)
            result = True
    return result
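The try/except around getcoldesc can also be written as a membership test on table.colnames(), which avoids relying on the exception type. A minimal sketch, assuming pt is pyrap.tables and 'example.ms' is a placeholder:

import pyrap.tables as pt

ms = 'example.ms'  # hypothetical path

t = pt.table(ms)
has_weights = 'IMAGING_WEIGHT' in t.colnames()  # is the column present?
t.close()
if not has_weights:
    pt.addImagingColumns(ms)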
Example #4
File: ms.py  Project: AlanLoh/nenums
def addCorrected(msname):
    """ Copy DATA to CORRECTED_DATA column.

        Parameters
        ----------
        * **msname** : str
            Name of the Measurement Set
    """
    isMS(msname)

    mstable = table(msname, readonly=False)
    data = mstable.getcol('DATA')
    addImagingColumns(msname)
    mstable.putcol('CORRECTED_DATA', data)
    mstable.flush()
    mstable.close()

    updateHist(msname=msname,
               message='CORRECTED_DATA column copied from DATA')
    return
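For large Measurement Sets the getcol/putcol round trip holds the entire DATA column in memory; a TaQL UPDATE performs the same copy inside the table system. A minimal sketch under the same pyrap.tables import, with 'example.ms' as a placeholder:

import pyrap.tables as pt

msname = 'example.ms'  # hypothetical path

pt.addImagingColumns(msname)
t = pt.table(msname, readonly=False)
# Copy DATA into CORRECTED_DATA without loading the column into Python.
pt.taql('UPDATE $t SET CORRECTED_DATA=DATA')
t.close()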
Example #5
#!/usr/bin/env python

# Script that adds imaging columns to a MS
#
# File:        addImagingColumns.py
# Author:      Sven Duscha ([email protected])
# Date:        2011-02-14
# Last change: 2011-10-29

import sys
try:
  import pyrap.tables as pt
except ImportError:
  print "addImagingColumns.py: could not import pyrap.tables"
  print "WARN: No imaging columns added"
  sys.exit(1)

if len(sys.argv) > 2:
  print("addImagingColumns.py: Too many arguments")
  sys.exit(1)
elif len(sys.argv) == 1:
  print("addImagingColumns.py: No MS given")
  sys.exit(1)
else:
  filename = sys.argv[1]              # MS filename is the first argument
  pt.addImagingColumns(filename)      # Add imaging columns
  sys.exit(0)
Example #6
    def run(self, environment, parset, working_dir, processed_ms_dir,
            ndppp_executable, output_measurement_set, time_slices_per_image,
            subbands_per_group, raw_ms_mapfile, asciistat_executable,
            statplot_executable, msselect_executable, rficonsole_executable,
            add_beam_tables):
        """
        Entry point for the node recipe
        """
        self.environment.update(environment)
        with log_time(self.logger):
            input_map = DataMap.load(raw_ms_mapfile)

            #******************************************************************
            # I. Create the directories used in this recipe
            create_directory(processed_ms_dir)

            # Ensure the time slice directory exists and is empty:
            # stale data is problematic for DPPP
            time_slice_dir = os.path.join(working_dir, _time_slice_dir_name)
            create_directory(time_slice_dir)
            for root, dirs, files in os.walk(time_slice_dir):
                for file_to_remove in files:
                    os.unlink(os.path.join(root, file_to_remove))
                for dir_to_remove in dirs:
                    shutil.rmtree(os.path.join(root, dir_to_remove))
            self.logger.debug("Created directory: {0}".format(time_slice_dir))
            self.logger.debug("and assured it is empty")

            #******************************************************************
            # 1. Copy the input files
            copied_ms_map = self._copy_input_files(processed_ms_dir, input_map)

            #******************************************************************
            # 2. Run DPPP: collect frequencies into larger groups
            time_slices_path_list = \
                self._run_dppp(working_dir, time_slice_dir,
                    time_slices_per_image, copied_ms_map, subbands_per_group,
                    processed_ms_dir, parset, ndppp_executable)

            # If no timeslices were created, bail out with exit status 1
            if len(time_slices_path_list) == 0:
                self.logger.error("No timeslices were created.")
                self.logger.error("Exiting with error state 1")
                return 1

            self.logger.debug(
                "Produced time slices: {0}".format(time_slices_path_list))
            #***********************************************************
            # 3. Run rficonsole: flag data points which are corrupted
            self._run_rficonsole(rficonsole_executable, time_slice_dir,
                                 time_slices_path_list)

            #******************************************************************
            # 4. Add imaging columns to each timeslice
            # (NDPPP fails later if these columns are not present)
            for time_slice_path in time_slices_path_list:
                pt.addImagingColumns(time_slice_path)
                self.logger.debug(
                    "Added imaging columns to time_slice: {0}".format(
                        time_slice_path))

            #*****************************************************************
            # 5. Filter bad stations
            time_slice_filtered_path_list = self._filter_bad_stations(
                time_slices_path_list, asciistat_executable,
                statplot_executable, msselect_executable)

            #*****************************************************************
            # Add measurement (beam) tables
            if add_beam_tables:
                self.add_beam_tables(time_slice_filtered_path_list)

            #******************************************************************
            # 6. Perform the (virtual) concatenation of the timeslices
            self._concat_timeslices(time_slice_filtered_path_list,
                                    output_measurement_set)

            #******************************************************************
            # return
            self.outputs["time_slices"] = \
                time_slices_path_list

        return 0
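For context, the (virtual) concatenation in step 6 is the kind of operation pyrap's msconcat provides: the output MS references the time slices rather than copying their data. A minimal sketch with hypothetical paths (the recipe's own _concat_timeslices may differ):

import pyrap.tables as pt

time_slices = ['slice0.ms', 'slice1.ms']  # hypothetical inputs
output_ms = 'concatenated.ms'

# Build a virtually concatenated MS in time order; the inputs
# must stay on disk since the output only references them.
pt.msconcat(time_slices, output_ms, concatTime=True)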
Example #7
#!/usr/bin/env python

# Script that adds imaging columns to a MS
#
# File:        addImagingColumns.py
# Author:      Sven Duscha ([email protected])
# Date:        2011-02-14
# Last change: 2011-10-29

import sys
try:
    import pyrap.tables as pt
except ImportError:
    print "addImagingColumns.py: could not import pyrap.tables"
    print "WARN: No imaging columns added"
    sys.exit(1)

if len(sys.argv) > 2:
    print("addImagingColumns.py: Too many arguments")
    sys.exit(1)
elif len(sys.argv) == 1:
    print("addImagingColumns.py: No MS given")
    sys.exit(1)
else:
    filename = sys.argv[1]  # MS filename is the first argument
    pt.addImagingColumns(filename)  # Add imaging columns
    sys.exit(0)
Example #8
    def run(self, environment, parset, working_dir, processed_ms_dir,
            ndppp_executable, output_measurement_set, subbandgroups_per_ms,
            subbands_per_subbandgroup, ms_mapfile, asciistat_executable,
            statplot_executable, msselect_executable, rficonsole_executable,
            add_beam_tables, globalfs, final_output_path):
        """
        Entry point for the node recipe
        """
        self.environment.update(environment)
        self.globalfs = globalfs

        with log_time(self.logger):
            input_map = DataMap.load(ms_mapfile)
            #******************************************************************
            # I. Create the directories used in this recipe
            create_directory(processed_ms_dir)
            create_directory(working_dir)

            # Ensure the time slice directory exists and is empty:
            # stale data is problematic for DPPP
            time_slice_dir = os.path.join(working_dir, _time_slice_dir_name)
            create_directory(time_slice_dir)
            for root, dirs, files in os.walk(time_slice_dir):
                for file_to_remove in files:
                    os.unlink(os.path.join(root, file_to_remove))
                for dir_to_remove in dirs:
                    shutil.rmtree(os.path.join(root, dir_to_remove))
            self.logger.debug("Created directory: {0}".format(time_slice_dir))
            self.logger.debug("and assured it is empty")

            #******************************************************************
            # 1. Copy the input files
            processed_ms_map = self._copy_input_files(processed_ms_dir,
                                                      input_map)

            #******************************************************************
            # 2. Run DPPP: collect frequencies into larger groups
            time_slices_path_list = \
                self._run_dppp(working_dir, time_slice_dir,
                    subbandgroups_per_ms, processed_ms_map, subbands_per_subbandgroup,
                    processed_ms_dir, parset, ndppp_executable)

            # If no timeslices were created, bail out with exit status 1
            if len(time_slices_path_list) == 0:
                self.logger.error("No timeslices were created.")
                self.logger.error("Exiting with error state 1")
                return 1

            self.logger.debug(
                "Produced time slices: {0}".format(time_slices_path_list))

            #***********************************************************
            # 3. Run rficonsole: flag data points which are corrupted
            if False:
                self._run_rficonsole(rficonsole_executable, time_slice_dir,
                                     time_slices_path_list)

            #******************************************************************
            # 4. Add imaging columns to each timeslice
            # (NDPPP fails later if these columns are not present)
            for time_slice_path in time_slices_path_list:
                pt.addImagingColumns(time_slice_path)
                self.logger.debug(
                    "Added imaging columns to time_slice: {0}".format(
                        time_slice_path))

            #*****************************************************************
            # 5. Filter bad stations
            #if not(asciistat_executable == "" or
            #     statplot_executable == "" or
            #     msselect_executable == "" or True):
            if False:
                time_slice_filtered_path_list = self._filter_bad_stations(
                    time_slices_path_list, asciistat_executable,
                    statplot_executable, msselect_executable)
            else:
                # use the unfiltered list
                time_slice_filtered_path_list = time_slices_path_list

            #*****************************************************************
            # 6. Add measurement (beam) tables
            if add_beam_tables:
                self.add_beam_tables(time_slice_filtered_path_list)

            #******************************************************************
            # 7. Convert polarization
            self._convert_polarization(time_slice_filtered_path_list)

            #******************************************************************
            # 8. Perform the (virtual) concatenation of the timeslices
            self._concat_timeslices(time_slice_filtered_path_list,
                                    output_measurement_set)

            #*****************************************************************
            # 9. Use table.copy(deep=True) to copy the MS to the final
            # output location, creating a new Measurement Set.
            self._deep_copy_to_output_location(output_measurement_set,
                                               final_output_path)

            # Write the actually used ms for the created dataset to the input
            # mapfile
            processed_ms_map.save(ms_mapfile)

            #******************************************************************
            # return
            self.outputs["time_slices"] = \
                time_slices_path_list

        return 0
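Step 9's deep copy can be sketched directly with table.copy: deep=True materializes any referenced (e.g. virtually concatenated) data into a new, self-contained table at the target path. A minimal sketch with hypothetical paths:

import pyrap.tables as pt

output_measurement_set = 'concatenated.ms'  # hypothetical
final_output_path = 'final.ms'              # hypothetical

t = pt.table(output_measurement_set)
t.copy(final_output_path, deep=True)  # writes a self-contained MS
t.close()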
Example #9
    def run(self, environment, parset, working_dir, processed_ms_dir,
            ndppp_executable, output_measurement_set,
            subbandgroups_per_ms, subbands_per_subbandgroup, ms_mapfile,
            asciistat_executable, statplot_executable, msselect_executable,
            rficonsole_executable, add_beam_tables, globalfs, final_output_path):
        """
        Entry point for the node recipe
        """
        self.environment.update(environment)
        self.globalfs = globalfs

        with log_time(self.logger):
            input_map = DataMap.load(ms_mapfile)
            #******************************************************************
            # I. Create the directories used in this recipe
            create_directory(processed_ms_dir)
            create_directory(working_dir)
            create_directory(os.path.dirname(output_measurement_set))
            create_directory(os.path.dirname(final_output_path))

            # Ensure the time slice directory exists and is empty:
            # stale data is problematic for DPPP
            time_slice_dir = os.path.join(working_dir, _time_slice_dir_name)
            create_directory(time_slice_dir)
            for root, dirs, files in os.walk(time_slice_dir):
                for file_to_remove in files:
                    os.unlink(os.path.join(root, file_to_remove))
                for dir_to_remove in dirs:
                    shutil.rmtree(os.path.join(root, dir_to_remove))
            self.logger.debug("Created directory: {0}".format(time_slice_dir))
            self.logger.debug("and assured it is empty")

            #******************************************************************
            # 1. Copy the input files
            processed_ms_map = self._copy_input_files(
                            processed_ms_dir, input_map)

            #******************************************************************
            # 2. Run DPPP: collect frequencies into larger groups
            time_slices_path_list = \
                self._run_dppp(working_dir, time_slice_dir,
                    subbandgroups_per_ms, processed_ms_map, subbands_per_subbandgroup,
                    processed_ms_dir, parset, ndppp_executable)

            # If no timeslices were created, bail out with exit status 1
            if len(time_slices_path_list) == 0:
                self.logger.error("No timeslices were created.")
                self.logger.error("Exiting with error state 1")
                return 1

            self.logger.debug(
                "Produced time slices: {0}".format(time_slices_path_list))

            #***********************************************************
            # 3. Run rficonsole: flag data points which are corrupted
            if False:
                self._run_rficonsole(rficonsole_executable, time_slice_dir,
                                     time_slices_path_list)

            #******************************************************************
            # 4. Add imaging columns to each timeslice
            # (NDPPP fails later if these columns are not present)
            for time_slice_path in time_slices_path_list:
                pt.addImagingColumns(time_slice_path)
                self.logger.debug(
                    "Added imaging columns to time_slice: {0}".format(
                        time_slice_path))

            #*****************************************************************
            # 5. Filter bad stations
            #if not(asciistat_executable == "" or
            #     statplot_executable == "" or
            #     msselect_executable == "" or True):
            if False:
                time_slice_filtered_path_list = self._filter_bad_stations(
                    time_slices_path_list, asciistat_executable,
                    statplot_executable, msselect_executable)
            else:
                # use the unfiltered list
                time_slice_filtered_path_list = time_slices_path_list

            #*****************************************************************
            # 6. Add measurement (beam) tables
            if add_beam_tables:
                self.add_beam_tables(time_slice_filtered_path_list)

            #******************************************************************
            # 7. Convert polarization
            self._convert_polarization(time_slice_filtered_path_list)

            #******************************************************************
            # 8. Perform the (virtual) concatenation of the timeslices
            self._concat_timeslices(time_slice_filtered_path_list,
                                    output_measurement_set)

            #*****************************************************************
            # 9. Use table.copy(deep=True) to copy the MS to the final
            # output location, creating a new Measurement Set.
            self._deep_copy_to_output_location(output_measurement_set,
                                               final_output_path)

            # Write the actually used ms for the created dataset to the input
            # mapfile
            processed_ms_map.save(ms_mapfile)

            #******************************************************************
            # return
            self.outputs["time_slices"] = \
                time_slices_path_list

        return 0
Example #10
    def run(self, environment, parset, working_dir, processed_ms_dir,
            ndppp_executable, output_measurement_set,
            time_slices_per_image, subbands_per_group, raw_ms_mapfile,
            asciistat_executable, statplot_executable, msselect_executable,
            rficonsole_executable, add_beam_tables):
        """
        Entry point for the node recipe
        """
        self.environment.update(environment)
        with log_time(self.logger):
            input_map = DataMap.load(raw_ms_mapfile)

            #******************************************************************
            # I. Create the directories used in this recipe
            create_directory(processed_ms_dir)

            # Ensure the time slice directory exists and is empty:
            # stale data is problematic for DPPP
            time_slice_dir = os.path.join(working_dir, _time_slice_dir_name)
            create_directory(time_slice_dir)
            for root, dirs, files in os.walk(time_slice_dir):
                for file_to_remove in files:
                    os.unlink(os.path.join(root, file_to_remove))
                for dir_to_remove in dirs:
                    shutil.rmtree(os.path.join(root, dir_to_remove))
            self.logger.debug("Created directory: {0}".format(time_slice_dir))
            self.logger.debug("and assured it is empty")

            #******************************************************************
            # 1. Copy the input files
            copied_ms_map = self._copy_input_files(
                            processed_ms_dir, input_map)

            #******************************************************************
            # 2. Run DPPP: collect frequencies into larger groups
            time_slices_path_list = \
                self._run_dppp(working_dir, time_slice_dir,
                    time_slices_per_image, copied_ms_map, subbands_per_group,
                    processed_ms_dir, parset, ndppp_executable)

            # If no timeslices were created, bail out with exit status 1
            if len(time_slices_path_list) == 0:
                self.logger.error("No timeslices were created.")
                self.logger.error("Exiting with error state 1")
                return 1

            self.logger.debug(
                "Produced time slices: {0}".format(time_slices_path_list))
            #***********************************************************
            # 3. Run rficonsole: flag data points which are corrupted
            self._run_rficonsole(rficonsole_executable, time_slice_dir,
                                 time_slices_path_list)

            #******************************************************************
            # 4. Add imaging columns to each timeslice
            # (NDPPP fails later if these columns are not present)
            for time_slice_path in time_slices_path_list:
                pt.addImagingColumns(time_slice_path)
                self.logger.debug(
                    "Added imaging columns to time_slice: {0}".format(
                        time_slice_path))

            #*****************************************************************
            # 5. Filter bad stations
            time_slice_filtered_path_list = self._filter_bad_stations(
                time_slices_path_list, asciistat_executable,
                statplot_executable, msselect_executable)

            #*****************************************************************
            # Add measurement (beam) tables
            if add_beam_tables:
                self.add_beam_tables(time_slice_filtered_path_list)

            #******************************************************************
            # 6. Perform the (virtual) concatenation of the timeslices
            self._concat_timeslices(time_slice_filtered_path_list,
                                    output_measurement_set)

            #******************************************************************
            # return
            self.outputs["time_slices"] = \
                time_slices_path_list

        return 0
Example #11
        for bandi in range(allms.shape[1]):

            mergems = '{outmsroot}_BAND{ii:03d}.ms'.format(ii=bandi,
                                                           outmsroot=outmsroot)

            if os.path.isdir(mergems):
                if not args.noclobber:
                    print('removing existing ms: ' + mergems)
                    os.system('rm -rf ' + mergems)
                else:
                    print('ms ' + mergems +
                          ' exists and noclobber is set True')
                    sys.exit()

            print('concatenating in time')
            print(mergems + ': ' + ','.join(allms[:, bandi]))
            if not args.dryrun:
                t = pt.table(allms[:, bandi])
                t.sort('TIME,ANTENNA1,ANTENNA2').copy(mergems, deep=True)

                print('concatenating done')

                print('add imaging columns')
                pt.addImagingColumns(mergems)

                # remove the intermediate files unless asked to keep them
                if not args.keepfiles:
                    for ms in allms[:, bandi]:
                        print('removing ' + ms)
                        os.system('rm -rf ' + ms)
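Stripped of the clobber and cleanup handling, the core of this merge is: open the per-timestep parts as one concatenated table, sort into canonical order, and deep-copy to a real MS. A minimal sketch with hypothetical names:

import pyrap.tables as pt

ms_parts = ['band0_t0.ms', 'band0_t1.ms']  # hypothetical inputs
merged = 'band0_merged.ms'

# A list of MS names opens as a (virtual) concatenation; sorting and
# deep-copying writes a single time-ordered Measurement Set.
t = pt.table(ms_parts)
t.sort('TIME,ANTENNA1,ANTENNA2').copy(merged, deep=True)
t.close()
pt.addImagingColumns(merged)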
Example #12
    def __init__(self, msname, slvr_cfg):
        super(MeasurementSetManager, self).__init__()

        self._msname = msname

        if not pt.tableexists(msname):
            raise ValueError("'{ms}' does not exist "
                "or is not a Measurement Set!".format(ms=msname))

        # Create dictionary of subtables
        self._tables = { k: open_table(msname, k) for k in SUBTABLE_KEYS }

        # Add imaging columns, just in case
        pt.addImagingColumns(msname, ack=False)

        # Open the main measurement set
        ms = open_table(msname)

        # Access individual tables
        ant, spec, ddesc, pol, field = (self._tables[k] for k in SUBTABLE_KEYS)

        # Sanity check the polarizations
        if pol.nrows() > 1:
            raise ValueError("Multiple polarization configurations!")

        self._npol = npol = pol.getcol('NUM_CORR')[0]

        if npol != 4:
            raise ValueError('Expected four polarizations')

        # Number of channels per band
        chan_per_band = spec.getcol('NUM_CHAN')

        # Require the same number of channels per band
        if not all(chan_per_band[0] == cpb for cpb in chan_per_band):
            raise ValueError('Channels per band {cpb} are not equal!'
                .format(cpb=chan_per_band))

        if ddesc.nrows() != spec.nrows():
            raise ValueError("DATA_DESCRIPTOR.nrows() "
                "!= SPECTRAL_WINDOW.nrows()")

        # Hard code auto-correlations and field_id 0
        self._auto_correlations = auto_correlations = slvr_cfg['auto_correlations']
        self._field_id = field_id = 0

        # Create a view over the MS, ordered by
        # (1) time (TIME)
        # (2) baseline (ANTENNA1, ANTENNA2)
        # (3) band (SPECTRAL_WINDOW_ID via DATA_DESC_ID)
        ordering_query = " ".join((
            "SELECT FROM $ms",
            "WHERE FIELD_ID={fid}".format(fid=field_id),
            "" if auto_correlations else "AND ANTENNA1 != ANTENNA2",
            orderby_clause(MS_DIM_ORDER)
        ))

        # Ordered Measurement Set
        oms = pt.taql(ordering_query)

        montblanc.log.debug("MS ordering query is '{o}'."
            .format(o=ordering_query))

        # Measurement Set ordered by unique time and baseline
        otblms = pt.taql("SELECT FROM $oms {c}".format(
            c=orderby_clause(UVW_DIM_ORDER, unique=True)))

        # Store the main table
        self._tables[MAIN_TABLE] = ms
        self._tables[ORDERED_MAIN_TABLE] = oms
        self._tables[ORDERED_UVW_TABLE] = otblms

        self._column_descriptors = {col: ms.getcoldesc(col) for col in SELECTED}

        # Count distinct timesteps in the MS
        t_orderby = orderby_clause(['ntime'], unique=True)
        t_query = "SELECT FROM $otblms {c}".format(c=t_orderby)
        self._tables[ORDERED_TIME_TABLE] = ot = pt.taql(t_query)
        self._ntime = ntime = ot.nrows()

        # Count number of baselines in the MS
        bl_orderby = orderby_clause(['nbl'], unique=True)
        bl_query = "SELECT FROM $otblms {c}".format(c=bl_orderby)
        self._tables[ORDERED_BASELINE_TABLE] = obl = pt.taql(bl_query)
        self._nbl = nbl = obl.nrows()

        # Number of channels per band
        self._nchanperband = chan_per_band[0]

        self._nchan = nchan = sum(chan_per_band)
        self._nbands = nbands = len(chan_per_band)
        self._npolchan = npolchan = npol*nchan
        self._nvis = nvis = ntime*nbl*nchan

        # Update the cube with dimension information
        # obtained from the MS
        updated_sizes = [ntime, nbl, ant.nrows(),
            sum(chan_per_band), len(chan_per_band), npol,
            npolchan, nvis]

        self._dim_sizes = dim_sizes = { dim: size for dim, size
            in zip(UPDATE_DIMENSIONS, updated_sizes) }

        shape = tuple(dim_sizes[d] for d in MS_DIM_ORDER)
        expected_rows = np.prod(shape)

        if not expected_rows == oms.nrows():
            dim_desc = ", ".join('(%s,%s)' % (d, s) for
                d, s in zip(MS_DIM_ORDER, shape))
            row_desc = " x ".join('%s' % s for s in shape)

            montblanc.log.warn("Encountered '{msr}' rows in '{ms}' "
                "but expected '{rd} = {er}' after finding the following "
                "dimensions by inspection: [{d}]. Irregular Measurement Sets "
                "are not fully supported due to the generality of the format.".format(
                    msr=oms.nrows(), ms=msname,
                    er=expected_rows, rd=row_desc, d=dim_desc))
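orderby_clause and the dimension constants are montblanc helpers; written out by hand for the default field, the ordering query amounts to something like the following sketch (hypothetical MS path):

import pyrap.tables as pt

msname = 'example.ms'  # hypothetical path
ms = pt.table(msname)

# Rows ordered by time, then baseline, then spectral window
# (via DATA_DESC_ID), excluding auto-correlations.
oms = pt.taql('SELECT FROM $ms WHERE FIELD_ID=0 '
              'AND ANTENNA1 != ANTENNA2 '
              'ORDERBY TIME, ANTENNA1, ANTENNA2, DATA_DESC_ID')
print(oms.nrows())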