Example #1
    def _attach_SN_tables_to_image(self, uv_file, image_file):
        """
        Loop through each of the SN tables in uv_file that
        were produced by MFImage and copy and attach these to the
        image_file.

        Parameters
        ----------
        uv_file    : :class:`AIPSPath`
            UV file output from MFImage with SN tables attached.
        image_file : :class:`AIPSPath`
            Image (MA) file output from MFImage
        """

        uvf = uv_factory(aips_path=uv_file, mode='r')
        if uvf.exists:
            # Get all SN tables in UV file
            tables = uvf.tablelist
            taco_kwargs = {}
            taco_kwargs.update(uv_file.task_input_kwargs())
            taco_kwargs.update(image_file.task_output_kwargs())
            taco_kwargs['inTab'] = 'AIPS SN'
            taco_kwargs['nCopy'] = 1
            # Copy all SN tables
            SN_ver = [table[0] for table in tables if table[1] == 'AIPS SN']
            for ver in SN_ver:
                taco_kwargs.update({
                    'inVer': ver,
                    'outVer': ver
                    })
                taco = task_factory("TabCopy", **taco_kwargs)
                with log_obit_err(log):
                    taco.go()
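For orientation, the fully assembled TabCopy arguments end up as a flat dict of task parameters. The sketch below is an assumption: the inName/inClass/inSeq/inDisk keys and their out* counterparts follow the usual Obit/AIPS task conventions and the values are invented; only inTab, nCopy, inVer and outVer come from the code above.

# Hypothetical final contents of taco_kwargs (names/values invented,
# following common Obit/AIPS task parameter conventions)
taco_kwargs = {
    'inName': 'obs', 'inClass': 'merge', 'inSeq': 1, 'inDisk': 1,       # from uv_file
    'outName': 'obs', 'outClass': 'IClean', 'outSeq': 1, 'outDisk': 1,  # from image_file
    'inTab': 'AIPS SN',   # copy solution (SN) tables
    'nCopy': 1,           # one table per TabCopy invocation
    'inVer': 2,           # example SN version being copied
    'outVer': 2,          # written to the same version on the image
}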
Example #2
@contextmanager  # from contextlib import contextmanager
def file_cleaner(paths):
    """
    Delete a list of AIPS files at both the context start and stop
    """

    if not isinstance(paths, (tuple, list)):
        paths = [paths]

    try:
        for path in paths:
            with uv_factory(aips_path=path, mode="w") as f:
                f.Zap()

        yield
    finally:
        for path in paths:
            with uv_factory(aips_path=path, mode="w") as f:
                f.Zap()
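A minimal usage sketch, assuming the @contextmanager decorator noted above and the names used elsewhere on this page (obit_context, AIPSPath, uv_factory):

scratch = AIPSPath(name='scratch', disk=1, aclass='uv', seq=1)  # hypothetical file

with obit_context(), file_cleaner([scratch]):
    # scratch was zapped on entry; create and use it here
    with uv_factory(aips_path=scratch, mode="w"):
        pass
# ... and it is zapped again on exit, even if an exception was raised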
Example #3
    def test_next_seq_nr(self):
        """ Test finding the next highest disk sequence number of an AIPS Path """

        # Create two AIPS paths with sequence numbers 10 and 11
        p1 = AIPSPath(name='test', disk=1, aclass="klass", seq=10)
        p2 = p1.copy(seq=p1.seq+1)

        with obit_context(), file_cleaner([p1, p2]):
            # Create the first file and test highest sequence number
            with uv_factory(aips_path=p1, mode="w"):
                pass
            self.assertEqual(next_seq_nr(p1), p1.seq+1)

            # Create the second file and test highest sequence number
            with uv_factory(aips_path=p2, mode="w"):
                pass
            self.assertEqual(next_seq_nr(p1), p1.seq+2)
            self.assertEqual(next_seq_nr(p2), p2.seq+1)
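From these assertions (and the next_seq_nr(uv_mp) - 1 idiom in a later example), next_seq_nr's contract can be modelled in pure Python. This model is an inference, not katacomb's actual implementation:

def next_seq_nr_model(existing_seqs):
    # Hypothetical model: one past the highest existing sequence
    # number for the (name, class, disk) triple, or 1 if none exist
    return max(existing_seqs, default=0) + 1

assert next_seq_nr_model([10]) == 11      # after the first file is created
assert next_seq_nr_model([10, 11]) == 12  # after the second file
assert next_seq_nr_model([]) == 1         # empty disk: next_seq_nr(...) - 1 == 0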
Example #4
    def _cleanup(self):
        """
        Remove any remaining UV, Clean, and Merged UVF files,
        if requested
        """

        # Clobber any result files requested
        for cleanup_file in set(self.cleanup_uv_files):
            with uv_factory(aips_path=cleanup_file, mode="w") as uvf:
                log.info("Zapping '%s'", uvf.aips_path)
                uvf.Zap()
        for cleanup_file in set(self.cleanup_img_files):
            with img_factory(aips_path=cleanup_file, mode="w") as imf:
                log.info("Zapping '%s'", imf.aips_path)
                imf.Zap()
Example #5
    def _maybe_create_merge_uvf(self, merge_uvf, blavg_uvf, global_table_cmds):
        """
        Create the merge file if it hasn't yet been created,
        conditioning it with the baseline averaged file
        descriptor. Baseline averaged files
        have integration time as an additional random parameter
        so the merged file will need to take this into account.
        """

        # If we've performed channel averaging, the FREQ dimension
        # and FQ tables in the baseline averaged file will be smaller
        # than those of the scan file. This data must either be
        # (1) used to condition a new merge UV file, or
        # (2) compared against the descriptor of the existing merge UV file

        if merge_uvf is not None:
            return merge_uvf

        blavg_desc = blavg_uvf.Desc.Dict

        log.info("Creating '%s'",  self.uv_merge_path)

        # Use the FQ table rows and keywords to create
        # the merge UV file.
        blavg_table_cmds = deepcopy(global_table_cmds)

        blavg_fq_kw = dict(blavg_uvf.tables["AIPS FQ"].keywords)
        blavg_fq_rows = blavg_uvf.tables["AIPS FQ"].rows

        fq_cmd = blavg_table_cmds["AIPS FQ"]
        fq_cmd["keywords"] = blavg_fq_kw
        fq_cmd["rows"] = blavg_fq_rows

        # Create the UV object
        merge_uvf = uv_factory(aips_path=self.uv_merge_path,
                               mode="w",
                               nvispio=self.nvispio,
                               table_cmds=blavg_table_cmds,
                               desc=blavg_desc)

        # Write history
        uv_history_obs_description(self.ka, merge_uvf)
        uv_history_selection(self.katdal_select, merge_uvf)

        # Ensure merge and blavg file descriptors match on important points
        self._sanity_check_merge_blavg_descriptors(merge_uvf, blavg_uvf)

        return merge_uvf
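Only a slice of the table_cmds schema is visible here: each table name maps to a dict with at least "keywords" and "rows" entries. A hypothetical illustration, with invented AIPS FQ contents:

# Hypothetical shape of a table_cmds entry, inferred from the accesses
# in _maybe_create_merge_uvf above; keyword and row values are invented
blavg_table_cmds = {
    "AIPS FQ": {
        "keywords": {"NO_IF": 1},                       # assumed keyword
        "rows": [{"FRQSEL": [1], "IF FREQ": [0.0],
                  "TOTAL BANDWIDTH": [856e6]}],         # assumed row fields
    },
    # other tables ("AIPS AN", "AIPS SU", ...) would follow the same shape
}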
Example #6
    def execute_implementation(self):
        result_tuple = self._select_and_infer_files()
        uv_sources, target_indices, uv_files, clean_files = result_tuple
        if "mfimage" in self.clobber:
            self.cleanup_uv_files += uv_files
        if "clean" in self.clobber:
            self.cleanup_img_files += clean_files
        # Update MFImage source selection
        self.mfimage_params['Sources'] = uv_sources
        # Find the highest numbered merge file if we are reusing
        if self.reuse:
            uv_mp = self.ka.aips_path(aclass='merge', name=kc.get_config()['cb_id'])
            # Find the merge file with the highest seq #
            hiseq = next_seq_nr(uv_mp) - 1
            # hiseq will be zero if the AIPS disk has no 'merge' file
            if hiseq == 0:
                raise ValueError("AIPS disk at '%s' has no 'merge' file to reuse." %
                                 (kc.get_config()['aipsdirs'][self.disk - 1][-1]))
            else:
                # Get the AIPS entry of the UV data to reuse
                self.uv_merge_path = uv_mp.copy(seq=hiseq)
                log.info("Re-using UV data in '%s' from AIPS disk: '%s'",
                         self.uv_merge_path, kc.get_config()['aipsdirs'][self.disk - 1][-1])
                merge_uvf = uv_factory(aips_path=self.uv_merge_path, mode='r',
                                       nvispio=self.nvispio)

                merge_nvis = merge_uvf.nvis_from_NX()
        else:
            merge_nvis = self._export_and_merge_scans()
        if "merge" in self.clobber:
            self.cleanup_uv_files.append(self.uv_merge_path)
        log.info('There are %s visibilities in the merged file', merge_nvis)
        if merge_nvis < 1:
            return {}
        else:
            self._run_mfimage(self.uv_merge_path, uv_sources)

            self._get_wavg_img(clean_files)
            for uv, clean in zip(uv_files, clean_files):
                self._attach_SN_tables_to_image(uv, clean)

            metadata = export_images(clean_files, target_indices, self.odisk, self.ka)
            return metadata
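The reuse branch probes for the highest existing merge file via next_seq_nr(uv_mp) - 1. A minimal check of that arithmetic, reusing the pure-Python model sketched under Example #3 (an inference, not katacomb's implementation):

def next_seq_nr_model(existing_seqs):
    # Hypothetical model of next_seq_nr (see Example #3)
    return max(existing_seqs, default=0) + 1

assert next_seq_nr_model([]) - 1 == 0   # empty AIPS disk: nothing to reuse
assert next_seq_nr_model([3]) - 1 == 3  # reuse the ('merge', seq=3) file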
Example #7
    def _run_mfimage(self, uv_path, uv_sources):
        """
        Run the MFImage task
        """

        with uv_factory(aips_path=uv_path, mode="r") as uvf:
            merge_desc = uvf.Desc.Dict

        # Run MFImage task on the merged file
        out_kwargs = uv_path.task_output_kwargs(name='',
                                                aclass=IMG_CLASS,
                                                seq=0)
        out2_kwargs = uv_path.task_output2_kwargs(name='',
                                                  aclass=UV_CLASS,
                                                  seq=0)

        mfimage_kwargs = {}
        # Setup input file
        mfimage_kwargs.update(uv_path.task_input_kwargs())
        # Output file 1 (clean file)
        mfimage_kwargs.update(out_kwargs)
        # Output file 2 (uv file)
        mfimage_kwargs.update(out2_kwargs)
        mfimage_kwargs.update({
            'maxFBW': fractional_bandwidth(merge_desc)/20.0,
            'nThreads': multiprocessing.cpu_count(),
            'prtLv': self.prtlv,
            'Sources': uv_sources
        })

        # Finally, apply any configured MFImage parameter overrides
        mfimage_kwargs.update(self.mfimage_params)

        log.info("MFImage arguments %s", pretty(mfimage_kwargs))

        mfimage = task_factory("MFImage", **mfimage_kwargs)
        # Send stdout from the task to the log
        with log_obit_err(log):
            mfimage.go()
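maxFBW above is a twentieth of the merged file's fractional bandwidth. A rough model of fractional_bandwidth, assuming the standard Obit UV descriptor keys (ctype, crval, cdelt, inaxes) rather than katacomb's actual implementation:

def fractional_bandwidth_model(uv_desc):
    # Hypothetical: total FREQ-axis bandwidth over the reference frequency
    jlocf = [c.strip() for c in uv_desc['ctype']].index('FREQ')
    nchan = uv_desc['inaxes'][jlocf]
    return abs(uv_desc['cdelt'][jlocf]) * nchan / uv_desc['crval'][jlocf]

# e.g. 856 MHz of bandwidth centred at 1284 MHz gives ~0.67,
# so maxFBW would be set to ~0.033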
Example #8
    def test_uv_facade_read_write(self):
        """
        Test basic reads and writes the AIPS UV Facade
        """
        nvis = 577  # Read/write this many visibilities, total
        nvispio = 20  # Read/write this many visibilities per IO op
        uv_file_path = AIPSPath('test', 1, 'test', 1)

        # Set up the spectral window
        nchan = 4

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        # Use the first four antennas to create the subarray
        subarrays = [{'antenna': ANTENNA_DESCRIPTIONS[:4]}]

        # Pick 5 random stars as targets
        targets = [
            katpoint.Target("%s, star" % t)
            for t in random.sample(stars.keys(), 5)
        ]

        # track for 5 dumps on each target
        slew_track_dumps = (('track', 5), )
        scans = [(e, nd, t) for t in targets for e, nd in slew_track_dumps]

        # Create Mock dataset and wrap it in a KatdalAdapter
        KA = KatdalAdapter(
            MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                        subarrays=subarrays,
                        spws=spws,
                        dumps=scans))

        with obit_context(), file_cleaner(uv_file_path):
            # Create the UV file
            with uv_factory(aips_path=uv_file_path,
                            mode="w",
                            nvispio=nvispio,
                            table_cmds=KA.default_table_cmds(),
                            desc=KA.uv_descriptor()) as uvf:

                uv_desc = uvf.Desc.Dict

                # Length of visibility buffer record
                lrec = uv_desc['lrec']
                # Random parameter indices
                iloct = uv_desc['iloct']  # time

                # Write out visibilities, putting sequential values
                # in the time random parameter
                for firstVis in range(1, nvis + 1, nvispio):
                    numVisBuff = min(nvis + 1 - firstVis, nvispio)

                    uv_desc = uvf.Desc.Dict
                    uv_desc['numVisBuff'] = numVisBuff
                    uvf.Desc.Dict = uv_desc

                    times = np.arange(firstVis,
                                      firstVis + numVisBuff,
                                      dtype=np.float32)

                    buf = uvf.np_visbuf
                    buf[iloct:lrec * numVisBuff:lrec] = times
                    uvf.Write(firstVis=firstVis)

            # Now re-open in readonly mode and test
            # that we get the same sequential values out
            with uv_factory(aips_path=uv_file_path, mode="r",
                            nvispio=nvispio) as uvf:

                uv_desc = uvf.Desc.Dict

                # Length of visibility buffer record
                lrec = uv_desc['lrec']
                nvis = uv_desc['nvis']
                # Random parameter indices
                iloct = uv_desc['iloct']  # time

                for firstVis in range(1, nvis + 1, nvispio):
                    numVisBuff = min(nvis + 1 - firstVis, nvispio)

                    uv_desc = uvf.Desc.Dict
                    uv_desc['numVisBuff'] = numVisBuff
                    uvf.Desc.Dict = uv_desc

                    uvf.Read(firstVis=firstVis)
                    buf = uvf.np_visbuf

                    times = np.arange(firstVis,
                                      firstVis + numVisBuff,
                                      dtype=np.float32)
                    buf_times = buf[iloct:lrec * numVisBuff:lrec]
                    self.assertTrue(np.all(times == buf_times))
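The strided slices above (buf[iloct:lrec*numVisBuff:lrec]) rely on the flat AIPS buffer layout: numVisBuff records of lrec floats each, with nrparm random parameters followed by the visibility data. A self-contained numpy sketch of that layout, with made-up sizes:

import numpy as np

nrparm, lrec, numVisBuff, iloct = 5, 29, 4, 3  # invented sizes/offsets
buf = np.zeros(lrec * numVisBuff, np.float32)  # stand-in for uvf.np_visbuf

# Stamp one time value per record at offset iloct, exactly as the test does
buf[iloct:lrec * numVisBuff:lrec] = np.arange(1., numVisBuff + 1., dtype=np.float32)

assert buf.reshape(numVisBuff, lrec)[:, iloct].tolist() == [1., 2., 3., 4.]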
Example #9
    def _export_and_merge_scans(self):
        """
        1. Read scans from katdal
        2. Export scan data to an AIPS UV file
        3. Baseline average the file.
        4. Merge averaged AIPS UV file into a merge UV file.
        """

        # The merged UV observation file. We wait until
        # we have a baseline averaged file with which to condition it
        merge_uvf = None

        uv_mp = self.ka.aips_path(aclass='merge', name=kc.get_config()['cb_id'])
        self.uv_merge_path = uv_mp.copy(seq=next_seq_nr(uv_mp))

        global_desc = self.ka.uv_descriptor()
        global_table_cmds = self.ka.default_table_cmds()

        # FORTRAN indexing
        merge_firstVis = 1

        # Scan indices
        scan_indices = [int(si) for si in self.ka.scan_indices]

        merge_blavg_nvis = 0
        # Export each scan individually, baseline averaging and merging it
        # into the final observation file.
        # NOTE: Loop over scan indices here rather than using the ka.scans
        # generator to avoid a conflict with the loop over ka.scans in uv_export.
        for si in scan_indices:
            # Select the current scan
            self.ka.select(scans=si)
            # Get path, with sequence based on scan index
            scan_path = self.uv_merge_path.copy(aclass='raw', seq=int(si))
            # Get the AIPS source for logging purposes
            aips_source = self.ka.catalogue[self.ka.target_indices[0]]
            aips_source_name = aips_source["SOURCE"][0].strip()

            log.info("Creating '%s'", scan_path)

            # Create a UV file for the scan and export to it
            with uv_factory(aips_path=scan_path, mode="w",
                            nvispio=self.nvispio,
                            table_cmds=global_table_cmds,
                            desc=global_desc) as uvf:

                uv_export(self.ka, uvf, time_step=self.time_step)

            # Retrieve the scan's single index (NX table) row.
            # Its time centroid and interval should be correct,
            # but its visibility indices need to be repurposed
            scan_uvf = uv_factory(aips_path=scan_path, mode='r',
                                  nvispio=self.nvispio)

            assert len(scan_uvf.tables["AIPS NX"].rows) == 1
            nx_row = scan_uvf.tables["AIPS NX"].rows[0].copy()
            scan_nvis = scan_uvf.nvis_from_NX()

            # If we should be merging scans
            # just use the existing scan path and file
            if self.merge_scans:
                blavg_path = scan_path
                blavg_uvf = scan_uvf
            # Otherwise perform baseline averaging, deriving
            # a new scan path and file
            else:
                # Perform baseline averaging
                blavg_path = self._blavg_scan(scan_path)
                blavg_uvf = uv_factory(aips_path=blavg_path,
                                       mode='r',
                                       nvispio=self.nvispio)

            # Create the merge UV file, if necessary
            merge_uvf = self._maybe_create_merge_uvf(merge_uvf, blavg_uvf,
                                                     global_table_cmds)

            blavg_nvis = blavg_uvf.nvis_from_NX()
            merge_blavg_nvis += blavg_nvis

            # Record something about the baseline averaging process
            param_str = ', '.join("%s=%s" % (k, v)
                                  for k, v
                                  in self.uvblavg_params.items())

            blavg_history = ("Scan %d '%s' averaged "
                             "%s to %s visiblities. UVBlAvg(%s)" %
                             (si, aips_source_name, scan_nvis,
                              blavg_nvis, param_str))

            log.info(blavg_history)

            merge_uvf.append_history(blavg_history)
            if blavg_nvis > 0:
                log.info("Merging '%s' into '%s'", blavg_path, self.uv_merge_path)
                merge_firstVis = self._copy_scan_to_merge(merge_firstVis,
                                                          merge_uvf, blavg_uvf,
                                                          nx_row)
            else:
                log.warning("No visibilities to merge for scan %d", si)

            # Remove scan once merged
            if 'scans' in self.clobber:
                log.info("Zapping '%s'", scan_uvf.aips_path)
                scan_uvf.Zap()
            else:
                scan_uvf.Close()

            # If merging scans for testing purposes, our
            # baseline averaged file will be the same as the
            # scan file, which was handled above, so don't
            # delete again. Otherwise default to
            # normal clobber handling.
            if not self.merge_scans:
                if 'avgscans' in self.clobber:
                    log.info("Zapping '%s'", blavg_uvf.aips_path)
                    blavg_uvf.Zap()
                else:
                    blavg_uvf.Close()

        if merge_blavg_nvis == 0:
            log.error("Final merged file '%s' has ZERO averaged visibilities",
                      self.uv_merge_path)
        # Write the index table
        merge_uvf.tables["AIPS NX"].write()

        # Create an empty calibration table
        merge_uvf.attach_CL_from_NX_table(self.ka.max_antenna_number)

        # Close merge file
        merge_uvf.close()

        return merge_blavg_nvis
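The history record above is plain %-formatting over the UVBlAvg task parameters; a toy rendering with invented values:

uvblavg_params = {'FOV': 1.0, 'maxInt': 2.0}  # invented task parameters
param_str = ', '.join("%s=%s" % (k, v) for k, v in uvblavg_params.items())

print("Scan %d '%s' averaged %s to %s visibilities. UVBlAvg(%s)"
      % (3, '1934-638', 40000, 2500, param_str))
# Scan 3 '1934-638' averaged 40000 to 2500 visibilities. UVBlAvg(FOV=1.0, maxInt=2.0)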
Example #10
    def _test_export_implementation(self, export_type="uv_export", nif=1):
        """
        Implementation of export test. Tests export via
        either :func:`katacomb.uv_export` or
        :func:`katacomb.pipeline_factory`, depending on ``export_type``.

        When testing export via the Continuum Pipeline, baseline
        averaging is disabled.

        Parameters
        ----------
        export_type (optional): string
            Either ``"uv_export"`` or ``"continuum_export"``.
            Defaults to ``"uv_export"``.
        nif (optional): integer
            Number of IFs to test splitting the band into
        """

        nchan = 16
        nvispio = 1024

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        target_names = random.sample(stars.keys(), 5)

        # Pick 5 random stars as targets
        targets = [katpoint.Target("%s, star" % t) for t in target_names]

        # Set up varying scans
        scans = [('slew', 1, targets[0]), ('track', 3, targets[0]),
                 ('slew', 2, targets[1]), ('track', 5, targets[1]),
                 ('slew', 1, targets[2]), ('track', 8, targets[2]),
                 ('slew', 2, targets[3]), ('track', 9, targets[3]),
                 ('slew', 1, targets[4]), ('track', 10, targets[4])]

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans)

        KA = KatdalAdapter(ds)

        # Create a FAKE object
        FAKE = object()

        # Test that metadata agrees
        for k, v in DEFAULT_METADATA.items():
            self.assertEqual(v, getattr(KA, k, FAKE))

        # Setup the katdal selection, convert it to a string
        # accepted by our command line parser function, which
        # converts it back to a dict.
        select = {
            'scans': 'track',
            'corrprods': 'cross',
            'targets': target_names,
            'pol': 'HH,VV',
            'channels': slice(0, nchan), }
        assign_str = '; '.join('%s=%s' % (k, repr(v)) for k, v in select.items())
        select = parse_python_assigns(assign_str)

        # Add nif to selection
        if nif > 1:
            select['nif'] = nif

        # Perform the katdal selection
        KA.select(**select)

        # Obtain correlator products and produce argsorts that will
        # order by (a1, a2, stokes)
        cp = KA.correlator_products()
        nstokes = KA.nstokes

        # Lexicographically sort correlation products on (a1, a2, cid)
        def sort_fn(x): return (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)
        cp_argsort = np.asarray(sorted(range(len(cp)), key=sort_fn))

        # Use first stokes parameter index of each baseline
        bl_argsort = cp_argsort[::nstokes]

        # Get data shape after selection
        kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

        uv_file_path = AIPSPath('test', 1, 'test', 1)

        with obit_context(), file_cleaner([uv_file_path]):
            # Perform export of katdal selection via uv_export
            if export_type == "uv_export":
                with uv_factory(aips_path=uv_file_path, mode="w",
                                nvispio=nvispio,
                                table_cmds=KA.default_table_cmds(),
                                desc=KA.uv_descriptor()) as uvf:

                    uv_export(KA, uvf)
            # Perform export of katdal selection via ContinuumPipline
            elif export_type == "continuum_export":
                pipeline = pipeline_factory(export_type, KA.katdal,
                                            katdal_select=select,
                                            merge_scans=True)
                pipeline._select_and_infer_files()
                pipeline._export_and_merge_scans()

                uv_file_path = pipeline.uv_merge_path

                newselect = select.copy()
                newselect['reset'] = 'TFB'
                KA.select(**newselect)
            else:
                raise ValueError("Invalid export_type '%s'" % export_type)

            nvispio = 1

            # Now read from the AIPS UV file and sanity check
            with uv_factory(aips_path=uv_file_path,
                            mode="r",
                            nvispio=nvispio) as uvf:

                def _strip_strings(aips_keywords):
                    """ AIPS string are padded, strip them """
                    return {k: v.strip()
                            if isinstance(v, (str, bytes)) else v
                            for k, v in aips_keywords.items()}

                fq_kw = _strip_strings(uvf.tables["AIPS FQ"].keywords)
                src_kw = _strip_strings(uvf.tables["AIPS SU"].keywords)
                ant_kw = _strip_strings(uvf.tables["AIPS AN"].keywords)

                # Check that the subset of keywords generated
                # by the katdal adapter match those read from the AIPS table
                self.assertDictContainsSubset(KA.uv_spw_keywords, fq_kw)
                self.assertDictContainsSubset(KA.uv_source_keywords, src_kw)
                self.assertDictContainsSubset(KA.uv_antenna_keywords, ant_kw)

                def _strip_metadata(aips_table_rows):
                    """
                    Strip out ``NumFields``, ``_status`` and ``Table name``
                    fields from each row entry
                    """
                    STRIP = {'NumFields', '_status', 'Table name'}
                    return [{k: v for k, v in d.items()
                             if k not in STRIP}
                            for d in aips_table_rows]

                # Check that frequency, source and antenna rows
                # are correctly exported
                fq_rows = _strip_metadata(uvf.tables["AIPS FQ"].rows)
                self.assertEqual(fq_rows, KA.uv_spw_rows)

                ant_rows = _strip_metadata(uvf.tables["AIPS AN"].rows)
                self.assertEqual(ant_rows, KA.uv_antenna_rows)

                # TODO(sjperkins)
                # For some reason, source radec and apparent radec
                # coordinates are off by some minor difference
                # Probably related to float32 conversion.
                if not export_type == "continuum_export":
                    src_rows = _strip_metadata(uvf.tables["AIPS SU"].rows)
                    self.assertEqual(src_rows, KA.uv_source_rows)

                uv_desc = uvf.Desc.Dict
                inaxes = tuple(reversed(uv_desc['inaxes'][:6]))
                naips_vis = uv_desc['nvis']
                summed_vis = 0

                # Number of random parameters
                nrparm = uv_desc['nrparm']
                # Length of visibility buffer record
                lrec = uv_desc['lrec']

                # Random parameter indices
                ilocu = uv_desc['ilocu']     # U
                ilocv = uv_desc['ilocv']     # V
                ilocw = uv_desc['ilocw']     # W
                iloct = uv_desc['iloct']     # time
                ilocsu = uv_desc['ilocsu']   # source id

                # Sanity check the UV descriptor inaxes
                uv_nra, uv_ndec, uv_nif, uv_nchans, uv_nstokes, uv_viscomp = inaxes

                self.assertEqual(uv_nchans * uv_nif, kat_nchans,
                                 "Number of AIPS and katdal channels differ")
                self.assertEqual(uv_viscomp, 3,
                                 "Number of AIPS visibility components")
                self.assertEqual(uv_nra, 1,
                                 "RA should be 1")
                self.assertEqual(uv_ndec, 1,
                                 "DEC should be 1")
                self.assertEqual(uv_nif, nif,
                                 "NIF should be %d" % (nif))

                # Compare AIPS and katdal scans
                aips_scans = uvf.tables["AIPS NX"].rows
                katdal_scans = list(KA.scans())

                # Must have same number of scans
                self.assertEqual(len(aips_scans), len(katdal_scans))

                # Iterate through the katdal scans
                for i, (si, state, target) in enumerate(KA.scans()):
                    self.assertTrue(state in select['scans'])

                    kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

                    # What is the expected source ID?
                    expected_source = np.float32(target['ID. NO.'][0])

                    # Work out start, end and length of the scan
                    # in visibilities
                    aips_scan = aips_scans[i]
                    start_vis = aips_scan['START VIS'][0]
                    last_vis = aips_scan['END VIS'][0]
                    naips_scan_vis = last_vis - start_vis + 1

                    summed_vis += naips_scan_vis

                    # Each AIPS visibility has dimension [1,1,1,nchan,nstokes,3]
                    # and one exists for each timestep and baseline
                    # Ensure that the number of visibilities equals
                    # number of dumps times number of baselines
                    self.assertEqual(naips_scan_vis,
                                     kat_ndumps*kat_ncorrprods//uv_nstokes,
                                     'Mismatch in number of visibilities in scan %d' % si)

                    # Accumulate UVW and time data from the AIPS UV file.
                    # By convention uv_export writes data in (ntime, nbl)
                    # ordering, so we assume that the AIPS UV data
                    # is ordered the same way
                    u_data = []
                    v_data = []
                    w_data = []
                    time_data = []
                    vis_data = []

                    # For each visibility in the scan, read data and
                    # compare with katdal observation data
                    for firstVis in range(start_vis, last_vis+1, nvispio):
                        # Determine number of visibilities to read
                        numVisBuff = min(last_vis+1-firstVis, nvispio)

                        desc = uvf.Desc.Dict
                        desc.update(numVisBuff=numVisBuff)
                        uvf.Desc.Dict = desc

                        # Read a visibility
                        uvf.Read(firstVis=firstVis)
                        buf = uvf.np_visbuf

                        # Must copy because buf data will change with each read
                        u_data.append(buf[ilocu:lrec*numVisBuff:lrec].copy())
                        v_data.append(buf[ilocv:lrec*numVisBuff:lrec].copy())
                        w_data.append(buf[ilocw:lrec*numVisBuff:lrec].copy())
                        time_data.append(buf[iloct:lrec*numVisBuff:lrec].copy())

                        for j in range(numVisBuff):
                            base = nrparm + j*lrec
                            data = buf[base:base+lrec-nrparm].copy()
                            data = data.reshape(inaxes)
                            vis_data.append(data)

                        # Check that we're dealing with the same source
                        # within the scan
                        sources = buf[ilocsu:lrec*numVisBuff:lrec].copy()
                        self.assertEqual(sources, expected_source)

                    # Ensure katdal timestamps match AIPS UV file timestamps
                    # and that there are exactly number of baseline counts
                    # for each one
                    times, time_counts = np.unique(time_data, return_counts=True)
                    timestamps = KA.uv_timestamps[:].astype(np.float32)
                    self.assertTrue(np.all(times == timestamps))
                    self.assertTrue(np.all(time_counts == len(bl_argsort)))

                    # Flatten AIPS UVW data, there'll be (ntime*nbl) values
                    u_data = np.concatenate(u_data).ravel()
                    v_data = np.concatenate(v_data).ravel()
                    w_data = np.concatenate(w_data).ravel()

                    # uv_u will have shape (ntime, ncorrprods)
                    # Select katdal stokes 0 UVW coordinates and flatten
                    uv_u = KA.uv_u[:, bl_argsort].astype(np.float32).ravel()
                    uv_v = KA.uv_v[:, bl_argsort].astype(np.float32).ravel()
                    uv_w = KA.uv_w[:, bl_argsort].astype(np.float32).ravel()

                    # Confirm UVW coordinate equality
                    self.assertTrue(np.all(uv_u == u_data))
                    self.assertTrue(np.all(uv_v == v_data))
                    self.assertTrue(np.all(uv_w == w_data))

                    # Number of baselines
                    nbl = len(bl_argsort)

                    # Now compare visibility data

                    # Stacking produces
                    # (ntime*nbl, nra, ndec, nif, nchan, nstokes, 3)
                    aips_vis = np.stack(vis_data, axis=0)
                    kat_vis = KA.uv_vis[:]

                    shape = (kat_ndumps, kat_nchans, nbl, nstokes, 3)
                    # This produces (ntime, nchan, nbl, nstokes, 3)
                    kat_vis = kat_vis[:, :, cp_argsort, :].reshape(shape)

                    # (1) transpose so that we have (ntime, nbl, nchan, nstokes, 3)
                    # (2) reshape to include the full inaxes shape,
                    #     including singleton nif, ra and dec dimensions
                    kat_vis = (kat_vis.transpose(0, 2, 1, 3, 4)
                               .reshape((kat_ndumps, nbl,) + inaxes))

                    aips_vis = aips_vis.reshape((kat_ndumps, nbl) + inaxes)

                    self.assertTrue(np.all(aips_vis == kat_vis))

                # Check that we read the expected number of visibilities
                self.assertEqual(summed_vis, naips_vis)
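The (a1, a2, cid) argsort near the top of this test can be demonstrated in isolation; the namedtuple below is a stand-in for whatever KA.correlator_products() actually returns:

from collections import namedtuple
import numpy as np

CP = namedtuple('CP', ['ant1_ix', 'ant2_ix', 'cid'])        # stand-in product type
cp = [CP(0, 1, 1), CP(0, 1, 0), CP(0, 2, 0), CP(0, 2, 1)]   # two baselines, two stokes
nstokes = 2

# Lexicographic sort on (antenna 1, antenna 2, correlation id)
cp_argsort = np.asarray(sorted(range(len(cp)),
                               key=lambda x: (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)))
bl_argsort = cp_argsort[::nstokes]  # first stokes index of each baseline

assert cp_argsort.tolist() == [1, 0, 2, 3]
assert bl_argsort.tolist() == [1, 2]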
Example #11
    # Handle invalid sequence numbers
    if args.seq is None or args.seq < 1:
        aips_path.seq = next_seq_nr(aips_path)

    # Apply the katdal selection
    KA.select(**args.select)

    # Fall over on empty selections
    if not KA.size > 0:
        raise ValueError("The katdal selection produced an empty dataset"
                         "\n'%s'\n" % pretty(args.select))

    # UV file location variables
    with uv_factory(aips_path=aips_path,
                    mode="w",
                    nvispio=args.nvispio,
                    table_cmds=KA.default_table_cmds(),
                    desc=KA.uv_descriptor()) as uvf:

        # Write history
        uv_history_obs_description(KA, uvf)
        uv_history_selection(args.select, uvf)

        # Perform export to the file
        uv_export(KA, uvf)

    # Possibly perform baseline dependent averaging
    if args.blavg:
        task_kwargs = aips_path.task_input_kwargs()
        task_kwargs.update(aips_path.task_output_kwargs(aclass='uvav'))
        blavg = task_factory("UVBlAvg", **task_kwargs)