Example #1
    def test_zero_vis_online(self):
        """Check online pipeline exits gracefully if all data is flagged
        """
        # Create flagged Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=self.spws,
                         dumps=self.scans,
                         flags=partial(flags, flagged=True))

        # Dummy CB_ID and product ID, and a temporary FITS disk
        fd = kc.get_config()['fitsdirs']
        fd += [(None, '/tmp/FITS')]
        kc.set_config(output_id='OID', cb_id='CBID', fitsdirs=fd)

        setup_aips_disks()

        # Create the pipeline
        pipeline = pipeline_factory('online', ds, TelescopeState(),
                                    katdal_select=self.select,
                                    uvblavg_params=self.uvblavg_params,
                                    mfimage_params=self.mfimage_params)

        metadata = pipeline.execute()
        # Check metadata is empty and no exceptions are thrown
        assert_equal(metadata, {})

        # Get fits area
        cfg = kc.get_config()
        fits_area = cfg['fitsdirs'][-1][1]

        # Remove the tmp/FITS dir
        shutil.rmtree(fits_area)
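
The flagged-dataset test above repeats a setup/teardown pattern that recurs throughout these examples: append a temporary FITS disk to the kc configuration, call setup_aips_disks(), and remove the scratch area at the end. A minimal sketch of a context manager that factors this out, assuming the kc and setup_aips_disks names imported by the test module (the scratch_fits_area helper itself is hypothetical):

import contextlib
import shutil

@contextlib.contextmanager
def scratch_fits_area(path='/tmp/FITS', **config):
    # Hypothetical helper: register a temporary FITS disk with the
    # kc configuration, create the disk areas, then clean up.
    fd = kc.get_config()['fitsdirs']
    kc.set_config(fitsdirs=fd + [(None, path)], **config)
    setup_aips_disks()
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)

With such a helper the test body would run inside `with scratch_fits_area(output_id='OID', cb_id='CBID') as fits_area:` and the trailing shutil.rmtree would disappear.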
Example #2
    def test_empty_dataset(self):
        """Test that a completely flagged dataset is exported without error"""
        nchan = 16

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        targets = [katpoint.Target("Flosshilde, radec, 0.0, -30.0")]

        # Set up a scan
        scans = [('track', 10, targets[0])]

        # Flag the data
        def mock_flags(dataset):
            return np.ones(dataset.shape, dtype=bool)

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans,
                         flags=mock_flags)

        with obit_context():
            pipeline = pipeline_factory('offline', ds)
            pipeline._select_and_infer_files()
            pipeline._export_and_merge_scans()
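
MockDataSet's flags argument is a callable that receives the dataset and returns a boolean array matching its shape, which in these tests is (ntime, nchan, ncorrprods). A sketch of a partial-flagging variant under that shape assumption (the helper name is made up):

import numpy as np

def mock_flags_half_band(dataset):
    # Hypothetical variant of mock_flags: flag only the lower half
    # of the band rather than every visibility.
    flags = np.zeros(dataset.shape, dtype=bool)
    flags[:, :dataset.shape[1] // 2, :] = True
    return flags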
Example #3
    def test_new_online_pipeline(self):
        """
        Tests that a run of the online continuum pipeline executes.
        """
        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=self.spws,
                         dumps=self.scans)

        # Sentinel object for attributes missing from the dataset
        FAKE = object()

        # Test that metadata agrees
        for k, v in DEFAULT_METADATA.items():
            self.assertEqual(v, getattr(ds, k, FAKE))

        # Dummy CB_ID and product ID, and a temporary FITS disk
        fd = kc.get_config()['fitsdirs']
        fd += [(None, '/tmp/FITS')]
        kc.set_config(output_id='OID', cb_id='CBID', fitsdirs=fd)

        setup_aips_disks()

        # Create the pipeline
        pipeline = pipeline_factory('online', ds, TelescopeState(),
                                    katdal_select=self.select,
                                    uvblavg_params=self.uvblavg_params,
                                    mfimage_params=self.mfimage_params)

        metadata = pipeline.execute()

        # Check that output FITS files exist and have the right names
        cfg = kc.get_config()
        cb_id = cfg['cb_id']
        out_id = cfg['output_id']
        fits_area = cfg['fitsdirs'][-1][1]

        for otarg in self.sanitised_target_names:
            out_strings = [cb_id, out_id, otarg, IMG_CLASS]
            filename = '_'.join(filter(None, out_strings)) + '.fits'
            assert_in(filename, metadata['FITSImageFilename'])
            filepath = os.path.join(fits_area, filename)
            assert os.path.isfile(filepath)
            _check_fits_headers(filepath)

        # Remove the tmp/FITS dir
        shutil.rmtree(fits_area)
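
The filename assertions above encode the output naming convention: the non-empty identifier parts are underscore-joined and given a .fits extension. A small sketch of that convention; the helper name and the 'IClean' image class value are illustrative assumptions:

def expected_fits_name(cb_id, out_id, target, img_class):
    # Underscore-join the non-empty parts, as asserted in the test above
    return '_'.join(filter(None, [cb_id, out_id, target, img_class])) + '.fits'

# expected_fits_name('CBID', 'OID', 'target0', 'IClean')
#   -> 'CBID_OID_target0_IClean.fits'
# An empty output_id drops its underscore:
# expected_fits_name('CBID', '', 'target0', 'IClean')
#   -> 'CBID_target0_IClean.fits'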
Example #4
def main():
    setup_logging()
    parser = create_parser()
    args = parser.parse_args()

    # Open the observation
    if (args.access_key is not None) != (args.secret_key is not None):
        parser.error('--access-key and --secret-key must be used together')
    if args.access_key is not None and args.token is not None:
        parser.error('--access-key/--secret-key cannot be used with --token')
    open_kwargs = {}
    if args.access_key is not None:
        open_kwargs['credentials'] = (args.access_key, args.secret_key)
    elif args.token is not None:
        open_kwargs['token'] = args.token
    katdata = katdal.open(args.katdata, applycal='l1', **open_kwargs)

    post_process_args(args, katdata)

    uvblavg_args, mfimage_args, band = _infer_defaults_from_katdal(katdata)

    # Get config defaults for uvblavg and mfimage and merge user supplied ones
    uvblavg_parm_file = pjoin(CONFIG, f'uvblavg_MKAT_{band}.yaml')
    log.info('UVBlAvg parameter file for %s-band: %s', band, uvblavg_parm_file)
    mfimage_parm_file = pjoin(CONFIG, f'mfimage_MKAT_{band}.yaml')
    log.info('MFImage parameter file for %s-band: %s', band, mfimage_parm_file)

    user_uvblavg_args = get_and_merge_args(uvblavg_parm_file, args.uvblavg)
    user_mfimage_args = get_and_merge_args(mfimage_parm_file, args.mfimage)

    # Merge katdal defaults with user supplied defaults
    recursive_merge(user_uvblavg_args, uvblavg_args)
    recursive_merge(user_mfimage_args, mfimage_args)

    # Get the default config.
    dc = kc.get_config()
    # Set up aipsdisk configuration from args.workdir
    if args.workdir is not None:
        aipsdirs = [(None,
                     pjoin(args.workdir, args.capture_block_id + '_aipsdisk'))]
    else:
        aipsdirs = dc['aipsdirs']
    log.info('Using AIPS data area: %s', aipsdirs[0][1])

    # Set up output configuration from args.outputdir
    fitsdirs = dc['fitsdirs']

    outputname = args.capture_block_id + OUTDIR_SEPARATOR + args.telstate_id + \
        OUTDIR_SEPARATOR + START_TIME

    outputdir = pjoin(args.outputdir, outputname)
    # Set writing tag for duration of the pipeline
    work_outputdir = outputdir + WRITE_TAG
    # Append outputdir to fitsdirs
    # NOTE: Pipeline is set up to always place its output in the
    # highest numbered fits disk so we ensure that is the case
    # here.
    fitsdirs += [(None, work_outputdir)]
    log.info('Using output data area: %s', outputdir)

    kc.set_config(aipsdirs=aipsdirs, fitsdirs=fitsdirs)

    setup_aips_disks()

    # Add output_id and capture_block_id to configuration
    kc.set_config(cfg=kc.get_config(),
                  output_id=args.output_id,
                  cb_id=args.capture_block_id)

    # Set up telstate link then create
    # a view based on the capture block ID and output ID
    telstate = TelescopeState(args.telstate)
    view = telstate.join(args.capture_block_id, args.telstate_id)
    ts_view = telstate.view(view)

    katdal_select = args.select
    katdal_select['nif'] = args.nif

    # Create Continuum Pipeline
    pipeline = pipeline_factory('online',
                                katdata,
                                ts_view,
                                katdal_select=katdal_select,
                                uvblavg_params=uvblavg_args,
                                mfimage_params=mfimage_args,
                                nvispio=args.nvispio)

    # Execute it
    metadata = pipeline.execute()

    # Create QA products if images were created
    if metadata:
        make_pbeam_images(metadata, outputdir, WRITE_TAG)
        make_qa_report(metadata, outputdir, WRITE_TAG)
        organise_qa_output(metadata, outputdir, WRITE_TAG)

        # Remove the writing tag from the output directory
        os.rename(work_outputdir, outputdir)
    else:
        os.rmdir(work_outputdir)
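
The write-tag handling at the end of main() makes the output directory appear atomically: the pipeline writes into a tagged working directory, which is renamed into place only if images were produced and removed otherwise. A minimal sketch of the pattern, with an assumed value for WRITE_TAG:

import os

WRITE_TAG = '.writing'  # assumed value; the real constant is defined elsewhere

def finalise_output(outputdir, produced_output):
    # Mirror the tail of main(): strip the writing tag on success,
    # otherwise discard the (empty) working directory.
    work_outputdir = outputdir + WRITE_TAG
    if produced_output:
        os.rename(work_outputdir, outputdir)
    else:
        os.rmdir(work_outputdir)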
Example #5
    def test_gains_export(self):
        """Check l2 export to telstate"""
        nchan = 128
        nif = 4
        dump_period = 1.0
        centre_freq = 1200.e6
        bandwidth = 100.e6
        solPint = dump_period / 2.
        solAint = dump_period
        AP_telstate = 'product_GAMP_PHASE'
        P_telstate = 'product_GPHASE'

        spws = [{'centre_freq': centre_freq,
                 'num_chans': nchan,
                 'channel_width': bandwidth / nchan,
                 'sideband': 1,
                 'band': 'L'}]
        ka_select = {'pol': 'HH,VV', 'scans': 'track',
                     'corrprods': 'cross', 'nif': nif}
        uvblavg_params = {'maxFact': 1.0, 'avgFreq': 0,
                          'FOV': 100.0, 'maxInt': 1.e-6}
        mfimage_params = {'Niter': 50, 'FOV': 0.1,
                          'xCells': 5., 'yCells': 5.,
                          'doGPU': False, 'Robust': -1.5,
                          'minFluxPSC': 0.1, 'solPInt': solPint / 60.,
                          'solPMode': 'P', 'minFluxASC': 0.1,
                          'solAInt': solAint / 60., 'maxFBW': 0.02}

        # Simulate a 10 Jy source at the phase centre
        cat = katpoint.Catalogue()
        cat.add(katpoint.Target(
            "Alberich lord of the Nibelungs, radec, 20.0, -30.0, (856. 1712. 1. 0. 0.)"))

        telstate = TelescopeState()

        # Set up a scratch space in /tmp
        fd = kc.get_config()['fitsdirs']
        fd += [(None, '/tmp/FITS')]
        kc.set_config(cb_id='CBID', fitsdirs=fd)
        setup_aips_disks()

        scan = [('track', 4, cat.targets[0])]

        # Construct a simulated dataset with our
        # point source at the centre of the field
        ds = MockDataSet(timestamps={'start_time': 0.0, 'dump_period': dump_period},
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scan,
                         vis=partial(vis, sources=cat),
                         weights=weights,
                         flags=flags)

        # Try one round of phase only self-cal & Amp+Phase self-cal
        mfimage_params['maxPSCLoop'] = 1
        mfimage_params['maxASCLoop'] = 1

        # Run the pipeline
        pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                    uvblavg_params=uvblavg_params,
                                    mfimage_params=mfimage_params)
        pipeline.execute()

        ts = telstate.view('selfcal')
        # Check that what we have in telstate agrees with what we put in
        self.assertEqual(len(ts['antlist']), len(ANTENNA_DESCRIPTIONS))
        self.assertEqual(ts['bandwidth'], bandwidth)
        self.assertEqual(ts['n_chans'], nif)
        pol_ordering = [pol[0] for pol in sorted(CORR_ID_MAP, key=CORR_ID_MAP.get)
                        if pol[0] == pol[1]]
        self.assertEqual(ts['pol_ordering'], pol_ordering)
        if_width = bandwidth / nif
        center_if = nif // 2
        start_freq = centre_freq - (bandwidth / 2.)
        self.assertEqual(ts['center_freq'], start_freq + if_width * (center_if + 0.5))

        self.assertIn(ts.join('selfcal', P_telstate), ts.keys())
        self.assertIn(ts.join('selfcal', AP_telstate), ts.keys())

        def check_gains_timestamps(gains, expect_timestamps):
            timestamps = []
            for gain, timestamp in gains:
                np.testing.assert_array_almost_equal(np.abs(gain), 1.0, decimal=3)
                np.testing.assert_array_almost_equal(np.angle(gain), 0.0)
                timestamps.append(timestamp)
            np.testing.assert_array_almost_equal(timestamps, expect_timestamps, decimal=1)

        # Check phase-only gains and timestamps
        P_times = np.arange(solPint, ds.end_time.secs, 2. * solPint)
        check_gains_timestamps(ts.get_range(P_telstate, st=0), P_times)
        # Check Amp+Phase gains
        AP_times = np.arange(solAint, ds.end_time.secs, 2. * solAint)
        check_gains_timestamps(ts.get_range(AP_telstate, st=0), AP_times)

        # Check with no Amp+Phase self-cal
        mfimage_params['maxASCLoop'] = 0
        telstate.clear()
        pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                    uvblavg_params=uvblavg_params,
                                    mfimage_params=mfimage_params)
        pipeline.execute()
        self.assertIn(telstate.join('selfcal', P_telstate), ts.keys())
        self.assertNotIn(telstate.join('selfcal', AP_telstate), ts.keys())

        # Check with no self-cal
        mfimage_params['maxPSCLoop'] = 0
        telstate.clear()
        pipeline = pipeline_factory('online', ds, telstate, katdal_select=ka_select,
                                    uvblavg_params=uvblavg_params,
                                    mfimage_params=mfimage_params)
        pipeline.execute()
        self.assertNotIn(telstate.join('selfcal', P_telstate), ts.keys())
        self.assertNotIn(telstate.join('selfcal', AP_telstate), ts.keys())

        # Cleanup workspace
        shutil.rmtree(fd[-1][1])
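
The center_freq assertion above is easiest to follow with the test's numbers plugged in. A worked sketch of the arithmetic:

# bandwidth 100 MHz, centre frequency 1200 MHz, split into 4 IFs
bandwidth = 100.e6
centre_freq = 1200.e6
nif = 4

if_width = bandwidth / nif                 # 25 MHz per IF
start_freq = centre_freq - bandwidth / 2.  # 1150 MHz, bottom of the band
center_if = nif // 2                       # IF index 2
# Centre of the middle IF: 1150 MHz + 25 MHz * 2.5 = 1212.5 MHz
assert start_freq + if_width * (center_if + 0.5) == 1212.5e6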
Example #6
    def test_cc_export(self):
        """Check CC models returned by MFImage
        """
        nchan = 128

        spws = [{'centre_freq': .856e9 + .856e9 / 2.,
                 'num_chans': nchan,
                 'channel_width': .856e9 / nchan,
                 'sideband': 1,
                 'band': 'L'}]

        katdal_select = {'pol': 'HH,VV', 'scans': 'track',
                         'corrprods': 'cross'}
        uvblavg_params = {'FOV': 0.2, 'avgFreq': 0,
                          'chAvg': 1, 'maxInt': 2.0}

        cat = katpoint.Catalogue()
        cat.add(katpoint.Target("Amfortas, radec, 0.0, -90.0, (856. 1712. 1. 0. 0.)"))
        cat.add(katpoint.Target("Klingsor, radec, 0.0, 0.0, (856. 1712. 2. -0.7 0.1)"))
        cat.add(katpoint.Target("Kundry, radec, 100.0, -35.0, (856. 1712. -1.0 1. -0.1)"))

        ts = TelescopeState()

        # Set up a scratch space in /tmp
        fd = kc.get_config()['fitsdirs']
        fd += [(None, '/tmp/FITS')]
        kc.set_config(cb_id='CBID', fitsdirs=fd)

        setup_aips_disks()

        # Point sources with various flux models
        for targ in cat:
            scans = [('track', 5, targ)]
            ds = MockDataSet(timestamps={'start_time': 1.0, 'dump_period': 4.0},
                             subarrays=DEFAULT_SUBARRAYS,
                             spws=spws,
                             dumps=scans,
                             vis=partial(vis, sources=[targ]),
                             weights=weights,
                             flags=flags)

            # 100 clean components
            mfimage_params = {'Niter': 100, 'maxFBW': 0.05,
                              'FOV': 0.1, 'xCells': 5.,
                              'yCells': 5., 'doGPU': False}

            pipeline = pipeline_factory('online', ds, ts, katdal_select=katdal_select,
                                        uvblavg_params=uvblavg_params,
                                        mfimage_params=mfimage_params)
            pipeline.execute()

            # Get the fitted CCs from telstate
            fit_cc = ts.get('target0_clean_components')
            ts.delete('target0_clean_components')

            all_ccs = katpoint.Catalogue(fit_cc['components'])
            # Should have one merged and fitted component
            self.assertEqual(len(all_ccs), 1)

            cc = all_ccs.targets[0]
            out_fluxmodel = cc.flux_model
            in_fluxmodel = targ.flux_model

            # Check the flux densities of the fitted CC's flux model
            test_freqs = np.linspace(out_fluxmodel.min_freq_MHz, out_fluxmodel.max_freq_MHz, 5)
            in_flux = in_fluxmodel.flux_density(test_freqs)
            out_flux = out_fluxmodel.flux_density(test_freqs)
            np.testing.assert_allclose(out_flux, in_flux, rtol=1.e-3)

        # A field with some off axis sources to check positions
        offax_cat = katpoint.Catalogue()
        offax_cat.add(katpoint.Target("Titurel, radec, 100.1, -35.05, (856. 1712. 1.1 0. 0.)"))
        offax_cat.add(katpoint.Target("Gurmenanz, radec, 99.9, -34.95, (856. 1712. 1. 0. 0.)"))

        scans = [('track', 5, cat.targets[2])]
        ds = MockDataSet(timestamps={'start_time': 1.0, 'dump_period': 4.0},
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans,
                         vis=partial(vis, sources=offax_cat),
                         weights=weights,
                         flags=flags)

        # Small number of CC's and high gain (not checking flux model)
        mfimage_params['Niter'] = 4
        mfimage_params['FOV'] = 0.2
        mfimage_params['Gain'] = 0.5
        mfimage_params['Robust'] = -5

        pipeline = pipeline_factory('online', ds, ts, katdal_select=katdal_select,
                                    uvblavg_params=uvblavg_params,
                                    mfimage_params=mfimage_params)
        pipeline.execute()
        fit_cc = ts.get('target0_clean_components')
        ts.delete('target0_clean_components')
        all_ccs = katpoint.Catalogue(fit_cc['components'])
        # We should have 2 merged clean components for two source positions
        self.assertEqual(len(all_ccs), 2)

        # Check the positions of the clean components
        # These will be ordered by decreasing flux density of the inputs
        # Position should be accurate to within a 5" pixel
        delta_dec = np.deg2rad(5./3600.)
        for model, cc in zip(offax_cat.targets, all_ccs.targets):
            delta_ra = delta_dec/np.cos(model.radec()[1])
            self.assertAlmostEqual(cc.radec()[0], model.radec()[0], delta=delta_ra)
            self.assertAlmostEqual(cc.radec()[1], model.radec()[1], delta=delta_dec)

        # Empty the scratch space
        shutil.rmtree(fd[-1][1])
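
The position tolerance above corresponds to one 5 arcsecond pixel, with the RA tolerance widened by 1/cos(dec) because lines of constant RA converge towards the poles. A worked sketch:

import numpy as np

delta_dec = np.deg2rad(5. / 3600.)   # one 5" pixel, ~2.42e-5 rad
dec = np.deg2rad(-35.0)              # declination of the test field
delta_ra = delta_dec / np.cos(dec)   # RA tolerance grows with |dec|
assert delta_ra > delta_dec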
Example #7
    def test_offline_pipeline(self):
        """
        Tests that a run of the offline continuum pipeline executes.
        """

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=self.spws,
                         dumps=self.scans)

        # Dummy CB_ID and product ID, and temporary FITS and AIPS disks
        fd = kc.get_config()['fitsdirs']
        fd += [(None, os.path.join(os.sep, 'tmp', 'FITS'))]
        kc.set_config(output_id='OID', cb_id='CBID', fitsdirs=fd)

        setup_aips_disks()

        # Create and run the pipeline
        pipeline = pipeline_factory('offline', ds,
                                    katdal_select=self.select,
                                    uvblavg_params=self.uvblavg_params,
                                    mfimage_params=self.mfimage_params,
                                    clobber=CLOBBER.difference({'merge'}))

        pipeline.execute()

        # Check that output FITS files exist and have the right names
        cfg = kc.get_config()
        cb_id = cfg['cb_id']
        out_id = cfg['output_id']
        fits_area = cfg['fitsdirs'][-1][1]

        out_strings = [cb_id, out_id, self.target_name, IMG_CLASS]
        filename = '_'.join(filter(None, out_strings)) + '.fits'
        filepath = os.path.join(fits_area, filename)
        assert os.path.isfile(filepath)
        _check_fits_headers(filepath)

        # Remove the tmp/FITS dir
        shutil.rmtree(fits_area)

        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=self.spws,
                         dumps=self.scans)

        setup_aips_disks()

        # Create and run the pipeline, reusing the previous data
        pipeline = pipeline_factory('offline', ds,
                                    katdal_select=self.select,
                                    uvblavg_params=self.uvblavg_params,
                                    mfimage_params=self.mfimage_params,
                                    reuse=True,
                                    clobber=CLOBBER)

        metadata = pipeline.execute()
        assert_in(filename, metadata['FITSImageFilename'])
        assert os.path.isfile(filepath)
        _check_fits_headers(filepath)

        # Remove FITS temporary area
        shutil.rmtree(fits_area)
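
The two runs above hinge on the clobber/reuse interplay: the first run excludes 'merge' from the clobber set so the merged UV file survives on the AIPS disk, and the second run images it again with reuse=True instead of re-exporting. A sketch of the set arithmetic, with a hypothetical stand-in for the real CLOBBER constant:

# Hypothetical stand-in; the real CLOBBER membership may differ
CLOBBER = {'scans', 'avgscans', 'merge', 'clean', 'mfimage'}

first_run_clobber = CLOBBER.difference({'merge'})  # keep the merged UV file
assert 'merge' in CLOBBER and 'merge' not in first_run_clobber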
Example #8
    def _test_export_implementation(self, export_type="uv_export", nif=1):
        """
        Implementation of export test. Tests export via
        either the :func:`katacomb.uv_export` or
        :func:`katacomb.pipeline_factory`, depending on ``export_type``.

        When testing export via the Continuum Pipeline, baseline
        averaging is disabled.

        Parameters
        ----------
        export_type (optional): string
            Either ``"uv_export"`` or ``"continuum_export"``.
            Defaults to ``"uv_export"``
        nif (optional): integer
            Number of IFs to split the band into
        """

        nchan = 16
        nvispio = 1024

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        target_names = random.sample(list(stars.keys()), 5)

        # Pick 5 random stars as targets
        targets = [katpoint.Target("%s, star" % t) for t in target_names]

        # Set up varying scans
        scans = [('slew', 1, targets[0]), ('track', 3, targets[0]),
                 ('slew', 2, targets[1]), ('track', 5, targets[1]),
                 ('slew', 1, targets[2]), ('track', 8, targets[2]),
                 ('slew', 2, targets[3]), ('track', 9, targets[3]),
                 ('slew', 1, targets[4]), ('track', 10, targets[4])]

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans)

        KA = KatdalAdapter(ds)

        # Sentinel object for attributes missing from the adapter
        FAKE = object()

        # Test that metadata agrees
        for k, v in DEFAULT_METADATA.items():
            self.assertEqual(v, getattr(KA, k, FAKE))

        # Setup the katdal selection, convert it to a string
        # accepted by our command line parser function, which
        # converts it back to a dict.
        select = {
            'scans': 'track',
            'corrprods': 'cross',
            'targets': target_names,
            'pol': 'HH,VV',
            'channels': slice(0, nchan), }
        assign_str = '; '.join('%s=%s' % (k, repr(v)) for k, v in select.items())
        select = parse_python_assigns(assign_str)

        # Add nif to selection
        if nif > 1:
            select['nif'] = nif

        # Perform the katdal selection
        KA.select(**select)

        # Obtain correlator products and produce argsorts that will
        # order by (a1, a2, stokes)
        cp = KA.correlator_products()
        nstokes = KA.nstokes

        # Lexicographically sort correlation products on (a1, a2, cid)
        def sort_fn(x):
            return (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)
        cp_argsort = np.asarray(sorted(range(len(cp)), key=sort_fn))

        # Use first stokes parameter index of each baseline
        bl_argsort = cp_argsort[::nstokes]

        # Get data shape after selection
        kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

        uv_file_path = AIPSPath('test', 1, 'test', 1)

        with obit_context(), file_cleaner([uv_file_path]):
            # Perform export of katdal selection via uv_export
            if export_type == "uv_export":
                with uv_factory(aips_path=uv_file_path, mode="w",
                                nvispio=nvispio,
                                table_cmds=KA.default_table_cmds(),
                                desc=KA.uv_descriptor()) as uvf:

                    uv_export(KA, uvf)
            # Perform export of katdal selection via ContinuumPipline
            elif export_type == "continuum_export":
                pipeline = pipeline_factory(export_type, KA.katdal,
                                            katdal_select=select,
                                            merge_scans=True)
                pipeline._select_and_infer_files()
                pipeline._export_and_merge_scans()

                uv_file_path = pipeline.uv_merge_path

                newselect = select.copy()
                newselect['reset'] = 'TFB'
                KA.select(**newselect)
            else:
                raise ValueError("Invalid export_type '%s'" % export_type)

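            # Reset nvispio so the checks below read one visibility record at a time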
            nvispio = 1

            # Now read from the AIPS UV file and sanity check
            with uv_factory(aips_path=uv_file_path,
                            mode="r",
                            nvispio=nvispio) as uvf:

                def _strip_strings(aips_keywords):
                    """ AIPS string are padded, strip them """
                    return {k: v.strip()
                            if isinstance(v, (str, bytes)) else v
                            for k, v in aips_keywords.items()}

                fq_kw = _strip_strings(uvf.tables["AIPS FQ"].keywords)
                src_kw = _strip_strings(uvf.tables["AIPS SU"].keywords)
                ant_kw = _strip_strings(uvf.tables["AIPS AN"].keywords)

                # Check that the subset of keywords generated
                # by the katdal adapter match those read from the AIPS table
                self.assertDictContainsSubset(KA.uv_spw_keywords, fq_kw)
                self.assertDictContainsSubset(KA.uv_source_keywords, src_kw)
                self.assertDictContainsSubset(KA.uv_antenna_keywords, ant_kw)

                def _strip_metadata(aips_table_rows):
                    """
                    Strip out ``NumFields``, ``_status``, ``Table name``
                    fields from each row entry
                    """
                    STRIP = {'NumFields', '_status', 'Table name'}
                    return [{k: v for k, v in d.items()
                             if k not in STRIP}
                            for d in aips_table_rows]

                # Check that frequency, source and antenna rows
                # are correctly exported
                fq_rows = _strip_metadata(uvf.tables["AIPS FQ"].rows)
                self.assertEqual(fq_rows, KA.uv_spw_rows)

                ant_rows = _strip_metadata(uvf.tables["AIPS AN"].rows)
                self.assertEqual(ant_rows, KA.uv_antenna_rows)

                # TODO(sjperkins)
                # For some reason, source radec and apparent radec
                # coordinates are off by some minor difference
                # Probably related to float32 conversion.
                if export_type != "continuum_export":
                    src_rows = _strip_metadata(uvf.tables["AIPS SU"].rows)
                    self.assertEqual(src_rows, KA.uv_source_rows)

                uv_desc = uvf.Desc.Dict
                inaxes = tuple(reversed(uv_desc['inaxes'][:6]))
                naips_vis = uv_desc['nvis']
                summed_vis = 0

                # Number of random parameters
                nrparm = uv_desc['nrparm']
                # Length of visibility buffer record
                lrec = uv_desc['lrec']

                # Random parameter indices
                ilocu = uv_desc['ilocu']     # U
                ilocv = uv_desc['ilocv']     # V
                ilocw = uv_desc['ilocw']     # W
                iloct = uv_desc['iloct']     # time
                ilocsu = uv_desc['ilocsu']   # source id

                # Sanity check the UV descriptor inaxes
                uv_nra, uv_ndec, uv_nif, uv_nchans, uv_nstokes, uv_viscomp = inaxes

                self.assertEqual(uv_nchans * uv_nif, kat_nchans,
                                 "Number of AIPS and katdal channels differ")
                self.assertEqual(uv_viscomp, 3,
                                 "Number of AIPS visibility components")
                self.assertEqual(uv_nra, 1,
                                 "RA should be 1")
                self.assertEqual(uv_ndec, 1,
                                 "DEC should be 1")
                self.assertEqual(uv_nif, nif,
                                 "NIF should be %d" % (nif))

                # Compare AIPS and katdal scans
                aips_scans = uvf.tables["AIPS NX"].rows
                katdal_scans = list(KA.scans())

                # Must have same number of scans
                self.assertEqual(len(aips_scans), len(katdal_scans))

                # Iterate through the katdal scans
                for i, (si, state, target) in enumerate(KA.scans()):
                    self.assertTrue(state in select['scans'])

                    kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

                    # What is the expected source ID?
                    expected_source = np.float32(target['ID. NO.'][0])

                    # Work out start, end and length of the scan
                    # in visibilities
                    aips_scan = aips_scans[i]
                    start_vis = aips_scan['START VIS'][0]
                    last_vis = aips_scan['END VIS'][0]
                    naips_scan_vis = last_vis - start_vis + 1

                    summed_vis += naips_scan_vis

                    # Each AIPS visibility has dimension [1,1,1,nchan,nstokes,3]
                    # and one exists for each timestep and baseline
                    # Ensure that the number of visibilities equals
                    # number of dumps times number of baselines
                    self.assertEqual(naips_scan_vis,
                                     kat_ndumps*kat_ncorrprods//uv_nstokes,
                                     'Mismatch in number of visibilities in scan %d' % si)

                    # Accumulate UVW, time data from the AIPS UV file
                    # By convention uv_export's data in (ntime, nbl)
                    # ordering, so we assume that the AIPS UV data
                    # is ordered the same way
                    u_data = []
                    v_data = []
                    w_data = []
                    time_data = []
                    vis_data = []

                    # For each visibility in the scan, read data and
                    # compare with katdal observation data
                    for firstVis in range(start_vis, last_vis+1, nvispio):
                        # Determine number of visibilities to read
                        numVisBuff = min(last_vis+1-firstVis, nvispio)

                        desc = uvf.Desc.Dict
                        desc.update(numVisBuff=numVisBuff)
                        uvf.Desc.Dict = desc

                        # Read a visibility
                        uvf.Read(firstVis=firstVis)
                        buf = uvf.np_visbuf

                        # Must copy because buf data will change with each read
                        u_data.append(buf[ilocu:lrec*numVisBuff:lrec].copy())
                        v_data.append(buf[ilocv:lrec*numVisBuff:lrec].copy())
                        w_data.append(buf[ilocw:lrec*numVisBuff:lrec].copy())
                        time_data.append(buf[iloct:lrec*numVisBuff:lrec].copy())

                        # Use a separate index to avoid shadowing the scan loop's i
                        for vb in range(numVisBuff):
                            base = nrparm + vb*lrec
                            data = buf[base:base+lrec-nrparm].copy()
                            data = data.reshape(inaxes)
                            vis_data.append(data)

                        # Check that we're dealing with the same source
                        # within the scan
                        sources = buf[ilocsu:lrec*numVisBuff:lrec].copy()
                        self.assertEqual(sources, expected_source)

                    # Ensure katdal timestamps match AIPS UV file timestamps
                    # and that each timestamp occurs exactly once per baseline
                    times, time_counts = np.unique(time_data, return_counts=True)
                    timestamps = KA.uv_timestamps[:].astype(np.float32)
                    self.assertTrue(np.all(times == timestamps))
                    self.assertTrue(np.all(time_counts == len(bl_argsort)))

                    # Flatten AIPS UVW data, there'll be (ntime*nbl) values
                    u_data = np.concatenate(u_data).ravel()
                    v_data = np.concatenate(v_data).ravel()
                    w_data = np.concatenate(w_data).ravel()

                    # uv_u will have shape (ntime, ncorrprods)
                    # Select katdal stokes 0 UVW coordinates and flatten
                    uv_u = KA.uv_u[:, bl_argsort].astype(np.float32).ravel()
                    uv_v = KA.uv_v[:, bl_argsort].astype(np.float32).ravel()
                    uv_w = KA.uv_w[:, bl_argsort].astype(np.float32).ravel()

                    # Confirm UVW coordinate equality
                    self.assertTrue(np.all(uv_u == u_data))
                    self.assertTrue(np.all(uv_v == v_data))
                    self.assertTrue(np.all(uv_w == w_data))

                    # Number of baselines
                    nbl = len(bl_argsort)

                    # Now compare visibility data

                    # Stacking produces
                    # (ntime*nbl, nra, ndec, nif, nchan, nstokes, 3)
                    aips_vis = np.stack(vis_data, axis=0)
                    kat_vis = KA.uv_vis[:]

                    shape = (kat_ndumps, kat_nchans, nbl, nstokes, 3)
                    # This produces (ntime, nchan, nbl, nstokes, 3)
                    kat_vis = kat_vis[:, :, cp_argsort, :].reshape(shape)

                    # (1) transpose so that we have (ntime, nbl, nchan, nstokes, 3)
                    # (2) reshape to include the full inaxes shape,
                    #     including singleton nif, ra and dec dimensions
                    kat_vis = (kat_vis.transpose(0, 2, 1, 3, 4)
                               .reshape((kat_ndumps, nbl,) + inaxes))

                    aips_vis = aips_vis.reshape((kat_ndumps, nbl) + inaxes)

                    self.assertTrue(np.all(aips_vis == kat_vis))

                # Check that we read the expected number of visibilities
                self.assertEqual(summed_vis, naips_vis)
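
The strided reads from uvf.np_visbuf above follow the AIPS buffer layout: each visibility record occupies lrec floats, the first nrparm of which are random parameters (U, V, W, time and source id at the iloc* indices), with the visibility data filling the rest. A self-contained toy model of that layout:

import numpy as np

# Toy buffer: numVisBuff records of lrec floats, the first nrparm
# of each being random parameters
nrparm, lrec, numVisBuff = 5, 11, 3
buf = np.arange(lrec * numVisBuff, dtype=np.float32)

ilocu = 0                              # index of U within a record
u = buf[ilocu:lrec * numVisBuff:lrec]  # one U value per record
vis0 = buf[nrparm:lrec]                # visibility data of record 0
assert len(u) == numVisBuff
assert len(vis0) == lrec - nrparm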
Example #9
def main():
    parser = create_parser()
    args = parser.parse_args()
    configure_logging(args)
    log.info("Reading data with applycal=%s", args.applycal)
    katdata = katdal.open(args.katdata,
                          applycal=args.applycal,
                          **args.open_args)

    # Apply the supplied mask to the flags
    if args.mask:
        apply_user_mask(katdata, args.mask)

    # Set up katdal selection based on arguments
    kat_select = {'pol': args.pols, 'nif': args.nif}

    if args.targets:
        kat_select['targets'] = args.targets
    if args.channels:
        start_chan, end_chan = args.channels
        kat_select['channels'] = slice(start_chan, end_chan)

    # An explicit command line katdal selection overrides the individual selection options
    kat_select = recursive_merge(args.select, kat_select)

    # Get band and determine default .yaml files
    band = katdata.spectral_windows[katdata.spw].band
    uvblavg_parm_file = args.uvblavg_config
    if not uvblavg_parm_file:
        uvblavg_parm_file = os.path.join(os.sep, "obitconf",
                                         f"uvblavg_{band}.yaml")
    log.info('UVBlAvg parameter file for %s-band: %s', band, uvblavg_parm_file)
    mfimage_parm_file = args.mfimage_config
    if not mfimage_parm_file:
        mfimage_parm_file = os.path.join(os.sep, "obitconf",
                                         f"mfimage_{band}.yaml")
    log.info('MFImage parameter file for %s-band: %s', band, mfimage_parm_file)

    # Get defaults for uvblavg and mfimage and merge user supplied ones
    uvblavg_args = get_and_merge_args(uvblavg_parm_file, args.uvblavg)
    mfimage_args = get_and_merge_args(mfimage_parm_file, args.mfimage)

    # Grab the cal refant from the katdal dataset and default to
    # it if it is available and hasn't been set by the user.
    ts = katdata.source.telstate
    refant = ts.get('cal_refant')
    if refant is not None and 'refAnt' not in mfimage_args:
        mfimage_args['refAnt'] = aips_ant_nr(refant)

    # Try to average down to 1024 channels if the user
    # hasn't specified something else
    num_chans = len(katdata.channels)
    factor = num_chans // 1024
    if 'avgFreq' not in uvblavg_args:
        if factor > 1:
            uvblavg_args['avgFreq'] = 1
            uvblavg_args['chAvg'] = factor

    # Get the default config.
    dc = kc.get_config()

    # capture_block_id is used to generate AIPS disk filenames
    capture_block_id = katdata.obs_params['capture_block_id']

    if args.reuse:
        # Set up AIPS disk from specified directory
        if os.path.exists(args.reuse):
            aipsdirs = [(None, args.reuse)]
            log.info('Re-using AIPS data area: %s', aipsdirs[0][1])
            reuse = True
        else:
            msg = "AIPS disk at '%s' does not exist." % (args.reuse)
            log.error(msg)
            raise IOError(msg)
    else:
        # Set up aipsdisk configuration from args.workdir
        aipsdirs = [(None,
                     os.path.join(args.workdir,
                                  capture_block_id + '_aipsdisk'))]
        log.info('Using AIPS data area: %s', aipsdirs[0][1])
        reuse = False

    # Set up output configuration from args.outputdir
    fitsdirs = dc['fitsdirs']

    # Append outputdir to fitsdirs
    fitsdirs += [(None, args.outputdir)]
    log.info('Using output data area: %s', args.outputdir)

    kc.set_config(aipsdirs=aipsdirs,
                  fitsdirs=fitsdirs,
                  output_id='',
                  cb_id=capture_block_id)

    setup_aips_disks()

    pipeline = pipeline_factory('offline',
                                katdata,
                                katdal_select=kat_select,
                                uvblavg_params=uvblavg_args,
                                mfimage_params=mfimage_args,
                                nvispio=args.nvispio,
                                clobber=args.clobber,
                                prtlv=args.prtlv,
                                reuse=reuse)

    # Execute it
    pipeline.execute()
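
The channel-averaging default in main() above aims for roughly 1024 output channels when the user supplies no averaging settings of their own. A worked example of the arithmetic:

# A 32768-channel observation averages down by a factor of 32
num_chans = 32768
factor = num_chans // 1024            # 32
uvblavg_args = {}
if 'avgFreq' not in uvblavg_args and factor > 1:
    uvblavg_args['avgFreq'] = 1       # switch on channel averaging
    uvblavg_args['chAvg'] = factor    # channels averaged together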