Example #1
    def test_empty_dataset(self):
        """Test that a completely flagged dataset is exported without error"""
        nchan = 16

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        targets = [katpoint.Target("Flosshilde, radec, 0.0, -30.0")]

        # Set up a scan
        scans = [('track', 10, targets[0])]

        # Flag the data
        def mock_flags(dataset):
            return np.ones(dataset.shape, dtype=bool)

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans,
                         flags=mock_flags)

        with obit_context():
            pipeline = pipeline_factory('offline', ds)
            pipeline._select_and_infer_files()
            pipeline._export_and_merge_scans()
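
MockDataSet's flags argument is a callable that receives the dataset and returns a boolean mask of the dataset's shape (presumably (ndumps, nchans, ncorrprods), following the katdal convention seen in Example #7). mock_flags above flags everything; a variant flagging only part of the data might look like this (illustrative sketch, assuming the same callback contract):

import numpy as np

def mock_partial_flags(dataset):
    # Illustrative: flag only the first channel of every dump
    flags = np.zeros(dataset.shape, dtype=bool)
    flags[:, 0, :] = True
    return flags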
Example #2
    def test_SN_to_telstate(self):
        """Check conversion of SN table"""
        ant_ordering = ['m000', 'm001', 'm002', 'm003', 'm004', 'm005']
        # Make a dummy AIPS UV and attach an SN Table
        with obit_context():
            nif = 1
            ap = AIPSPath("Flosshilde")
            rows = construct_SN_default_rows([0.5], [1, 2, 3, 4, 5, 6], nif)
            # Modify some gains
            rows[1]['REAL1'] = [AIPS_NAN]
            rows[2]['REAL1'] = rows[2]['REAL2'] = [AIPS_NAN]
            rows[3]['REAL1'], rows[3]['WEIGHT 1'] = ([AIPS_NAN], [-1.0])
            rows[4]['WEIGHT 1'] = rows[4]['WEIGHT 2'] = [-1.0]
            rows[5]['REAL1'] = rows[5]['REAL2'] = [AIPS_NAN]
            rows[5]['IMAG1'] = rows[5]['IMAG2'] = [AIPS_NAN]
            rows[5]['WEIGHT 1'] = rows[5]['WEIGHT 2'] = [-1.0]
            sn_tab_desc = construct_SN_desc(nif, rows)
            uvf = uv_factory(aips_path=ap, mode="w", table_cmds=sn_tab_desc)
            sntab = uvf.tables["AIPS SN"]
            ts, result = _massage_gains(sntab, ant_ordering)
            # Do the gains and timestamps have the right values/shapes
            self.assertEqual(ts, [0.5])
            self.assertEqual(len(ts), len(result))
            self.assertEqual(result[0].shape, (nif, 2, len(ant_ordering)))
            expected_result = np.full((1, 2, len(ant_ordering)), 1.+1.j, dtype=np.complex64)
            expected_result[0, 0, 1] = AIPS_NAN + 1.j
            expected_result[0, :, 2] = AIPS_NAN + 1.j
            expected_result[0, 0, 3] = NP_NAN
            expected_result[0, :, 5] = NP_NAN
            np.testing.assert_array_equal(result[0], expected_result)

            # Change ntimes, nif and antennas and recheck shapes
            nif = 8
            ntimes = 5
            rows = construct_SN_default_rows(np.linspace(0., 1., ntimes), [1, 3, 5, 6], nif)
            sn_tab_desc = construct_SN_desc(nif, rows)
            uvf = uv_factory(aips_path=ap, mode="w", table_cmds=sn_tab_desc)
            sntab = uvf.tables["AIPS SN"]
            ts, result = _massage_gains(sntab, ant_ordering)
            np.testing.assert_array_equal(ts, np.linspace(0., 1., ntimes))
            self.assertEqual(len(ts), len(result))
            self.assertEqual(result[0].shape, (nif, 2, len(ant_ordering)))
            # Are the missing antennas nans?
            np.testing.assert_array_equal(result[0][:, :, [1, 3]], NP_NAN)

            # Empty SN table should return empty lists
            sn_tab_desc = construct_SN_desc(8, [])
            uvf = uv_factory(aips_path=ap, mode="w", table_cmds=sn_tab_desc)
            sntab = uvf.tables["AIPS SN"]
            ts, result = _massage_gains(sntab, ant_ordering)
            self.assertEqual(ts, [])
            self.assertEqual(result, [])
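
_massage_gains is internal to katacomb, but the convention the assertions above exercise is simple: a NaN REAL/IMAG component is carried through as-is, while a non-positive weight invalidates the whole gain. A rough NumPy sketch of that rule (illustrative names, not the real implementation):

import numpy as np

def massage_gain_sketch(real, imag, weight):
    """Illustrative: AIPS SN REAL/IMAG/WEIGHT columns -> complex gain,
    with non-positive weights forcing the whole gain to NaN."""
    gain = (np.asarray(real, dtype=np.float32)
            + 1j * np.asarray(imag, dtype=np.float32)).astype(np.complex64)
    gain[np.asarray(weight) <= 0.0] = np.nan + 1j * np.nan
    return gain

# e.g. a flagged (negative) weight NaNs the gain entirely
print(massage_gain_sketch([1.0], [1.0], [-1.0]))  # [nan+nanj]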
Example #3
    def test_next_seq_nr(self):
        """ Test finding the next highest disk sequence number of an AIPS Path """

        # Create two AIPS paths with sequence numbers 10 and 11
        p1 = AIPSPath(name='test', disk=1, aclass="klass", seq=10)
        p2 = p1.copy(seq=p1.seq+1)

        with obit_context(), file_cleaner([p1, p2]):
            # Create the first file and test highest sequence number
            with uv_factory(aips_path=p1, mode="w"):
                pass
            self.assertEqual(next_seq_nr(p1), p1.seq+1)

            # Create the second file and test highest sequence number
            with uv_factory(aips_path=p2, mode="w"):
                pass
            self.assertEqual(next_seq_nr(p1), p1.seq+2)
            self.assertEqual(next_seq_nr(p2), p2.seq+1)
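
The assertions above pin down next_seq_nr's contract: one more than the highest sequence number already on disk for that name/class/disk combination. A one-line sketch of that contract (an inference from the test, not katacomb's actual code):

def next_seq_nr_sketch(existing_seqs):
    # Illustrative: set() -> 1, {10} -> 11, {10, 11} -> 12
    return max(existing_seqs, default=0) + 1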
Example #4
    def test_uv_facade_read_write(self):
        """
        Test basic reads and writes of the AIPS UV Facade
        """
        nvis = 577  # Read/write this many visibilities, total
        nvispio = 20  # Read/write this many visibilities per IO op
        uv_file_path = AIPSPath('test', 1, 'test', 1)

        # Set up the spectral window
        nchan = 4

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        # Use the first four antennas to create the subarray
        subarrays = [{'antenna': ANTENNA_DESCRIPTIONS[:4]}]

        # Pick 5 random stars as targets
        targets = [
            katpoint.Target("%s, star" % t)
            for t in random.sample(list(stars.keys()), 5)
        ]

        # Track for 5 dumps on each target
        slew_track_dumps = (('track', 5), )
        scans = [(e, nd, t) for t in targets for e, nd in slew_track_dumps]

        # Create Mock dataset and wrap it in a KatdalAdapter
        KA = KatdalAdapter(
            MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                        subarrays=subarrays,
                        spws=spws,
                        dumps=scans))

        with obit_context(), file_cleaner(uv_file_path):
            # Create the UV file
            with uv_factory(aips_path=uv_file_path,
                            mode="w",
                            nvispio=nvispio,
                            table_cmds=KA.default_table_cmds(),
                            desc=KA.uv_descriptor()) as uvf:

                uv_desc = uvf.Desc.Dict

                # Length of visibility buffer record
                lrec = uv_desc['lrec']
                # Random parameter indices
                iloct = uv_desc['iloct']  # time

                # Write out visibilities, putting sequential values
                # in the time random parameter
                for firstVis in range(1, nvis + 1, nvispio):
                    numVisBuff = min(nvis + 1 - firstVis, nvispio)

                    uv_desc = uvf.Desc.Dict
                    uv_desc['numVisBuff'] = numVisBuff
                    uvf.Desc.Dict = uv_desc

                    times = np.arange(firstVis,
                                      firstVis + numVisBuff,
                                      dtype=np.float32)

                    buf = uvf.np_visbuf
                    buf[iloct:lrec * numVisBuff:lrec] = times
                    uvf.Write(firstVis=firstVis)

            # Now re-open in readonly mode and test
            # that we get the same sequential values out
            with uv_factory(aips_path=uv_file_path, mode="r",
                            nvispio=nvispio) as uvf:

                uv_desc = uvf.Desc.Dict

                # Length of visibility buffer record
                lrec = uv_desc['lrec']
                nvis = uv_desc['nvis']
                # Random parameter indices
                iloct = uv_desc['iloct']  # time

                for firstVis in range(1, nvis + 1, nvispio):
                    numVisBuff = min(nvis + 1 - firstVis, nvispio)

                    uv_desc = uvf.Desc.Dict
                    uv_desc['numVisBuff'] = numVisBuff
                    uvf.Desc.Dict = uv_desc

                    uvf.Read(firstVis=firstVis)
                    buf = uvf.np_visbuf

                    times = np.arange(firstVis,
                                      firstVis + numVisBuff,
                                      dtype=np.float32)
                    buf_times = buf[iloct:lrec * numVisBuff:lrec]
                    self.assertTrue(np.all(times == buf_times))
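
The strided indexing above relies on the flat layout of the AIPS visibility buffer: each record is lrec floats, with the nrparm random parameters in front of the data, so random parameter p of record i sits at index p + i*lrec. A self-contained NumPy sketch of that layout (illustrative sizes, not real AIPS data):

import numpy as np

nrparm, ndata, numVisBuff = 5, 24, 3   # illustrative sizes
lrec = nrparm + ndata                  # floats per visibility record
iloct = 4                              # pretend time is the 5th random parameter

buf = np.zeros(lrec * numVisBuff, dtype=np.float32)
# Write a time value into each record's time slot...
buf[iloct:lrec * numVisBuff:lrec] = np.arange(1, numVisBuff + 1)
# ...and read them back with the same stride, as the test above does
assert np.all(buf[iloct:lrec * numVisBuff:lrec] == [1., 2., 3.])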
Example #5
    def execute(self):
        with obit_context(), self as ctx:
            return ctx.execute_implementation()
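
Here the pipeline object is itself a context manager, so Obit setup (obit_context()) and the pipeline's own setup/teardown compose in a single with statement. A minimal self-contained sketch of the pattern (hypothetical class, not katacomb's implementation):

class PipelineSketch:
    """Illustrative context-manager pipeline: resources are acquired
    in __enter__ and released in __exit__, even if execution raises."""

    def __enter__(self):
        # e.g. create scratch AIPS files, open the dataset
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # e.g. zap scratch files; returning False propagates exceptions
        return False

    def execute_implementation(self):
        return "result"

    def execute(self):
        with self as ctx:
            return ctx.execute_implementation()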
Example #6
    def test_table_versions(self):
        """Check correct SN table versions are exported to telstate"""
        # Make a dummy AIPS UV and attach some SN Tables
        with obit_context():
            nif = 1
            ap = AIPSPath("Woglinde")
            # Make a list of 4 SN tables with gains equal to version number
            row = partial(construct_SN_default_rows, [0.5], [1], nif)
            sn_tables = [construct_SN_desc(nif, row(gain=float(ver)), version=ver)
                         for ver in range(1, 5)]
            # Create empty UV object and attach the tables
            for sn in sn_tables:
                uvf = uv_factory(aips_path=ap, mode="w", table_cmds=sn)
                # Flush the added table to disk
                uvf.Close()

            # Dummy telstate
            ts = TelescopeState()
            AP_telstate = ts.join('selfcal', 'product_GAMP_PHASE')
            P_telstate = ts.join('selfcal', 'product_GPHASE')
            # A basic MockDataSet
            spw = [{'centre_freq': 1200.e6,
                    'num_chans': 1,
                    'channel_width': 1.e6}]
            targ = katpoint.construct_radec_target(0., 0.)
            scan = [('track', 1, targ)]
            suba = {}
            # Only need 1 antenna
            suba['antenna'] = DEFAULT_SUBARRAYS[0]['antenna'][0:1]
            ka = KatdalAdapter(MockDataSet(spws=spw, dumps=scan, subarrays=[suba]))

            # Fake input parameters with 2 phase and 2 amp+phase self-cal
            mfimage_parms = {'maxPSCLoop': 2,
                             'maxASCLoop': 2}
            export_calibration_solutions([ap], ka, mfimage_parms, ts)
            # Should have solns from SN:2 in 'product_GPHASE'
            # and from SN:4 in 'product_GAMP_PHASE'
            self.assertEqual(ts[P_telstate][0, 0, 0], 2.+2.j)
            self.assertEqual(ts[AP_telstate][0, 0, 0], 4.+4.j)
            ts.clear()

            # Fake input parameters with 2 phase and 3 amp+phase self-cal
            mfimage_parms = {'maxPSCLoop': 2,
                             'maxASCLoop': 3}
            export_calibration_solutions([ap], ka, mfimage_parms, ts)
            # Should have solns from SN:2 in 'product_GPHASE'
            # and from SN:4 in 'product_GAMP_PHASE'
            self.assertEqual(ts[P_telstate][0, 0, 0], 2.+2.j)
            self.assertEqual(ts[AP_telstate][0, 0, 0], 4.+4.j)
            ts.clear()

            # Fake input parameters with 5 phase and 1 amp+phase self-cal
            mfimage_parms = {'maxPSCLoop': 5,
                             'maxASCLoop': 1}
            export_calibration_solutions([ap], ka, mfimage_parms, ts)
            # Should have solns from SN:4 in 'product_GPHASE'
            # and no solutions in 'product_GAMP_PHASE'
            self.assertEqual(ts[P_telstate][0, 0, 0], 4.+4.j)
            self.assertNotIn(AP_telstate, ts.keys())
            ts.clear()

            # Fake input parameters with 4 phase and 0 amp+phase self-cal
            mfimage_parms = {'maxPSCLoop': 4,
                             'maxASCLoop': 0}
            export_calibration_solutions([ap], ka, mfimage_parms, ts)
            # Should have solns from SN:4 in 'product_GPHASE'
            # and no solutions in 'product_GAMP_PHASE'
            self.assertEqual(ts[P_telstate][0, 0, 0], 4.+4.j)
            self.assertNotIn(AP_telstate, ts.keys())
            ts.clear()
            uvf.Zap()

            # Check that nothing is exported when the AIPS SN table versions
            # are not sequential starting from 1
            for sn in sn_tables[1:4:2]:
                uvf = uv_factory(aips_path=ap, mode="w", table_cmds=sn)
                # Flush the added table to disk
                uvf.Close()

            mfimage_parms = {'maxPSCLoop': 2,
                             'maxASCLoop': 2}
            export_calibration_solutions([ap], ka, mfimage_parms, ts)
            # Should have no solutions in 'product_GPHASE'
            # and no solutions in 'product_GAMP_PHASE'
            self.assertNotIn(P_telstate, ts.keys())
            self.assertNotIn(AP_telstate, ts.keys())
            ts.clear()
            uvf.Zap()
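
The expectations above follow a simple rule: with N sequential SN tables, phase-only solutions occupy versions 1..maxPSCLoop and amp+phase solutions the remainder, each product taking the last available version. A hypothetical helper capturing the versions the test expects (an inference from the assertions, not katacomb's implementation):

def expected_sn_versions(n_tables, max_psc_loop, max_asc_loop):
    """Illustrative: which SN versions land in product_GPHASE
    and product_GAMP_PHASE respectively (None means no export)."""
    phase_ver = min(max_psc_loop, n_tables) if max_psc_loop > 0 else None
    ap_ver = n_tables if (max_asc_loop > 0 and n_tables > max_psc_loop) else None
    return phase_ver, ap_ver

# Matches the four cases exercised above, e.g.:
assert expected_sn_versions(4, 2, 2) == (2, 4)
assert expected_sn_versions(4, 5, 1) == (4, None)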
Example #7
    def _test_export_implementation(self, export_type="uv_export", nif=1):
        """
        Implementation of export test. Tests export via
        either :func:`katacomb.uv_export` or
        :func:`katacomb.pipeline_factory`, depending on ``export_type``.

        When testing export via the Continuum Pipeline, baseline
        averaging is disabled.

        Parameters
        ----------
        export_type (optional): string
            Either ``"uv_export"`` or ``"continuum_export"``.
            Defaults to ``"uv_export"``
        nif (optional): integer
            Number of IFs to test splitting the band into
        """

        nchan = 16
        nvispio = 1024

        spws = [{
            'centre_freq': .856e9 + .856e9 / 2.,
            'num_chans': nchan,
            'channel_width': .856e9 / nchan,
            'sideband': 1,
            'band': 'L',
        }]

        target_names = random.sample(list(stars.keys()), 5)

        # Pick 5 random stars as targets
        targets = [katpoint.Target("%s, star" % t) for t in target_names]

        # Set up varying scans
        scans = [('slew', 1, targets[0]), ('track', 3, targets[0]),
                 ('slew', 2, targets[1]), ('track', 5, targets[1]),
                 ('slew', 1, targets[2]), ('track', 8, targets[2]),
                 ('slew', 2, targets[3]), ('track', 9, targets[3]),
                 ('slew', 1, targets[4]), ('track', 10, targets[4])]

        # Create Mock dataset and wrap it in a KatdalAdapter
        ds = MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
                         subarrays=DEFAULT_SUBARRAYS,
                         spws=spws,
                         dumps=scans)

        KA = KatdalAdapter(ds)

        # Create a FAKE object
        FAKE = object()

        # Test that metadata agrees
        for k, v in DEFAULT_METADATA.items():
            self.assertEqual(v, getattr(KA, k, FAKE))

        # Setup the katdal selection, convert it to a string
        # accepted by our command line parser function, which
        # converts it back to a dict.
        select = {
            'scans': 'track',
            'corrprods': 'cross',
            'targets': target_names,
            'pol': 'HH,VV',
            'channels': slice(0, nchan), }
        assign_str = '; '.join('%s=%s' % (k, repr(v)) for k, v in select.items())
        select = parse_python_assigns(assign_str)

        # Add nif to selection
        if nif > 1:
            select['nif'] = nif

        # Perform the katdal selection
        KA.select(**select)

        # Obtain correlator products and produce argsorts that will
        # order by (a1, a2, stokes)
        cp = KA.correlator_products()
        nstokes = KA.nstokes

        # Lexicographically sort correlation products on (a1, a2, cid)
        def sort_fn(x): return (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)
        cp_argsort = np.asarray(sorted(range(len(cp)), key=sort_fn))

        # Use first stokes parameter index of each baseline
        bl_argsort = cp_argsort[::nstokes]

        # Get data shape after selection
        kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

        uv_file_path = AIPSPath('test', 1, 'test', 1)

        with obit_context(), file_cleaner([uv_file_path]):
            # Perform export of katdal selection via uv_export
            if export_type == "uv_export":
                with uv_factory(aips_path=uv_file_path, mode="w",
                                nvispio=nvispio,
                                table_cmds=KA.default_table_cmds(),
                                desc=KA.uv_descriptor()) as uvf:

                    uv_export(KA, uvf)
            # Perform export of katdal selection via the ContinuumPipeline
            elif export_type == "continuum_export":
                pipeline = pipeline_factory(export_type, KA.katdal,
                                            katdal_select=select,
                                            merge_scans=True)
                pipeline._select_and_infer_files()
                pipeline._export_and_merge_scans()

                uv_file_path = pipeline.uv_merge_path

                newselect = select.copy()
                newselect['reset'] = 'TFB'
                KA.select(**newselect)
            else:
                raise ValueError("Invalid export_type '%s'" % export_type)

            nvispio = 1

            # Now read from the AIPS UV file and sanity check
            with uv_factory(aips_path=uv_file_path,
                            mode="r",
                            nvispio=nvispio) as uvf:

                def _strip_strings(aips_keywords):
                    """ AIPS string are padded, strip them """
                    return {k: v.strip()
                            if isinstance(v, (str, bytes)) else v
                            for k, v in aips_keywords.items()}

                fq_kw = _strip_strings(uvf.tables["AIPS FQ"].keywords)
                src_kw = _strip_strings(uvf.tables["AIPS SU"].keywords)
                ant_kw = _strip_strings(uvf.tables["AIPS AN"].keywords)

                # Check that the subset of keywords generated by the
                # katdal adapter matches those read from the AIPS table
                # (assertDictContainsSubset was removed in Python 3.12)
                self.assertEqual(fq_kw, {**fq_kw, **KA.uv_spw_keywords})
                self.assertEqual(src_kw, {**src_kw, **KA.uv_source_keywords})
                self.assertEqual(ant_kw, {**ant_kw, **KA.uv_antenna_keywords})

                def _strip_metadata(aips_table_rows):
                    """
                    Strip out ``NumFields``, ``_status``, ``Table name``
                    fields from each row entry
                    """
                    STRIP = {'NumFields', '_status', 'Table name'}
                    return [{k: v for k, v in d.items()
                             if k not in STRIP}
                            for d in aips_table_rows]

                # Check that frequency, source and antenna rows
                # are correctly exported
                fq_rows = _strip_metadata(uvf.tables["AIPS FQ"].rows)
                self.assertEqual(fq_rows, KA.uv_spw_rows)

                ant_rows = _strip_metadata(uvf.tables["AIPS AN"].rows)
                self.assertEqual(ant_rows, KA.uv_antenna_rows)

                # TODO(sjperkins)
                # For some reason, source radec and apparent radec
                # coordinates are off by some minor difference
                # Probably related to float32 conversion.
                if export_type != "continuum_export":
                    src_rows = _strip_metadata(uvf.tables["AIPS SU"].rows)
                    self.assertEqual(src_rows, KA.uv_source_rows)

                uv_desc = uvf.Desc.Dict
                inaxes = tuple(reversed(uv_desc['inaxes'][:6]))
                naips_vis = uv_desc['nvis']
                summed_vis = 0

                # Number of random parameters
                nrparm = uv_desc['nrparm']
                # Length of visibility buffer record
                lrec = uv_desc['lrec']

                # Random parameter indices
                ilocu = uv_desc['ilocu']     # U
                ilocv = uv_desc['ilocv']     # V
                ilocw = uv_desc['ilocw']     # W
                iloct = uv_desc['iloct']     # time
                ilocsu = uv_desc['ilocsu']   # source id

                # Sanity check the UV descriptor inaxes
                uv_nra, uv_ndec, uv_nif, uv_nchans, uv_nstokes, uv_viscomp = inaxes

                self.assertEqual(uv_nchans * uv_nif, kat_nchans,
                                 "Number of AIPS and katdal channels differ")
                self.assertEqual(uv_viscomp, 3,
                                 "Number of AIPS visibility components")
                self.assertEqual(uv_nra, 1,
                                 "RA should be 1")
                self.assertEqual(uv_ndec, 1,
                                 "DEC should be 1")
                self.assertEqual(uv_nif, nif,
                                 "NIF should be %d" % (nif))

                # Compare AIPS and katdal scans
                aips_scans = uvf.tables["AIPS NX"].rows
                katdal_scans = list(KA.scans())

                # Must have same number of scans
                self.assertEqual(len(aips_scans), len(katdal_scans))

                # Iterate through the katdal scans
                for i, (si, state, target) in enumerate(KA.scans()):
                    self.assertTrue(state in select['scans'])

                    kat_ndumps, kat_nchans, kat_ncorrprods = KA.shape

                    # What is the expected source ID?
                    expected_source = np.float32(target['ID. NO.'][0])

                    # Work out start, end and length of the scan
                    # in visibilities
                    aips_scan = aips_scans[i]
                    start_vis = aips_scan['START VIS'][0]
                    last_vis = aips_scan['END VIS'][0]
                    naips_scan_vis = last_vis - start_vis + 1

                    summed_vis += naips_scan_vis

                    # Each AIPS visibility has dimension [1,1,nif,nchan,nstokes,3]
                    # and one exists for each timestep and baseline
                    # Ensure that the number of visibilities equals
                    # number of dumps times number of baselines
                    self.assertEqual(naips_scan_vis,
                                     kat_ndumps*kat_ncorrprods//uv_nstokes,
                                     'Mismatch in number of visibilities in scan %d' % si)

                    # Accumulate UVW, time data from the AIPS UV file
                    # By convention uv_export writes data in (ntime, nbl)
                    # ordering, so we assume that the AIPS UV data
                    # is ordered the same way
                    u_data = []
                    v_data = []
                    w_data = []
                    time_data = []
                    vis_data = []

                    # For each visibility in the scan, read data and
                    # compare with katdal observation data
                    for firstVis in range(start_vis, last_vis+1, nvispio):
                        # Determine number of visibilities to read
                        numVisBuff = min(last_vis+1-firstVis, nvispio)

                        desc = uvf.Desc.Dict
                        desc.update(numVisBuff=numVisBuff)
                        uvf.Desc.Dict = desc

                        # Read a visibility
                        uvf.Read(firstVis=firstVis)
                        buf = uvf.np_visbuf

                        # Must copy because buf data will change with each read
                        u_data.append(buf[ilocu:lrec*numVisBuff:lrec].copy())
                        v_data.append(buf[ilocv:lrec*numVisBuff:lrec].copy())
                        w_data.append(buf[ilocw:lrec*numVisBuff:lrec].copy())
                        time_data.append(buf[iloct:lrec*numVisBuff:lrec].copy())

                        # Use a separate index to avoid shadowing
                        # the scan index i
                        for vi in range(numVisBuff):
                            base = nrparm + vi*lrec
                            data = buf[base:base+lrec-nrparm].copy()
                            data = data.reshape(inaxes)
                            vis_data.append(data)

                        # Check that we're dealing with the same source
                        # within the scan
                        sources = buf[ilocsu:lrec*numVisBuff:lrec].copy()
                        self.assertEqual(sources, expected_source)

                    # Ensure katdal timestamps match AIPS UV file timestamps
                    # and that each timestamp occurs exactly once per baseline
                    times, time_counts = np.unique(time_data, return_counts=True)
                    timestamps = KA.uv_timestamps[:].astype(np.float32)
                    self.assertTrue(np.all(times == timestamps))
                    self.assertTrue(np.all(time_counts == len(bl_argsort)))

                    # Flatten AIPS UVW data, there'll be (ntime*nbl) values
                    u_data = np.concatenate(u_data).ravel()
                    v_data = np.concatenate(v_data).ravel()
                    w_data = np.concatenate(w_data).ravel()

                    # uv_u will have shape (ntime, ncorrprods)
                    # Select katdal stokes 0 UVW coordinates and flatten
                    uv_u = KA.uv_u[:, bl_argsort].astype(np.float32).ravel()
                    uv_v = KA.uv_v[:, bl_argsort].astype(np.float32).ravel()
                    uv_w = KA.uv_w[:, bl_argsort].astype(np.float32).ravel()

                    # Confirm UVW coordinate equality
                    self.assertTrue(np.all(uv_u == u_data))
                    self.assertTrue(np.all(uv_v == v_data))
                    self.assertTrue(np.all(uv_w == w_data))

                    # Number of baselines
                    nbl = len(bl_argsort)

                    # Now compare visibility data

                    # Stacking produces
                    # (ntime*nbl, nra, ndec, nif, nchan, nstokes, 3)
                    aips_vis = np.stack(vis_data, axis=0)
                    kat_vis = KA.uv_vis[:]

                    shape = (kat_ndumps, kat_nchans, nbl, nstokes, 3)
                    # This produces (ntime, nchan, nbl, nstokes, 3)
                    kat_vis = kat_vis[:, :, cp_argsort, :].reshape(shape)

                    # (1) transpose so that we have (ntime, nbl, nchan, nstokes, 3)
                    # (2) reshape to include the full inaxes shape,
                    #     including singleton nif, ra and dec dimensions
                    kat_vis = (kat_vis.transpose(0, 2, 1, 3, 4)
                               .reshape((kat_ndumps, nbl,) + inaxes))

                    aips_vis = aips_vis.reshape((kat_ndumps, nbl) + inaxes)

                    self.assertTrue(np.all(aips_vis == kat_vis))

                # Check that we read the expected number of visibilities
                self.assertEqual(summed_vis, naips_vis)
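
The (a1, a2, cid) argsort near the top of this test is what aligns katdal's correlation products with AIPS baseline ordering. A standalone sketch of the same trick with dummy products (hypothetical CP tuple, not katacomb's type):

from collections import namedtuple
import numpy as np

CP = namedtuple('CP', 'ant1_ix ant2_ix cid')
# Two baselines, two stokes products each, deliberately scrambled
cp = [CP(0, 1, 1), CP(0, 1, 0), CP(0, 2, 0), CP(0, 2, 1)]
nstokes = 2

cp_argsort = np.asarray(sorted(range(len(cp)),
                               key=lambda x: (cp[x].ant1_ix, cp[x].ant2_ix, cp[x].cid)))
# First stokes index of each (a1, a2) baseline group
bl_argsort = cp_argsort[::nstokes]
assert list(cp_argsort) == [1, 0, 2, 3]
assert list(bl_argsort) == [1, 2]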
Example #8
                        "assignment statements to python "
                        "literals, separated by semi-colons")
    parser.add_argument("--blavg",
                        default=False,
                        action="store_true",
                        help="Apply baseline dependent averaging")
    return parser


setup_logging()

args = create_parser().parse_args()

KA = KatdalAdapter(katdal.open(args.katdata))

with obit_context():
    # Construct file object
    aips_path = KA.aips_path(name=args.name,
                             disk=args.disk,
                             aclass=args.aclass,
                             seq=args.seq,
                             dtype="AIPS")

    # Handle invalid sequence numbers
    if args.seq is None or args.seq < 1:
        aips_path.seq = next_seq_nr(aips_path)

    # Apply the katdal selection
    KA.select(**args.select)

    # Fall over on empty selections
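
For reference, the --select round-trip here and in Example #7 relies on parse_python_assigns, which the truncated help text above describes as converting "assignment statements to python literals, separated by semi-colons". A rough, simplified sketch of such a parser using ast.literal_eval (the real helper presumably handles more, e.g. the slice(...) value passed in Example #7):

import ast

def parse_python_assigns_sketch(assign_str):
    """Illustrative: turn "a=1; b='track'" into {'a': 1, 'b': 'track'}.
    Handles Python literals only; not katacomb's actual parser."""
    result = {}
    for stmt in assign_str.split(';'):
        stmt = stmt.strip()
        if not stmt:
            continue
        key, _, value = stmt.partition('=')
        result[key.strip()] = ast.literal_eval(value.strip())
    return result

assert parse_python_assigns_sketch("scans='track'; pol='HH,VV'") == \
    {'scans': 'track', 'pol': 'HH,VV'}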