Example No. 1
    def test_match_red_baselines(self):
        # build a model whose antenna numbers are offset by +1 relative to the data
        model = copy.deepcopy(self.data)
        model = DataContainer(
            odict([((k[0] + 1, k[1] + 1, k[2]), model[k])
                   for i, k in enumerate(model.keys())]))
        del model[(25, 54, 'xx')]
        model_antpos = odict([(k + 1, self.antpos[k])
                              for i, k in enumerate(self.antpos.keys())])
        # match model baselines onto the data's redundant groups and key convention
        new_model = hc.abscal.match_red_baselines(model,
                                                  model_antpos,
                                                  self.data,
                                                  self.antpos,
                                                  tol=2.0,
                                                  verbose=False)
        nt.assert_equal(len(new_model.keys()), 8)
        nt.assert_true((24, 37, 'xx') in new_model)
        nt.assert_false((24, 53, 'xx') in new_model)
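
The test relies on hc.abscal.match_red_baselines to relabel the offset model keys onto the data's antenna numbering by matching redundant baseline vectors within tol. The following is only a rough sketch of that idea with hypothetical names, not the hera_cal implementation:

import numpy as np

def match_keys_by_baseline_vector(model, model_antpos, data, data_antpos, tol=2.0):
    """Hypothetical sketch: give each model visibility the data key whose baseline
    vector (ant2 - ant1 position) agrees within tol; not hera_cal's actual logic."""
    matched = {}
    for (m1, m2, pol), vis in model.items():
        m_vec = np.asarray(model_antpos[m2]) - np.asarray(model_antpos[m1])
        for (d1, d2, dpol) in data.keys():
            if dpol != pol:
                continue
            d_vec = np.asarray(data_antpos[d2]) - np.asarray(data_antpos[d1])
            if np.linalg.norm(m_vec - d_vec) < tol:
                matched[(d1, d2, pol)] = vis  # adopt the data's key for this baseline
                break
    return matched
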
Example No. 2
    def _solver(self,
                solver,
                data,
                wgts={},
                detrend_phs=False,
                sparse=False,
                **kwargs):
        """Instantiates a linsolve solver for performing redcal.

        Args:
            solver: linsolve solver (e.g. linsolve.LogProductSolver or linsolve.LinProductSolver)
            data: visibility data in the dictionary format {(ant1,ant2,pol): np.array}
            wgts: dictionary of linear weights in the same format as data. Defaults to equal wgts.
            detrend_phs: takes out average phase, useful for logcal
            sparse: represent the A matrix (visibilities to parameters) sparsely in linsolve
            **kwargs: other keyword arguments passed into the solver for use by linsolve

        Returns:
            solver: instantiated solver with redcal equations and weights
        """

        dc = DataContainer(data)
        eqs = self.build_eqs(dc.keys())
        # detrend phases within redundant group, used for logcal to avoid phase wraps
        self.phs_avg = {}
        if detrend_phs:
            for blgrp in self.reds:
                self.phs_avg[blgrp[0]] = np.exp(-1j * np.median(
                    np.unwrap([np.log(dc[bl]).imag for bl in blgrp], axis=0), axis=0))
                for bl in blgrp:
                    self.phs_avg[bl] = self.phs_avg[blgrp[0]]
        d_ls, w_ls = {}, {}
        for eq, key in eqs.items():
            d_ls[eq] = dc[key] * self.phs_avg.get(key, 1)
        if len(wgts) > 0:
            wc = DataContainer(wgts)
            for eq, key in eqs.items():
                w_ls[eq] = wc[key]
        return solver(data=d_ls, wgts=w_ls, sparse=sparse, **kwargs)
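
The d_ls dictionary built above maps linsolve equation strings to measured values, which is what the product solvers consume. A minimal self-contained sketch of that pattern (illustrative toy values only, assuming linsolve's usual solve() entry point):

import numpy as np
import linsolve

# Three product "measurements" of unknowns a, b, c, mirroring the
# {equation_string: data} dictionary that _solver hands to the solver.
a, b, c = 2.0 + 1.0j, 0.5 - 0.5j, 1.0 + 2.0j
data = {'a*b': np.array([a * b]),
        'a*c': np.array([a * c]),
        'b*c': np.array([b * c])}
ls = linsolve.LogProductSolver(data)  # log-linearizes the products (logcal-style)
sol = ls.solve()                      # e.g. sol['a'], sol['b'], sol['c']
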
Example No. 3
    def test_recalibrate_in_place(self):
        np.random.seed(21)
        vis = np.random.randn(10, 10) + 1.0j * np.random.randn(10, 10)
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        f = np.random.randn(10, 10) > 0
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        g0_new = np.random.randn(10, 10) + 1.0j * np.random.randn(10, 10)
        g1_new = np.random.randn(10, 10) + 1.0j * np.random.randn(10, 10)
        g_new = {(0, 'x'): g0_new, (1, 'x'): g1_new}
        g0_old = np.random.randn(10, 10) + 1.0j * np.random.randn(10, 10)
        g1_old = np.random.randn(10, 10) + 1.0j * np.random.randn(10, 10)
        g_old = {(0, 'x'): g0_old, (1, 'x'): g1_old}
        cal_flags = {
            (0, 'x'): np.random.randn(10, 10) > 0,
            (1, 'x'): np.random.randn(10, 10) > 0
        }
        # test standard operation
        ac.recalibrate_in_place(dc,
                                flags,
                                g_new,
                                cal_flags,
                                old_gains=g_old,
                                gain_convention='divide')
        for i in range(10):
            for j in range(10):
                self.assertAlmostEqual(
                    dc[(0, 1, 'xx')][i, j],
                    vis[i, j] * g0_old[i, j] * np.conj(g1_old[i, j]) /
                    g0_new[i, j] / np.conj(g1_new[i, j]))
                if (f[i, j] or cal_flags[(0, 'x')][i, j]
                        or cal_flags[(1, 'x')][i, j]):
                    self.assertTrue(flags[(0, 1, 'xx')][i, j])
                else:
                    self.assertFalse(flags[(0, 1, 'xx')][i, j])

        # test without old cal
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        ac.recalibrate_in_place(dc,
                                flags,
                                g_new,
                                cal_flags,
                                gain_convention='divide')
        for i in range(10):
            for j in range(10):
                self.assertAlmostEqual(
                    dc[(0, 1, 'xx')][i, j],
                    vis[i, j] / g0_new[i, j] / np.conj(g1_new[i, j]))

        # test multiply
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        ac.recalibrate_in_place(dc,
                                flags,
                                g_new,
                                cal_flags,
                                old_gains=g_old,
                                gain_convention='multiply')
        for i in range(10):
            for j in range(10):
                self.assertAlmostEqual(
                    dc[(0, 1, 'xx')][i, j],
                    vis[i, j] / g0_old[i, j] / np.conj(g1_old[i, j]) *
                    g0_new[i, j] * np.conj(g1_new[i, j]))

        # test flag propagation when missing antennas in gains
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        ac.recalibrate_in_place(dc,
                                flags, {},
                                cal_flags,
                                gain_convention='divide')
        np.testing.assert_array_equal(flags[(0, 1, 'xx')], True)
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        ac.recalibrate_in_place(dc,
                                flags,
                                g_new,
                                cal_flags,
                                old_gains={},
                                gain_convention='divide')
        np.testing.assert_array_equal(flags[(0, 1, 'xx')], True)

        # test error
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        with self.assertRaises(KeyError):
            ac.recalibrate_in_place(dc,
                                    flags,
                                    g_new,
                                    cal_flags,
                                    old_gains=g_old,
                                    gain_convention='blah')

        # test w/ data weights
        dc = DataContainer({(0, 1, 'xx'): deepcopy(vis)})
        flags = DataContainer({(0, 1, 'xx'): deepcopy(f)})
        wgts = DataContainer(
            {k: (~flags[k]).astype(float) for k in flags.keys()})
        del g_new[(0, 'x')]
        ac.recalibrate_in_place(dc,
                                wgts,
                                g_new,
                                cal_flags,
                                gain_convention='divide')
        self.assertAlmostEqual(wgts[(0, 1, 'xx')].max(), 0.0)
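
The assertions above all reduce to the standard recalibration relation for a baseline (i, j): with gain_convention='divide' the old gains are undone and the new ones applied, and 'multiply' swaps their roles. A minimal sketch of the 'divide' case being checked (not hera_cal code):

import numpy as np

def recalibrated_divide(vis, g_old_i, g_old_j, g_new_i, g_new_j):
    # V_cal = V * g_old_i * conj(g_old_j) / (g_new_i * conj(g_new_j))
    return vis * g_old_i * np.conj(g_old_j) / (g_new_i * np.conj(g_new_j))
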