def reds_from_file(filename, vis_format='miriad'):
    """Get the redundant baseline pairs from a file.

    This is a wrapper around hera_cal.redcal.get_pos_reds that doesn't read
    the data file if it's possible to only read metadata.

    Parameters
    ----------
    filename : str
        The file to get reds from.
    vis_format : {'miriad', 'uvh5', 'uvfits', 'fhd', 'ms'}, optional
        Format of the data file. Default is 'miriad'.

    Returns
    -------
    reds : list of lists of tuples
        Each tuple represents antenna pairs. These are compiled in a list
        within a redundant group, and the outer list is all the redundant
        groups. See hera_cal.redcal.get_pos_reds.

    """
    from hera_cal.io import HERAData
    from hera_cal.redcal import get_pos_reds

    hd = HERAData(filename, filetype=vis_format)
    if hd.antpos is None:
        reds = get_pos_reds(hd.read()[0].antpos)
    else:
        reds = get_pos_reds(hd.antpos)
    del hd
    return reds
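# A minimal usage sketch for reds_from_file (the uvh5 path below is a
# hypothetical example). For uvh5 files the antenna positions are available
# from the metadata alone, so the visibilities themselves are never read.
reds = reds_from_file('zen.2457698.40355.xx.HH.uvh5', vis_format='uvh5')
print(len(reds), "redundant groups; first group:", reds[0])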
def test_get_blvec_reds():
    fname = os.path.join(DATA_PATH, "zen.2458042.17772.xx.HH.uvXA")
    uvd = UVData()
    uvd.read_miriad(fname)
    antpos, ants = uvd.get_ENU_antpos(pick_data_ants=True)
    reds = redcal.get_pos_reds(dict(list(zip(ants, antpos))))
    uvp = testing.uvpspec_from_data(fname, reds[:2], spw_ranges=[(10, 40)])

    # test execution w/ dictionary
    blvecs = dict(list(zip(uvp.bl_array, uvp.get_ENU_bl_vecs())))
    (red_bl_grp, red_bl_len, red_bl_ang,
     red_bl_tag) = utils.get_blvec_reds(blvecs, bl_error_tol=1.0)
    assert len(red_bl_grp) == 2
    assert red_bl_tag == ['015_060', '015_120']

    # test w/ a UVPSpec
    (red_bl_grp, red_bl_len, red_bl_ang,
     red_bl_tag) = utils.get_blvec_reds(uvp, bl_error_tol=1.0)
    assert len(red_bl_grp) == 2
    assert red_bl_tag == ['015_060', '015_120']

    # test w/ zero tolerance: each blpair is its own group
    (red_bl_grp, red_bl_len, red_bl_ang,
     red_bl_tag) = utils.get_blvec_reds(uvp, bl_error_tol=0.0)
    assert len(red_bl_grp) == uvp.Nblpairs

    # test combine angles
    uvp = testing.uvpspec_from_data(fname, reds[:3], spw_ranges=[(10, 40)])
    (red_bl_grp, red_bl_len, red_bl_ang,
     red_bl_tag) = utils.get_blvec_reds(uvp, bl_error_tol=1.0,
                                        match_bl_lens=True)
    assert len(red_bl_grp) == 1
def test_get_covariance(self):
    dfile = os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA')
    uvd = UVData()
    uvd.read(dfile)

    cosmo = conversions.Cosmo_Conversions()
    beamfile = os.path.join(DATA_PATH, 'HERA_NF_dipole_power.beamfits')
    uvb = pspecbeam.PSpecBeamUV(beamfile, cosmo=cosmo)

    Jy_to_mK = uvb.Jy_to_mK(np.unique(uvd.freq_array), pol='XX')
    uvd.data_array *= Jy_to_mK[None, None, :, None]

    uvd1 = uvd.select(times=np.unique(uvd.time_array)[:(uvd.Ntimes // 2):1],
                      inplace=False)
    uvd2 = uvd.select(times=np.unique(uvd.time_array)[(uvd.Ntimes // 2):(uvd.Ntimes // 2 + uvd.Ntimes // 2):1],
                      inplace=False)

    ds = pspecdata.PSpecData(dsets=[uvd1, uvd2], wgts=[None, None], beam=uvb)
    ds.rephase_to_dset(0)

    spws = utils.spw_range_from_freqs(uvd,
                                      freq_range=[(160e6, 165e6), (160e6, 165e6)],
                                      bounds_error=True)

    antpos, ants = uvd.get_ENU_antpos(pick_data_ants=True)
    antpos = dict(zip(ants, antpos))
    red_bls = redcal.get_pos_reds(antpos, bl_error_tol=1.0)
    bls1, bls2, blpairs = utils.construct_blpairs(red_bls[3],
                                                  exclude_auto_bls=True,
                                                  exclude_permutations=True)

    uvp = ds.pspec(bls1, bls2, (0, 1), [('xx', 'xx')], spw_ranges=spws,
                   input_data_weight='identity', norm='I',
                   taper='blackman-harris', store_cov=True,
                   cov_model='autos', verbose=False)

    key = (0, blpairs[0], "xx")

    cov_real = uvp.get_cov(key, component='real')
    assert cov_real[0].shape == (50, 50)
    cov_imag = uvp.get_cov(key, component='imag')
    assert cov_imag[0].shape == (50, 50)

    uvp.fold_spectra()

    cov_real = uvp.get_cov(key, component='real')
    assert cov_real[0].shape == (24, 24)
    cov_imag = uvp.get_cov(key, component='imag')
    assert cov_imag[0].shape == (24, 24)
def test_uvpspec_from_data():
    # get data
    fname = os.path.join(DATA_PATH, "zen.even.xx.LST.1.28828.uvOCRSA")
    fname_std = os.path.join(DATA_PATH, "zen.even.std.xx.LST.1.28828.uvOCRSA")
    uvd = UVData()
    uvd.read_miriad(fname)
    beamfile = os.path.join(DATA_PATH, 'HERA_NF_dipole_power.beamfits')
    beam = pspecbeam.PSpecBeamUV(beamfile)

    # test basic execution
    uvp = testing.uvpspec_from_data(fname, [(37, 38), (38, 39), (52, 53), (53, 54)],
                                    beam=beam, spw_ranges=[(50, 100)])
    assert uvp.Nfreqs == 50
    assert np.unique(uvp.blpair_array).tolist() == [
        137138138139, 137138152153, 137138153154,
        138139152153, 138139153154, 152153153154]
    uvp2 = testing.uvpspec_from_data(uvd, [(37, 38), (38, 39), (52, 53), (53, 54)],
                                     beam=beamfile, spw_ranges=[(50, 100)])
    uvp.history = ''
    uvp2.history = ''
    assert uvp == uvp2

    # test multiple bl groups
    antpos, ants = uvd.get_ENU_antpos(pick_data_ants=True)
    reds = redcal.get_pos_reds(dict(zip(ants, antpos)))
    uvp = testing.uvpspec_from_data(fname, reds[:3], beam=beam,
                                    spw_ranges=[(50, 100)])
    assert len(set(uvp.bl_array)
               - set([137138, 137151, 137152, 138139, 138152, 138153, 139153,
                      139154, 151152, 151167, 152153, 152167, 152168, 153154,
                      153168, 153169, 154169, 167168, 168169])) == 0
    assert uvp.Nblpairs == 51

    # test exceptions
    pytest.raises(AssertionError, testing.uvpspec_from_data, fname, (37, 38))
    pytest.raises(AssertionError, testing.uvpspec_from_data, fname,
                  [([37, 38], [38, 39])])
    pytest.raises(AssertionError, testing.uvpspec_from_data, fname,
                  [[[37, 38], [38, 39]]])

    # test std
    uvp = testing.uvpspec_from_data(fname, [(37, 38), (38, 39), (52, 53), (53, 54)],
                                    data_std=fname_std, beam=beam,
                                    spw_ranges=[(20, 28)])
def test_reds_from_file_no_read_file():
    from hera_cal.io import HERAData
    from hera_cal.redcal import get_pos_reds

    # uvh5 file will not need to be read in
    testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvh5')
    reds = ant_metrics.reds_from_file(testfile, vis_format='uvh5')
    assert len(reds) > 1
    hd = HERAData(testfile, filetype='uvh5')
    reds_check = get_pos_reds(hd.antpos)
    assert reds == reds_check
def test_get_pos_red(self):
    pos = build_hex_array(3, sep=14.7)
    self.assertEqual(len(om.get_pos_reds(pos)), 30)

    pos = build_hex_array(7, sep=14.7)
    self.assertEqual(len(om.get_pos_reds(pos)), 234)
    for ant, r in pos.items():
        pos[ant] += [0, 0, 1 * r[0] - .5 * r[1]]
    self.assertEqual(len(om.get_pos_reds(pos)), 234)

    pos = build_hex_array(7, sep=1)
    self.assertLess(len(om.get_pos_reds(pos)), 234)
    self.assertEqual(len(om.get_pos_reds(pos, bl_error_tol=.1)), 234)

    pos = build_hex_array(7, sep=14.7)
    blerror = 1.0 - 1e-12
    error = blerror / 4
    for key, val in pos.items():
        th = np.random.choice([0, np.pi / 2, np.pi])
        phi = np.random.choice([0, np.pi / 2, np.pi, 3 * np.pi / 2])
        pos[key] = val + error * np.array([np.sin(th) * np.cos(phi),
                                           np.sin(th) * np.sin(phi),
                                           np.cos(th)])
    self.assertEqual(len(om.get_pos_reds(pos, bl_error_tol=1.0)), 234)
    self.assertGreater(len(om.get_pos_reds(pos, bl_error_tol=.99)), 234)

    pos = {0: np.array([0, 0, 0]),
           1: np.array([20, 0, 0]),
           2: np.array([10, 0, 0])}
    self.assertEqual(om.get_pos_reds(pos), [[(0, 2), (2, 1)], [(0, 1)]])
    self.assertEqual(om.get_pos_reds(pos, low_hi=True),
                     [[(0, 2), (1, 2)], [(0, 1)]])
def test_bootstrap_resampled_error():
    # generate a UVPSpec
    visfile = os.path.join(DATA_PATH, "zen.even.xx.LST.1.28828.uvOCRSA")
    beamfile = os.path.join(DATA_PATH, "HERA_NF_dipole_power.beamfits")
    cosmo = conversions.Cosmo_Conversions()
    beam = pspecbeam.PSpecBeamUV(beamfile, cosmo=cosmo)
    uvd = UVData()
    uvd.read_miriad(visfile)
    ap, a = uvd.get_ENU_antpos(pick_data_ants=True)
    reds = redcal.get_pos_reds(dict(zip(a, ap)), bl_error_tol=1.0)[:3]
    uvp = testing.uvpspec_from_data(uvd, reds, spw_ranges=[(50, 100)],
                                    beam=beam, cosmo=cosmo)

    # Lots of this function is already tested by bootstrap_run
    # so only test the stuff not already tested
    if os.path.exists("uvp.h5"):
        os.remove("uvp.h5")
    uvp.write_hdf5("uvp.h5", overwrite=True)
    ua, ub, uw = grouping.bootstrap_resampled_error("uvp.h5", blpair_groups=None,
                                                    Nsamples=10, seed=0,
                                                    verbose=False)

    # check number of boots
    assert len(ub) == 10

    # check seed has been used properly
    assert uw[0][0][:5] == [1.0, 1.0, 0.0, 2.0, 1.0]
    assert uw[0][1][:5] == [2.0, 1.0, 1.0, 6.0, 1.0]
    assert uw[1][0][:5] == [2.0, 2.0, 1.0, 1.0, 2.0]
    assert uw[1][1][:5] == [1.0, 0.0, 1.0, 1.0, 4.0]

    if os.path.exists("uvp.h5"):
        os.remove("uvp.h5")
def test_spherical_average():
    # create two polarization data
    uvd = UVData()
    uvd.read(os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'))

    # load other data, get reds and make UVPSpec
    beamfile = os.path.join(DATA_PATH, "HERA_NF_dipole_power.beamfits")
    cosmo = conversions.Cosmo_Conversions()
    beam = pspecbeam.PSpecBeamUV(beamfile, cosmo=cosmo)
    ap, a = uvd.get_ENU_antpos(pick_data_ants=True)
    reds = redcal.get_pos_reds(dict(zip(a, ap)), bl_error_tol=1.0)
    reds = [r[:2] for r in reds]
    uvp = testing.uvpspec_from_data(uvd, reds, spw_ranges=[(50, 75), (100, 125)],
                                    beam=beam, cosmo=cosmo)
    uvd.polarization_array[0] = -6
    uvp += testing.uvpspec_from_data(uvd, reds, spw_ranges=[(50, 75), (100, 125)],
                                     beam=beam, cosmo=cosmo)

    # insert cov_array and stats_array
    uvp.cov_model = 'empirical'
    uvp.cov_array_real = {
        s: np.repeat(np.repeat(np.eye(uvp.Ndlys, dtype=np.float64)[None, :, :, None],
                               uvp.Nblpairts, 0), uvp.Npols, -1)
        for s in range(uvp.Nspws)
    }
    uvp.cov_array_imag = {
        s: np.repeat(np.repeat(np.eye(uvp.Ndlys, dtype=np.float64)[None, :, :, None],
                               uvp.Nblpairts, 0), uvp.Npols, -1)
        for s in range(uvp.Nspws)
    }
    uvp.stats_array = {
        'err': {
            s: np.ones((uvp.Nblpairts, uvp.Ndlys, uvp.Npols), dtype=np.complex128)
            for s in range(uvp.Nspws)
        }
    }

    # try a spherical average
    kbins = np.arange(0, 2.9, 0.25)
    Nk = len(kbins)
    bin_widths = 0.25
    A = {}
    sph = grouping.spherical_average(uvp, kbins, bin_widths,
                                     add_to_history='checking 1 2 3', A=A)

    # metadata
    assert sph.Nblpairs == 1
    assert 'checking 1 2 3' in sph.history
    assert np.isclose(sph.get_blpair_seps(), 0).all()  # assert kperp has no magnitude
    assert 'err' in sph.stats_array

    for spw in sph.spw_array:
        # binning and normalization
        assert np.isclose(sph.get_kparas(spw), kbins).all()  # assert kbins are input kbins
        assert np.isclose(sph.window_function_array[spw].sum(axis=2), 1).all()  # assert window func is normalized

        # check low k modes are greater than high k modes
        # this is a basic "averaged data smell test" in lieu of a known pspec to compare to
        assert np.all(sph.data_array[spw][:, 0, :].real
                      / sph.data_array[spw][:, 10, :] > 1e3)

        # assert errorbars are 1/sqrt(N) what they used to be
        assert np.isclose(np.sqrt(sph.cov_array_real[spw])[:, range(Nk), range(Nk)],
                          1 / np.sqrt(A[spw].sum(axis=1))).all()
        assert np.isclose(sph.stats_array['err'][spw],
                          1 / np.sqrt(A[spw].sum(axis=1))).all()

    # bug check: time_avg_array was not down-selected to new Nblpairts
    assert sph.time_avg_array.size == sph.Nblpairts
    # bug check: cov_array_imag was not updated
    assert sph.cov_array_real[0].shape == sph.cov_array_imag[0].shape

    # try without little h
    sph2 = grouping.spherical_average(uvp, kbins * cosmo.h, bin_widths * cosmo.h,
                                      little_h=False)
    for spw in sph.spw_array:
        assert np.isclose(sph.get_kparas(spw), sph2.get_kparas(spw)).all()

    # try time average
    sph = grouping.spherical_average(uvp, kbins, bin_widths, time_avg=True)
    assert sph.Ntimes == 1

    # try weighting by stats_array
    sph = grouping.spherical_average(uvp, kbins, bin_widths, error_weights='err')
    for spw in sph.spw_array:
        assert np.isclose(sph.window_function_array[spw].sum(axis=2), 1).all()

    # weight by covariance (should be same result as stats given predefined values!)
    sph2 = grouping.spherical_average(uvp, kbins, bin_widths, weight_by_cov=True)
    for spw in sph2.spw_array:
        assert np.isclose(sph2.window_function_array[spw].sum(axis=2), 1).all()
        assert np.isclose(sph.data_array[spw], sph2.data_array[spw]).all()

    # slice into stats array and set region of k_perp k_para to infinite variance
    uvp2 = copy.deepcopy(uvp)
    uvp2.set_stats_slice('err', 0, 1000, above=False, val=np.inf)
    sph2 = grouping.spherical_average(uvp2, kbins, bin_widths, error_weights='err')

    # assert low k modes are zeroed!
    assert np.isclose(sph2.data_array[0][:, :3, :], 0).all()

    # assert bins that weren't nulled still have proper window normalization
    for spw in sph2.spw_array:
        assert np.isclose(sph2.window_function_array[spw].sum(axis=2)[:, 3:, :], 1).all()

    # assert resultant stats are not nan
    assert (~np.isnan(sph2.stats_array['err'][0])).all()

    # try combine after spherical and assert it is equivalent
    sph_a, sph_b = (sph.select(spws=[0], inplace=False),
                    sph.select(spws=[1], inplace=False))
    sph_c = uvpspec.combine_uvpspec([sph_a, sph_b], merge_history=False,
                                    verbose=False)
    # bug check: in the past, combine after spherical average erroneously changed dly_array
    assert sph == sph_c

    # insert an inf into the arrays as a test
    uvp2 = copy.deepcopy(uvp)
    uvp2.cov_array_real[0][0], uvp2.cov_array_imag[0][0] = np.inf, np.inf
    uvp2.stats_array['err'][0][0] = np.inf
    sph = grouping.spherical_average(uvp, kbins, bin_widths)
    assert np.isfinite(sph.cov_array_real[0]).all()

    # exceptions
    pytest.raises(AssertionError, grouping.spherical_average, uvp, kbins, 1.0)
def test_bootstrap_run():
    # generate a UVPSpec and container
    visfile = os.path.join(DATA_PATH, "zen.even.xx.LST.1.28828.uvOCRSA")
    beamfile = os.path.join(DATA_PATH, "HERA_NF_dipole_power.beamfits")
    cosmo = conversions.Cosmo_Conversions()
    beam = pspecbeam.PSpecBeamUV(beamfile, cosmo=cosmo)
    uvd = UVData()
    uvd.read_miriad(visfile)
    ap, a = uvd.get_ENU_antpos(pick_data_ants=True)
    reds = redcal.get_pos_reds(dict(zip(a, ap)), bl_error_tol=1.0)[:3]
    uvp = testing.uvpspec_from_data(uvd, reds, spw_ranges=[(50, 100)],
                                    beam=beam, cosmo=cosmo)
    if os.path.exists("ex.h5"):
        os.remove("ex.h5")
    psc = container.PSpecContainer("ex.h5", mode='rw', keep_open=False, swmr=False)
    psc.set_pspec("grp1", "uvp", uvp)

    # Test basic bootstrap run
    grouping.bootstrap_run(psc, time_avg=True, Nsamples=100, seed=0,
                           normal_std=True, robust_std=True,
                           cintervals=[16, 84], keep_samples=True,
                           bl_error_tol=1.0, overwrite=True,
                           add_to_history='hello!', verbose=False)
    spcs = psc.spectra("grp1")

    # assert all bs samples were written
    assert np.all(["uvp_bs{}".format(i) in spcs for i in range(100)])
    # assert average was written
    assert "uvp_avg" in spcs and "uvp" in spcs
    # assert average only has one time and 3 blpairs
    uvp_avg = psc.get_pspec("grp1", "uvp_avg")
    assert uvp_avg.Ntimes == 1
    assert uvp_avg.Nblpairs == 3
    # check avg file history
    assert "hello!" in uvp_avg.history
    # assert original uvp is unchanged
    assert uvp == psc.get_pspec("grp1", 'uvp')

    # check stats array
    np.testing.assert_array_equal(
        [u'bs_cinterval_16.00', u'bs_cinterval_84.00', u'bs_robust_std', u'bs_std'],
        list(uvp_avg.stats_array.keys()))
    for stat in [u'bs_cinterval_16.00', u'bs_cinterval_84.00',
                 u'bs_robust_std', u'bs_std']:
        assert uvp_avg.get_stats(stat, (0, ((37, 38), (38, 39)),
                                        ('xx', 'xx'))).shape == (1, 50)
        assert not np.any(np.isnan(uvp_avg.get_stats(stat, (0, ((37, 38), (38, 39)),
                                                            ('xx', 'xx')))))
        assert uvp_avg.get_stats(stat, (0, ((37, 38), (38, 39)),
                                        ('xx', 'xx'))).dtype == np.complex128

    # test exceptions
    del psc
    if os.path.exists("ex.h5"):
        os.remove("ex.h5")
    psc = container.PSpecContainer("ex.h5", mode='rw', keep_open=False, swmr=False)
    # test empty groups
    pytest.raises(AssertionError, grouping.bootstrap_run, "ex.h5")
    # test bad filename
    pytest.raises(AssertionError, grouping.bootstrap_run, 1)
    # test fed spectra doesn't exist
    psc.set_pspec("grp1", "uvp", uvp)
    pytest.raises(AssertionError, grouping.bootstrap_run, psc, spectra=['grp1/foo'])
    # test assertionerror if SWMR
    psc = container.PSpecContainer("ex.h5", mode='rw', keep_open=False, swmr=True)
    pytest.raises(AssertionError, grouping.bootstrap_run, psc, spectra=['grp1/foo'])

    if os.path.exists("ex.h5"):
        os.remove("ex.h5")
def red_average(data, reds=None, bl_tol=1.0, inplace=False,
                wgts=None, flags=None, nsamples=None):
    """
    Redundantly average visibilities in a DataContainer, HERAData or UVData object.
    Average is weighted by integration_time * nsamples * ~flags unless wgts are fed.

    Args:
        data : DataContainer, HERAData or UVData object
            Object to redundantly average
        reds : list, optional
            Nested lists of antpair tuples to redundantly average.
            E.g. [ [(1, 2), (2, 3)], [(1, 3), (2, 4)], ...]
            If None, will calculate these from the metadata
        bl_tol : float
            Baseline redundancy tolerance in meters. Only used if reds is None.
        inplace : bool
            Perform average and downselect inplace, otherwise returns a deepcopy.
            The first baseline in each reds sublist is kept.
        wgts : DataContainer
            Manual weights to use in redundant average. If provided, these
            supersede flags and nsamples, and will also be used if input data
            is a UVData or a subclass of it.
        flags : DataContainer
            If data is a DataContainer, these are its flags.
            Default (None) is no flags.
        nsamples : DataContainer
            If data is a DataContainer, these are its nsamples.
            Default (None) is 1.0 for all pixels. Furthermore, if data is a
            DataContainer, integration_time is 1.0 for all pixels.

    Returns:
        if fed a DataContainer:
            DataContainer, averaged data
            DataContainer, averaged flags
            DataContainer, summed nsamples
        elif fed a HERAData or UVData:
            HERAData or UVData object, averaged data

    Notes:
        1. Different polarizations are assumed to be non-redundant.
        2. Default weighting is nsamples * integration_time * ~flags.
        3. If a wgts container is fed then it supersedes flag and nsample weighting.
    """
    from hera_cal import redcal, datacontainer

    # type checks
    if not (isinstance(data, datacontainer.DataContainer) or isinstance(data, UVData)):
        raise ValueError("data must be a DataContainer or a UVData or its subclass")
    fed_container = isinstance(data, datacontainer.DataContainer)

    # fill DataContainers if necessary
    if fed_container:
        if not inplace:
            flags = copy.deepcopy(flags)
            nsamples = copy.deepcopy(nsamples)
        if flags is None:
            flags = datacontainer.DataContainer({k: np.zeros_like(data[k], bool)
                                                 for k in data})
        if nsamples is None:
            nsamples = datacontainer.DataContainer({k: np.ones_like(data[k], float)
                                                    for k in data})

    # get weights: if wgts are not fed, then use flags and nsamples
    if wgts is None:
        if fed_container:
            wgts = datacontainer.DataContainer({k: nsamples[k] * ~flags[k]
                                                for k in data})
        else:
            wgts = datacontainer.DataContainer({k: data.get_nsamples(k) * ~data.get_flags(k)
                                                for k in data.get_antpairpols()})

    # deepcopy
    if not inplace:
        data = copy.deepcopy(data)

    # get metadata
    if fed_container:
        pols = sorted(data.pols())
    else:
        pols = [polnum2str(pol, x_orientation=data.x_orientation)
                for pol in data.polarization_array]

    # get redundant groups
    if reds is None:
        # if DataContainer, check for antpos
        if fed_container:
            if not hasattr(data, 'antpos') or data.antpos is None:
                raise ValueError("DataContainer must have antpos dictionary to calculate reds")
            antposd = data.antpos
        else:
            antpos, ants = data.get_ENU_antpos()
            antposd = dict(zip(ants, antpos))
        reds = redcal.get_pos_reds(antposd, bl_error_tol=bl_tol)

    # eliminate baselines not in data
    if fed_container:
        antpairs = sorted(data.antpairs())
    else:
        antpairs = data.get_antpairs()
    reds = [[bl for bl in blg if bl in antpairs] for blg in reds]
    reds = [blg for blg in reds if len(blg) > 0]

    # iterate over redundant groups and polarizations
    for pol in pols:
        for blg in reds:
            # get data and weighting for this pol-blgroup
            if fed_container:
                d = np.asarray([data[bl + (pol,)] for bl in blg])
                f = np.asarray([(~flags[bl + (pol,)]).astype(float) for bl in blg])
                n = np.asarray([nsamples[bl + (pol,)] for bl in blg])
                # DataContainer can't track integration time, so no tint here
                tint = np.array([1.0])
                w = np.asarray([wgts[bl + (pol,)] for bl in blg])
            else:
                d = np.asarray([data.get_data(bl + (pol,)) for bl in blg])
                f = np.asarray([(~data.get_flags(bl + (pol,))).astype(float) for bl in blg])
                n = np.asarray([data.get_nsamples(bl + (pol,)) for bl in blg])
                tint = np.asarray([data.integration_time[data.antpair2ind(bl + (pol,))]
                                   for bl in blg])[:, :, None]
                w = np.asarray([wgts[bl + (pol,)] for bl in blg]) * tint

            # take the weighted average
            wsum = np.sum(w, axis=0).clip(1e-10, np.inf)  # this is the normalization
            davg = np.sum(d * w, axis=0) / wsum           # weighted average
            navg = np.sum(n * f, axis=0)                  # this is the new total nsample (without flagged elements)
            fmax = np.max(f, axis=2)                      # collapse along freq: marks any fully flagged integrations
            iavg = np.sum(tint.squeeze() * fmax, axis=0) / np.sum(fmax, axis=0).clip(1e-10, np.inf)
            favg = np.isclose(wsum, 0.0)                  # this is getting any fully flagged pixels

            # replace with new data
            if fed_container:
                blkey = blg[0] + (pol,)
                data[blkey] = davg
                flags[blkey] = favg
                nsamples[blkey] = navg
            else:
                blinds = data.antpair2ind(blg[0])
                polind = pols.index(pol)
                data.data_array[blinds, 0, :, polind] = davg
                data.flag_array[blinds, 0, :, polind] = favg
                data.nsample_array[blinds, 0, :, polind] = navg
                data.integration_time[blinds] = iavg

    # select out averaged bls
    bls = [blg[0] + (pol,) for pol in pols for blg in reds]
    if fed_container:
        for bl in list(data.keys()):
            if bl not in bls:
                del data[bl]
    else:
        data.select(bls=bls)

    if not inplace:
        if fed_container:
            return data, flags, nsamples
        else:
            return data
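# A minimal usage sketch for red_average on a UVData object (the file path
# below is a hypothetical example of a pyuvdata-readable dataset). With
# inplace=False, a redundantly averaged copy is returned, keeping one
# representative baseline per redundant group.
from pyuvdata import UVData

uvd = UVData()
uvd.read('zen.example.uvh5')            # hypothetical input file
uvd_avg = red_average(uvd, bl_tol=1.0)  # weighted by nsamples * integration_time * ~flags
print(uvd_avg.Nbls, "baselines after redundant averaging")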
""" x1, y1, z1 = antpos[bl[0]] x2, y2, z2 = antpos[bl[1]] dist = np.sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2) return dist ## Generate visibilities for all redundant baselines in array. ants = np.loadtxt('antenna_positions_37.dat') idxs = np.arange(37) antpos = {} for k, v in zip(idxs, ants): antpos[k] = v reds = redcal.get_pos_reds(antpos) # Extract all ants ants = list(set([ant for bls in reds for bl in bls for ant in bl])) # Generate gains gains = sigchain.gen_gains(fqs, ants, dly_rng=(-1, 1)) true_vis, data = {}, {} # Generate sky model--common for all ants Tsky_mdl = noise.HERA_Tsky_mdl['xx'] tsky = noise.resample_Tsky(fqs, lsts, Tsky_mdl=noise.HERA_Tsky_mdl['xx']) fp = h5py.File('fake_vis.hdf5', 'a') fp.attrs.create('Nants', 37)
ap = argparse.ArgumentParser(
    description="Redundantly average a data file that's already been calibrated.")
ap.add_argument("infilename", type=str,
                help="path to visibility data file to redundantly average")
ap.add_argument("outfilename", type=str,
                help="path to new visibility results file")
ap.add_argument("--bl_error_tol", type=float, default=1.0,
                help="Baseline redundancy tolerance in meters.")
ap.add_argument("--clobber", default=False, action="store_true",
                help="overwrite existing file at outfilename")
args = ap.parse_args()

# Load data
hd = io.HERAData(args.infilename)
hd.read()

# Redundantly average
reds = redcal.get_pos_reds(hd.data_antpos, bl_error_tol=args.bl_error_tol,
                           include_autos=True)
utils.red_average(hd, reds=reds, inplace=True)

# Write data
hd.write_uvh5(args.outfilename, clobber=args.clobber)
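# A hedged usage sketch: invoking the script above from the shell, assuming it
# is saved as red_average.py and the input file exists (both paths are
# hypothetical):
#
#   python red_average.py zen.calibrated.uvh5 zen.red_avg.uvh5 \
#       --bl_error_tol 1.0 --clobber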
for i in range(len(dsets)):
    freqs = dsets[i].freq_array.flatten()
    dsets[i].data_array *= beam.Jy_to_mK(freqs)[None, None, :, None]
    dsets[i].vis_units = 'mK'

#-------------------------------------------------------------------------------
# Calculate power spectrum and package output into PSpecContainer
#-------------------------------------------------------------------------------

# Package data files into PSpecData object
ds = hp.PSpecData(dsets=dsets, wgts=wgts, beam=beam)

# Set-up which baselines to cross-correlate
antpos, ants = dsets[0].get_ENU_antpos(pick_data_ants=True)
antpos = dict(zip(ants, antpos))
red_bls = redcal.get_pos_reds(antpos, bl_error_tol=1.0)

# FIXME: Use only the first redundant baseline group for now
bls = red_bls[0]
print("Baselines: %s" % bls)

# Replace default pspec settings if specified in config file
for key in pspec_defaults.keys():
    if key in pspec_cfg.keys():
        pspec_defaults[key] = pspec_cfg[key]

# Open or create PSpecContainer to store output power spectra
ps_store = hp.PSpecContainer(pspec_cfg['output'], mode='rw')

# Loop over pairs of datasets
dset_idxs = range(len(ds.dsets))
for i in dset_idxs: