def impl(time, interval, antenna1, antenna2,
         time_centroid=None, exposure=None, flag_row=None,
         uvw=None, weight=None, sigma=None,
         chan_freq=None, chan_width=None,
         vis=None, flag=None,
         weight_spectrum=None, sigma_spectrum=None,
         time_bin_secs=1.0, chan_bin_size=1):
    # Get the number of channels + correlations
    nchan, ncorr = chan_corrs(vis, flag, weight_spectrum, sigma_spectrum)

    # Merge flag_row and flag arrays
    flag_row = merge_flags(flag_row, flag)

    # Generate row mapping metadata
    row_meta = row_mapper(time, interval, antenna1, antenna2,
                          flag_row=flag_row, time_bin_secs=time_bin_secs)

    # Generate channel mapping metadata
    chan_meta = channel_mapper(nchan, chan_bin_size)

    # Average row data
    row_data = row_average(row_meta, antenna1, antenna2, flag_row=flag_row,
                           time_centroid=time_centroid, exposure=exposure,
                           uvw=uvw, weight=weight, sigma=sigma)

    # Average channel data
    chan_data = chan_average(chan_meta, chan_freq=chan_freq,
                             chan_width=chan_width)

    # Average row and channel data
    row_chan_data = row_chan_average(row_meta, chan_meta,
                                     flag_row=flag_row, weight=weight,
                                     vis=vis, flag=flag,
                                     weight_spectrum=weight_spectrum,
                                     sigma_spectrum=sigma_spectrum)

    # Have to explicitly write it out because numba tuples
    # are highly constrained types
    return AverageOutput(row_meta.time,
                         row_meta.interval,
                         row_meta.flag_row,
                         row_data.antenna1,
                         row_data.antenna2,
                         row_data.time_centroid,
                         row_data.exposure,
                         row_data.uvw,
                         row_data.weight,
                         row_data.sigma,
                         chan_data.chan_freq,
                         chan_data.chan_width,
                         row_chan_data.vis,
                         row_chan_data.flag,
                         row_chan_data.weight_spectrum,
                         row_chan_data.sigma_spectrum)
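# Illustrative usage (a sketch, not from the source): driving the averager
# above with small synthetic arrays. The dtypes and the 2 s / 4-channel bin
# sizes are arbitrary choices, and ``impl`` is assumed to tolerate the
# remaining optional columns being None.
#
#   nrow, nchan, ncorr = 10, 16, 4
#   time = np.linspace(0.0, 9.0, nrow)
#   interval = np.ones(nrow)
#   antenna1 = np.zeros(nrow, dtype=np.int32)
#   antenna2 = np.ones(nrow, dtype=np.int32)
#   vis = np.ones((nrow, nchan, ncorr), dtype=np.complex64)
#
#   avg = impl(time, interval, antenna1, antenna2, vis=vis,
#              time_bin_secs=2.0, chan_bin_size=4)
#   # avg.time, avg.interval, avg.vis, ... now hold the averaged columns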
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal

# Imports of the averaging functions under test (channel_mapper, row_mapper,
# time_and_channel, _gen_testing_lookup, _calc_sigma) are elided in this
# excerpt, as are the module-level nchan and ncorr constants and the pytest
# fixtures used by test_averager below.


def test_channel_mapper():
    chan_map, out_chans = channel_mapper(64, 17)

    uchan, counts = np.unique(chan_map, return_counts=True)

    assert_array_equal(chan_map[0:17], 0)
    assert_array_equal(chan_map[17:34], 1)
    assert_array_equal(chan_map[34:51], 2)
    assert_array_equal(chan_map[51:64], 3)

    assert_array_equal(uchan, [0, 1, 2, 3])
    assert_array_equal(counts, [17, 17, 17, 13])

    assert out_chans == 4
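# For reference, a minimal sketch of the mapping the test above pins down
# (an assumption about channel_mapper's behaviour, not its implementation):
# input channel c falls in output bin c // chan_bin_size and the final
# partial bin is kept, so 64 channels binned by 17 give 4 output channels.
def _channel_mapper_sketch(nchan, chan_bin_size):
    chan_map = np.arange(nchan) // chan_bin_size              # bin per channel
    out_chans = (nchan + chan_bin_size - 1) // chan_bin_size  # ceiling division
    return chan_map, out_chans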
def test_averager(time, ant1, ant2, flagged_rows,
                  uvw, interval, weight, sigma,
                  frequency, chan_width,
                  vis, flag,
                  weight_spectrum, sigma_spectrum,
                  time_bin_secs, chan_bin_size):

    time_centroid = time
    exposure = interval

    # vis and flag are factory fixtures; nchan and ncorr are
    # module-level constants (not shown in this excerpt)
    vis = vis(time.shape[0], nchan, ncorr)
    flag = flag(time.shape[0], nchan, ncorr)

    flag_row = np.zeros(time.shape, dtype=np.uint8)

    # flag_row and flag should agree
    flag_row[flagged_rows] = 1
    flag[flag_row.astype(bool), :, :] = 1
    flag[~flag_row.astype(bool), :, :] = 0
    assert_array_equal(flag.all(axis=(1, 2)).astype(np.uint8), flag_row)

    row_meta = row_mapper(time, interval, ant1, ant2,
                          flag_row, time_bin_secs)
    chan_map, chan_bins = channel_mapper(nchan, chan_bin_size)

    time_bl_row_map = _gen_testing_lookup(time_centroid, exposure, ant1, ant2,
                                          flag_row, time_bin_secs,
                                          row_meta)

    # Effective and Nominal rows associated with each output row
    eff_idx, nom_idx = zip(*[(nrows, erows) for _, _, nrows, erows
                             in time_bl_row_map])

    eff_idx = [ei for ei in eff_idx if len(ei) > 0]

    # Check that the averaged times from the test and accelerated lookup match
    assert_array_equal([t for t, _, _, _ in time_bl_row_map],
                       row_meta.time)

    avg = time_and_channel(time, interval, ant1, ant2,
                           flag_row=flag_row,
                           time_centroid=time, exposure=exposure, uvw=uvw,
                           weight=weight, sigma=sigma,
                           chan_freq=frequency, chan_width=chan_width,
                           vis=vis, flag=flag,
                           weight_spectrum=weight_spectrum,
                           sigma_spectrum=sigma_spectrum,
                           time_bin_secs=time_bin_secs,
                           chan_bin_size=chan_bin_size)

    # Take the mean time, but the first ant1 and ant2 in each bin
    expected_time_centroids = [time_centroid[i].mean(axis=0) for i in eff_idx]
    expected_times = [time[i].mean(axis=0) for i in nom_idx]
    expected_ant1 = [ant1[i[0]] for i in nom_idx]
    expected_ant2 = [ant2[i[0]] for i in nom_idx]
    expected_flag_row = [flag_row[i].any(axis=0) for i in eff_idx]

    # Take the mean uvw, but the sum of interval and exposure
    expected_uvw = [uvw[i].mean(axis=0) for i in eff_idx]
    expected_interval = [interval[i].sum(axis=0) for i in nom_idx]
    expected_exposure = [exposure[i].sum(axis=0) for i in eff_idx]
    expected_weight = [weight[i].sum(axis=0) for i in eff_idx]
    expected_sigma = [_calc_sigma(sigma, weight, i) for i in eff_idx]

    assert_array_equal(row_meta.time, expected_times)
    assert_array_equal(row_meta.interval, expected_interval)
    assert_array_equal(row_meta.flag_row, expected_flag_row)
    assert_array_equal(avg.antenna1, expected_ant1)
    assert_array_equal(avg.antenna2, expected_ant2)
    assert_array_equal(avg.time_centroid, expected_time_centroids)
    assert_array_equal(avg.exposure, expected_exposure)
    assert_array_equal(avg.uvw, expected_uvw)
    assert_array_equal(avg.weight, expected_weight)
    assert_array_equal(avg.sigma, expected_sigma)

    chan_avg_shape = (row_meta.interval.shape[0], chan_bins, flag.shape[2])

    assert avg.vis.shape == chan_avg_shape
    assert avg.flag.shape == chan_avg_shape
    assert avg.weight_spectrum.shape == chan_avg_shape
    assert avg.sigma_spectrum.shape == chan_avg_shape

    chan_ranges = np.nonzero(np.ediff1d(chan_map, to_begin=1, to_end=1))[0]
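    # chan_ranges marks channel bin boundaries: ediff1d is non-zero wherever
    # chan_map changes value, and the to_begin/to_end sentinels add the first
    # start and the final end, so consecutive pairs (chan_ranges[i],
    # chan_ranges[i + 1]) delimit the input channels of output channel i.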
    # Three python loops. Slow, but works...
    # Figure out some way to remove loops with numpy
    for orow, idx in enumerate(eff_idx):
        for ch, (cs, ce) in enumerate(zip(chan_ranges[:-1], chan_ranges[1:])):
            for corr in range(ncorr):
                in_flags = flag[idx, cs:ce, corr] != 0
                out_flag = in_flags.all()
                assert_array_equal(out_flag, avg.flag[orow, ch, corr])

                flags_match = in_flags == out_flag

                exp_vis = vis[idx, cs:ce, corr]
                exp_wts = weight_spectrum[idx, cs:ce, corr]
                exp_sigma = sigma_spectrum[idx, cs:ce, corr]

                # Only samples whose flag agrees with the output flag
                # contribute to the bin
                chunk_exp_vis = exp_vis[flags_match]
                chunk_exp_wts = exp_wts[flags_match]
                chunk_exp_sigma = exp_sigma[flags_match]

                # Weighted sum of visibilities, sum of sigma**2 * weight**2
                # and sum of weights over the bin
                exp_vis = (chunk_exp_vis * chunk_exp_wts).sum()
                exp_sigma = (chunk_exp_sigma**2 * chunk_exp_wts**2).sum()
                exp_wts = chunk_exp_wts.sum()

                # Normalise by the total weight, if any
                if exp_wts != 0.0:
                    exp_vis = exp_vis / exp_wts
                    exp_sigma = np.sqrt(exp_sigma / (exp_wts**2))

                assert_array_almost_equal(exp_vis,
                                          avg.vis[orow, ch, corr])
                assert_array_almost_equal(exp_wts,
                                          avg.weight_spectrum[orow, ch, corr])
                assert_array_almost_equal(exp_sigma,
                                          avg.sigma_spectrum[orow, ch, corr])
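# One possible answer to the "remove loops with numpy" note above: a sketch
# (my own, not from the source) that vectorises the two inner loops with
# np.add.reduceat over the channel axis. The outer loop over output rows
# remains because eff_idx holds ragged per-bin row-index lists. Argument
# names mirror the locals of test_averager.
def _expected_row_chan_averages(flag, vis, weight_spectrum, sigma_spectrum,
                                eff_idx, chan_ranges, chan_map):
    starts = chan_ranges[:-1]  # start index of each channel bin

    out_vis, out_wts, out_sigma = [], [], []

    for idx in eff_idx:
        f = flag[idx, :, :] != 0                          # (row, chan, corr)

        # A bin is flagged only if every contributing sample is flagged
        unflagged = (~f).astype(np.intp)
        per_row = np.add.reduceat(unflagged, starts, axis=1)
        bin_flag = per_row.sum(axis=0) == 0               # (bin, corr)

        # Samples contribute only when their flag matches the bin flag;
        # chan_map broadcasts the bin flags back onto input channels
        match = f == bin_flag[chan_map, :][np.newaxis, :, :]

        wts = np.where(match, weight_spectrum[idx, :, :], 0.0)
        wt_sum = np.add.reduceat(wts, starts, axis=1).sum(axis=0)
        vis_sum = np.add.reduceat(vis[idx, :, :] * wts,
                                  starts, axis=1).sum(axis=0)
        sigma_sum = np.add.reduceat(sigma_spectrum[idx, :, :]**2 * wts**2,
                                    starts, axis=1).sum(axis=0)

        # Normalise by the total weight where it is non-zero
        safe_wt = np.where(wt_sum != 0.0, wt_sum, 1.0)
        out_vis.append(np.where(wt_sum != 0.0, vis_sum / safe_wt, vis_sum))
        out_sigma.append(np.where(wt_sum != 0.0,
                                  np.sqrt(sigma_sum / safe_wt**2), sigma_sum))
        out_wts.append(wt_sum)

    return np.stack(out_vis), np.stack(out_wts), np.stack(out_sigma)

# The three stacked results should match avg.vis, avg.weight_spectrum and
# avg.sigma_spectrum to floating point tolerance.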