def test_diffuse_foreground_orientation(self):
    fqs = np.linspace(.1, .2, 100, endpoint=False)
    omega_p = noise.bm_poly_to_omega_p(fqs)
    lsts = np.linspace(0, 2 * np.pi, 1000)
    Tsky_mdl = noise.HERA_Tsky_mdl['xx']
    bl_vec = (0, 30.0)
    vis = foregrounds.diffuse_foreground(lsts, fqs, bl_vec, Tsky_mdl=Tsky_mdl,
                                         fringe_filter_type='tophat', omega_p=omega_p)
    self.assertEqual(vis.shape, (lsts.size, fqs.size))

    # assert foregrounds show up at positive fringe-rates for FFT
    bl_vec = (100.0, 0.0)
    vis = foregrounds.diffuse_foreground(lsts, fqs, bl_vec, Tsky_mdl=Tsky_mdl,
                                         fringe_filter_type='gauss', fr_width=1e-5,
                                         omega_p=omega_p)
    dfft = np.fft.fftshift(np.fft.fft(
        vis * dspec.gen_window('blackmanharris', len(vis))[:, None], axis=0), axes=0)
    frates = np.fft.fftshift(
        np.fft.fftfreq(len(lsts), np.diff(lsts)[0] * 12 * 3600 / np.pi))
    max_frate = frates[np.argmax(np.abs(dfft[:, 0]))]
    nt.assert_true(max_frate > 0)

    bl_vec = (-100.0, 0.0)
    vis = foregrounds.diffuse_foreground(lsts, fqs, bl_vec, Tsky_mdl=Tsky_mdl,
                                         fringe_filter_type='gauss', fr_width=1e-5,
                                         omega_p=omega_p)
    dfft = np.fft.fftshift(np.fft.fft(
        vis * dspec.gen_window('blackmanharris', len(vis))[:, None], axis=0), axes=0)
    max_frate = frates[np.argmax(np.abs(dfft[:, 0]))]
    nt.assert_true(max_frate < 0)
def filter_data(self, data, frps, flags=None, nsamples=None,
                output_prefix='filt', keys=None, overwrite=False,
                edgecut_low=0, edgecut_hi=0, axis=0, verbose=True):
    """
    Apply an FIR filter to data.

    Args:
        data : DataContainer
            data to filter, must be consistent with self.lsts and self.freqs
        frps : DataContainer
            DataContainer holding 2D fringe-rate profiles for each key in data,
            with values the same shape as data.
        flags : DataContainer
            flags to use in averaging. Default is None.
            Must be consistent with self.lsts, self.freqs, etc.
        nsamples : DataContainer
            nsamples to use in averaging. Default is None.
            Must be consistent with self.lsts, self.freqs, etc.
        output_prefix : str
            Prefix for the output DataContainer attributes attached to self.
        keys : list of len-3 antpair-pol tuples
            List of data keys to operate on.
        overwrite : bool
            If True, overwrite existing keys in output DataContainers.
        edgecut_low : int, number of bins to flag on low side of axis
        edgecut_hi : int, number of bins to flag on high side of axis
        axis : int, axis of the data arrays along which to apply the FIR
        verbose : bool, report feedback to stdout
    """
    if not HAVE_UVTOOLS:
        raise ImportError("FRFilter.filter_data requires uvtools to be installed. "
                          "Install hera_cal[all]")

    # setup output containers
    for n in ['data', 'flags', 'nsamples']:
        name = "{}_{}".format(output_prefix, n)
        if not hasattr(self, name):
            setattr(self, name, DataContainer({}))
        if n == 'data':
            filt_data = getattr(self, name)
        elif n == 'flags':
            filt_flags = getattr(self, name)
        elif n == 'nsamples':
            filt_nsamples = getattr(self, name)

    # setup averaging quantities
    if flags is None:
        flags = DataContainer(dict([(k, np.zeros_like(data[k], bool)) for k in data]))
    if nsamples is None:
        nsamples = DataContainer(dict([(k, np.ones_like(data[k], float)) for k in data]))

    if keys is None:
        keys = data.keys()

    # iterate over keys
    for i, k in enumerate(keys):
        if k in filt_data and not overwrite:
            utils.echo("{} exists in output DataContainer and overwrite == False, "
                       "skipping...".format(k), verbose=verbose)
            continue

        # get wgts
        w = (~flags[k]).astype(float)
        shape = [1, 1]
        shape[axis] = -1
        w *= dspec.gen_window('none', w.shape[axis], edgecut_low=edgecut_low,
                              edgecut_hi=edgecut_hi).reshape(tuple(shape))
        f = np.isclose(w, 0.0)

        # calculate effective nsamples
        eff_nsamples = np.zeros_like(nsamples[k])
        eff_nsamples += np.sum(nsamples[k] * w, axis=axis, keepdims=True) \
            / np.sum(w, axis=axis, keepdims=True).clip(1e-10, np.inf)
        eff_nsamples *= fr_tavg(frps[k], axis=axis) \
            * np.sum(w, axis=axis, keepdims=True).clip(1e-10, np.inf) / w.shape[axis]

        # setup FIR
        fir, _ = frp_to_fir(frps[k], axis=axis, undo=False)

        # apply FIR
        dfilt = apply_fir(data[k], fir, wgts=w, axis=axis)

        # append
        filt_data[k] = dfilt
        filt_flags[k] = f
        filt_nsamples[k] = eff_nsamples
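# A minimal usage sketch of filter_data, not part of the source. It assumes
# hera_cal's FRFilter / VisClean I/O conventions; the filename is hypothetical
# and the flat (all-pass) fringe-rate profiles are placeholders, not a real model.
import numpy as np
from hera_cal import frf
from hera_cal.datacontainer import DataContainer

F = frf.FRFilter("zen.example.uvh5")  # hypothetical input file
F.read()
# one 2D fringe-rate profile per baseline key, same shape as the data
frps = DataContainer({k: np.ones_like(F.data[k], dtype=float) for k in F.data})
F.filter_data(F.data, frps, flags=F.flags, nsamples=F.nsamples,
              output_prefix='filt', overwrite=True)
# filtered products are attached as F.filt_data, F.filt_flags, F.filt_nsamples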
def test_neb(self):
    n = vis_clean.noise_eq_bandwidth(dspec.gen_window('blackmanharris', 10000))
    assert np.isclose(n, 1.9689862471203075)
def test_trim_model(self):
    # load data
    V = VisClean(os.path.join(DATA_PATH, "PyGSM_Jy_downselect.uvh5"))
    V.read(bls=[(23, 23, 'ee'), (23, 24, 'ee')])

    # interpolate to 768 frequencies
    freqs = np.linspace(120e6, 180e6, 768)
    for k in V.data:
        V.data[k] = interpolate.interp1d(V.freqs, V.data[k], axis=1,
                                         fill_value='extrapolate', kind='cubic')(freqs)
        V.flags[k] = np.zeros_like(V.data[k], dtype=bool)
    V.freqs = freqs
    V.Nfreqs = len(V.freqs)

    # add noise
    np.random.seed(0)
    k = (23, 24, 'ee')
    Op = noise.bm_poly_to_omega_p(V.freqs / 1e9)
    V.data[k] += noise.sky_noise_jy(V.data[(23, 23, 'ee')], V.freqs / 1e9, V.lsts,
                                    Op, inttime=50)

    # add lots of flags
    f = np.zeros(V.Nfreqs, dtype=bool)[None, :]
    f[:, 127:156] = True
    f[:, 300:303] = True
    f[:, 450:455] = True
    f[:, 625:630] = True
    V.flags[k] += f

    # vis clean
    V.vis_clean(data=V.data, flags=V.flags, keys=[k], tol=1e-6, min_dly=300,
                ax='freq', overwrite=True, window='tukey', alpha=0.2)
    V.fft_data(V.data, window='bh', overwrite=True, assign='dfft1')
    V.fft_data(V.clean_data, window='bh', overwrite=True, assign='dfft2')

    # trim model
    mdl, n = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0,
                                  delay_cut=500, kernel_size=21, polyfit_deg=None)
    clean_data2 = deepcopy(V.clean_data)
    clean_data2[k][V.flags[k]] = mdl[k][V.flags[k]]
    V.fft_data(clean_data2, window='bh', overwrite=True, assign='dfft3')

    # get averaged spectra
    n1 = vis_clean.noise_eq_bandwidth(dspec.gen_window('bh', V.Nfreqs))
    n2 = vis_clean.noise_eq_bandwidth(dspec.gen_window('bh', V.Nfreqs) * ~V.flags[k][0])
    d1 = np.mean(np.abs(V.dfft1[k]), axis=0) * n1
    d2 = np.mean(np.abs(V.dfft2[k]), axis=0) * n2
    d3 = np.mean(np.abs(V.dfft3[k]), axis=0) * n2

    # confirm that dfft3 and dfft1 match while dfft2 and dfft1 do not near the CLEAN boundary
    select = (np.abs(V.delays) < 300) & (np.abs(V.delays) > 100)
    assert np.isclose(np.mean(np.abs(d1)[select]), np.mean(np.abs(d3)[select]), atol=10)
    assert not np.isclose(np.mean(np.abs(d1)[select]), np.mean(np.abs(d2)[select]), atol=10)

    # test that polynomial fitting is a good fit
    _, n1 = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0,
                                 delay_cut=500, kernel_size=None, polyfit_deg=None)
    _, n2 = vis_clean.trim_model(V.clean_model, V.clean_resid, V.dnu, noise_thresh=3.0,
                                 delay_cut=500, kernel_size=None, polyfit_deg=5)
    assert (np.std(n1[k] - n2[k]) / np.mean(n2[k])) < 0.1  # residual is below 10% of fit

    # test that the well-conditioned check takes effect
    V2 = deepcopy(V)
    V2.clean_resid[k][:-2] = 0.0  # zero all the data except the last two integrations
    _, n2 = vis_clean.trim_model(V2.clean_model, V2.clean_resid, V2.dnu, noise_thresh=3.0,
                                 delay_cut=500, kernel_size=None, polyfit_deg=5)
    assert np.all(np.isclose(n2[k][-1], n1[k][-1]))  # non-zeroed output matches n1 (no polyfit)
def fft_data(data, delta_bin, wgts=None, axis=-1, window='none', alpha=0.2,
             edgecut_low=0, edgecut_hi=0, ifft=False, ifftshift=False,
             fftshift=True, zeropad=0):
    """
    FFT data along specified axis.

    Note the Fourier convention of ifft and fftshift.

    Args:
        data : complex ndarray
        delta_bin : bin size (seconds or Hz). If axis is a tuple, can feed as a
            tuple with bin size for time and freq axis respectively.
        wgts : float ndarray of shape (Ntimes, Nfreqs)
        axis : int, FFT axis. Can feed as tuple for 2D FFT.
        window : str
            Windowing function to apply across frequency before FFT. If axis is
            a tuple, can feed as a tuple specifying the window for each FFT axis.
        alpha : float
            If window is 'tukey', this is its alpha parameter. If axis is a
            tuple, can feed as a tuple specifying alpha for each FFT axis.
        edgecut_low : int, number of bins to consider zero-padded at the low side
            of the FFT axis, such that the windowing function smoothly approaches
            zero. If axis is a tuple, can feed as a tuple specifying for each FFT axis.
        edgecut_hi : int, number of bins to consider zero-padded at the high side
            of the FFT axis, such that the windowing function smoothly approaches
            zero. If axis is a tuple, can feed as a tuple specifying for each FFT axis.
        ifft : bool, if True, use ifft instead of fft
        ifftshift : bool, if True, ifftshift data along FT axis before FFT.
        fftshift : bool, if True, fftshift along FT axes after FFT.
        zeropad : int, number of zero-valued channels to append to each side of the
            FFT axis. If axis is a tuple, can feed as a tuple specifying for each FFT axis.

    Returns:
        dfft : complex ndarray FFT of data
        fourier_axes : fourier axes; if axis is ndimensional, so is this.
    """
    if not HAVE_UVTOOLS:
        raise ImportError("uvtools required, install hera_cal[all]")

    # type checks
    if not isinstance(axis, (tuple, list)):
        axis = [axis]
    if not isinstance(window, (tuple, list)):
        window = [window for i in range(len(axis))]
    if not isinstance(alpha, (tuple, list)):
        alpha = [alpha for i in range(len(axis))]
    if not isinstance(edgecut_low, (tuple, list)):
        edgecut_low = [edgecut_low for i in range(len(axis))]
    if not isinstance(edgecut_hi, (tuple, list)):
        edgecut_hi = [edgecut_hi for i in range(len(axis))]
    if not isinstance(zeropad, (tuple, list)):
        zeropad = [zeropad for i in range(len(axis))]
    if not isinstance(delta_bin, (tuple, list)):
        if len(axis) > 1:
            raise ValueError("delta_bin must have same len as axis")
        delta_bin = [delta_bin]
    else:
        if len(delta_bin) != len(axis):
            raise ValueError("delta_bin must have same len as axis")
    Nax = len(axis)

    # get a copy
    data = data.copy()

    # set FFT convention
    fourier_axes = []
    if ifft:
        fft = np.fft.ifft
    else:
        fft = np.fft.fft

    # get wgts
    if wgts is None:
        wgts = np.ones_like(data, dtype=float)
    data *= wgts

    # iterate over axes
    for i, ax in enumerate(axis):
        Nbins = data.shape[ax]

        # generate and apply window
        win = dspec.gen_window(window[i], Nbins, alpha=alpha[i],
                               edgecut_low=edgecut_low[i], edgecut_hi=edgecut_hi[i])
        wshape = np.ones(data.ndim, dtype=int)
        wshape[ax] = Nbins
        win.shape = tuple(wshape)
        data *= win

        # zeropad
        data, _ = zeropad_array(data, zeropad=zeropad[i], axis=ax)

        # ifftshift
        if ifftshift:
            data = np.fft.ifftshift(data, axes=ax)

        # FFT
        data = fft(data, axis=ax)

        # get fourier axis
        fax = np.fft.fftfreq(data.shape[ax], delta_bin[i])

        # fftshift
        if fftshift:
            data = np.fft.fftshift(data, axes=ax)
            fax = np.fft.fftshift(fax)

        fourier_axes.append(fax)

    if len(axis) == 1:
        fourier_axes = fourier_axes[0]

    return data, fourier_axes
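# A minimal usage sketch of fft_data, not part of the source. It assumes uvtools
# is installed (for the window function); the array sizes and channel width below
# are illustrative only.
import numpy as np

Ntimes, Nfreqs = 60, 128
dnu = 100e6 / Nfreqs  # channel width in Hz (illustrative)
rng = np.random.default_rng(0)
data = rng.standard_normal((Ntimes, Nfreqs)) + 1j * rng.standard_normal((Ntimes, Nfreqs))

# FFT across frequency (axis=-1) with a Blackman-Harris taper and 32 channels of
# zero-padding on each side; the delay axis comes back fftshifted, in seconds.
dfft, delays = fft_data(data, dnu, axis=-1, window='blackmanharris', zeropad=32)
assert dfft.shape == (Ntimes, Nfreqs + 2 * 32)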
def _compute_pspec_scalar(cosmo, beam_freqs, omega_ratio, pspec_freqs,
                          num_steps=5000, taper='none', little_h=True,
                          noise_scalar=False, exact_norm=False):
    r"""
    This is not to be used by the novice user to calculate a pspec scalar.
    Instead, look at the PSpecBeamUV and PSpecBeamGauss classes.

    Computes the scalar function to convert a power spectrum estimate in
    "telescope units" to cosmological units.

    See arxiv:1304.4991 and HERA memo #27 for details.

    Parameters
    ----------
    cosmo : conversions.Cosmo_Conversions instance
        Instance of the cosmological conversion object.

    beam_freqs : array of floats
        Frequency of beam integrals in omega_ratio in units of Hz.

    omega_ratio : array of floats
        Ratio of the integrated squared-beam power over the square of the
        integrated beam power for each frequency in beam_freqs,
        i.e. Omega_pp(nu) / Omega_p(nu)^2.

    pspec_freqs : array of floats
        Array of frequencies over which the power spectrum is estimated, in Hz.

    num_steps : int, optional
        Number of steps to use when interpolating primary beams for the
        numerical integral. Default: 5000.

    taper : str, optional
        Whether a tapering function (e.g. Blackman-Harris) is being used in
        the power spectrum estimation. Default: 'none'.

    little_h : boolean, optional
        Whether to have cosmological length units be h^-1 Mpc or Mpc. The value
        of h is obtained from the cosmo object stored in pspecbeam.
        Default: h^-1 Mpc.

    noise_scalar : boolean, optional
        Whether to calculate the power spectrum scalar or the noise power
        scalar. The noise power scalar only differs in that the Bpp_over_BpSq
        term becomes 1_over_Bp. See Pober et al. 2014, ApJ 782, 66, and
        Parsons HERA Memo #27. Default: False.

    exact_norm : boolean, optional
        Returns only X2Y for the scalar if True, else uses the existing
        framework involving antenna beam and spectral tapering factors.
        Default: False.

    Returns
    -------
    scalar : float
        [\int dnu (\Omega_PP / \Omega_P^2) (B_PP / B_P^2) / (X^2 Y)]^-1
        Units: h^-3 Mpc^3 or Mpc^3.
    """
    # Get integration freqs
    df = np.median(np.diff(pspec_freqs))
    integration_freqs = np.linspace(pspec_freqs.min(),
                                    pspec_freqs.min() + df * len(pspec_freqs),
                                    num_steps, endpoint=True, dtype=float)

    # The interpolations are generally more stable in MHz
    integration_freqs_MHz = integration_freqs / 1e6

    # Get redshifts and cosmological functions
    redshifts = cosmo.f2z(integration_freqs).flatten()
    X2Y = np.array([cosmo.X2Y(z, little_h=little_h) for z in redshifts])

    if exact_norm:
        # Beam and spectral tapering are already taken into account in the
        # normalization; we only use the averaged X2Y.
        scalar = integrate.trapz(X2Y, x=integration_freqs) \
            / np.abs(integration_freqs[-1] - integration_freqs[0])
        return scalar

    # Interpolate the frequency-dependent quantities derived from the beam
    # model onto the same frequency grid as the power spectrum estimation
    beam_model_freqs_MHz = beam_freqs / 1e6
    dOpp_over_Op2_fit = interp1d(beam_model_freqs_MHz, omega_ratio,
                                 kind='quadratic', fill_value='extrapolate')
    dOpp_over_Op2 = dOpp_over_Op2_fit(integration_freqs_MHz)

    # Get B_pp = \int dnu taper^2 and Bp = \int dnu
    if taper == 'none':
        dBpp_over_BpSq = np.ones_like(integration_freqs, float)
    else:
        dBpp_over_BpSq = dspec.gen_window(taper, len(pspec_freqs))**2.
        dBpp_over_BpSq = interp1d(pspec_freqs, dBpp_over_BpSq, kind='nearest',
                                  fill_value='extrapolate')(integration_freqs)
    dBpp_over_BpSq /= (integration_freqs[-1] - integration_freqs[0])**2.

    # Keep the dBpp_over_BpSq term or not
    if noise_scalar:
        dBpp_over_BpSq = 1. / (integration_freqs[-1] - integration_freqs[0])

    # Integrate to get scalar
    d_inv_scalar = dBpp_over_BpSq * dOpp_over_Op2 / X2Y
    scalar = 1. / integrate.trapz(d_inv_scalar, x=integration_freqs)

    return scalar
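# A minimal sketch of calling _compute_pspec_scalar directly, not part of the
# source. As the docstring notes, typical users should go through PSpecBeamUV or
# PSpecBeamGauss instead. The omega_ratio values below are toy placeholders, not
# a real beam model.
import numpy as np
from hera_pspec import conversions

cosmo = conversions.Cosmo_Conversions()
beam_freqs = np.linspace(100e6, 200e6, 200)   # Hz
pspec_freqs = np.linspace(150e6, 160e6, 64)   # Hz
omega_ratio = np.full_like(beam_freqs, 0.02)  # Omega_pp(nu) / Omega_p(nu)^2 (toy values)
scalar = _compute_pspec_scalar(cosmo, beam_freqs, omega_ratio, pspec_freqs,
                               taper='blackmanharris', little_h=True)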