def jacobian(params):
    a = params[0]
    i = params[1]
    t = params[2]
    # Get the frequencies, including the negative ones.
    df = 1.0 / dt / n_time
    f = sp.arange(n_time, dtype=float)
    f[n_time // 2 + 1:] -= f[-1] + 1
    f = abs(f) * df
    # 0th mode is meaningless.  Set to unity to avoid errors.
    f[0] = 1
    # The power law part.
    p_law = (f / f_0)**(-(params[1]**2 + min_index))
    # Memory for the three derivative functions.
    spec = sp.empty((3, n_time), dtype=float)
    # The formulae for the following derivatives are derived in Kiyo's
    # notes, Nov. 28, 2011.
    # Derivative with respect to the amplitude parameter.
    spec[0, :] = -2.0 * a * p_law
    # Derivative with respect to the index parameter.
    spec[1, :] = (a**2 + T_small**2) * p_law * sp.log(f / f_0) * 2.0 * i
    # Derivative with respect to the thermal parameter.
    spec[2, :] = -2.0 * t
    # Get rid of the mean mode.
    spec[:, 0] = 0
    # Convolve with the window function.
    spec = npow.convolve_power(spec, window[None, :], -1)
    # Prune to the same size as the data.
    spec = npow.prune_power(spec, -1)
    spec = spec[:, 1:].real
    return spec / weights
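# Illustrative sketch (a hypothetical helper, not part of the original
# module): the closure above follows the common pattern of handing an
# analytic Jacobian of the residuals to a least-squares fitter.  The toy
# example below shows that pattern with scipy.optimize.leastsq and a plain
# power-law model; the real fit above also folds in the window convolution,
# the thermal term, and the squared-parameter bounds.
def _demo_leastsq_with_analytic_jacobian():
    import numpy as np
    from scipy import optimize

    x = np.linspace(0.1, 10.0, 200)
    data = 2.0 * x**-1.3 + 0.01 * np.random.standard_normal(len(x))

    def residuals(params):
        a, i = params
        return data - a * x**i

    def jac(params):
        # Standard (n_residuals, n_params) Jacobian of the residuals.
        a, i = params
        return np.array([-x**i, -a * x**i * np.log(x)]).T

    fit, ier = optimize.leastsq(residuals, x0=(1.0, -1.0), Dfun=jac)
    return fit  # should be close to (2.0, -1.3)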
def test_fit_over_f_plus_const(self):
    dt = 0.13
    n_time = 10000
    amp = 0.67  # K**2/Hz
    index = -1.3
    f_0 = 1.0
    thermal = 2.7  # K**2/Hz
    BW = 1. / dt / 2
    window = sig.get_window('hanning', n_time)
    n_spec = 10
    p = 0
    for ii in range(n_spec):
        time_stream = noise_power.generate_overf_noise(amp, index, f_0,
                                                       dt, n_time)
        time_stream += rand.normal(size=n_time) * sp.sqrt(thermal * BW * 2)
        time_stream -= sp.mean(time_stream)
        time_stream *= window
        p += noise_power.calculate_power(time_stream)
    p /= n_spec
    p = noise_power.make_power_physical_units(p, dt)
    w = noise_power.calculate_power(window)
    w_norm = sp.mean(w).real
    #w /= w_norm
    p = noise_power.prune_power(p).real
    #p /= w_norm
    f = noise_power.ps_freq_axis(dt, n_time)
    p = p[1:]
    f = f[1:]
    amp_m, index_m, f0_m, thermal_m = mn.fit_overf_const(p, w, f)
    self.assertTrue(sp.allclose(amp_m, amp, atol=0.2))
    self.assertTrue(sp.allclose(index_m, index, atol=0.1))
    self.assertTrue(sp.allclose(thermal_m, thermal, atol=0.1))
def test_statistical_different_windows(self):
    n_trials = 1000
    n_points = 200
    window1 = sp.ones(n_points, dtype=float)
    window1 += sp.sin(sp.arange(n_points) / 10.0)
    window2 = 2 * sp.ones(n_points, dtype=float)
    window2 += 2 * sp.sin(sp.arange(n_points) / 22.0)
    window2[window2 < 0.5] = 0
    window1[window1 < 0.5] = 0
    power = sp.zeros(n_points // 2)
    for ii in range(n_trials):
        wave = self.amp1 * random.randn(n_points)
        p = npow.windowed_power(wave * window1, window1,
                                wave * window2, window2)
        power += npow.prune_power(p).real
    power /= n_trials
    self.assertTrue(sp.allclose(power / self.amp1**2, 1.0,
                                atol=6.0 * (2.0 / sp.sqrt(n_trials))))
    # Expect this to fail ~1/100 times.
    self.assertFalse(sp.allclose(power / self.amp1**2, 1.0,
                                 atol=0.01 * (2.0 / sp.sqrt(n_trials))))
def model(params):
    a = params[0]**2 + T_small**2
    i = -(params[1]**2 + min_index)
    t = params[2]**2 + T_small**2
    spec = npow.overf_power_spectrum(a, i, f_0, dt, n_time)
    spec += t
    spec = npow.convolve_power(spec, window)
    spec = npow.prune_power(spec)
    spec = spec[1:].real
    return spec
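# Small sketch (toy numbers; T_small and min_index here are stand-ins for
# the module-level constants used above): squaring the raw fit parameters
# lets the optimizer roam freely while the physical amplitude and thermal
# level stay positive and the spectral index stays at or below -min_index.
def _demo_squared_parameter_bounds(T_small=1e-4, min_index=0.1):
    for raw in (-3.0, 0.0, 2.5):
        amplitude = raw**2 + T_small**2      # always >= T_small**2 > 0
        index = -(raw**2 + min_index)        # always <= -min_index
        print(raw, amplitude, index)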
def test_window_with_zeros(self):
    window = sp.ones_like(self.wave1)
    window += sp.sin(sp.arange(self.n) / 50.0)
    self.wave1 *= window
    power = npow.windowed_power(self.wave1, window)
    power = npow.prune_power(power)
    self.assertAlmostEqual(power[self.mode1] / self.amp1**2 / self.n * 4,
                           1.0, 3)
    self.assertTrue(sp.allclose(power[:self.mode1], 0,
                                atol=self.amp1**2 * self.n / 1e3))
    self.assertTrue(sp.allclose(power[self.mode1 + 1:], 0,
                                atol=self.amp1**2 * self.n / 1e3))
def test_no_window(self):
    window = sp.ones_like(self.wave1)
    power = npow.windowed_power(self.wave1, window)
    power = npow.prune_power(power)
    self.assertAlmostEqual(power[self.mode1] / self.amp1**2 / self.n * 4, 1)
    self.assertTrue(sp.allclose(power[:self.mode1], 0,
                                atol=self.amp1**2 * self.n / 1e15))
    self.assertTrue(sp.allclose(power[self.mode1 + 1:], 0,
                                atol=self.amp1**2 * self.n / 1e15))
    # With no window, we have a quick way to the answer.
    quick_power = npow.calculate_power(self.wave1)
    self.assertTrue(sp.allclose(power, quick_power[:self.n // 2]))
def test_statistical_no_window(self):
    n_trials = 1000
    n_points = 200
    window = sp.ones(n_points, dtype=float)
    power = sp.zeros(n_points // 2)
    for ii in range(n_trials):
        wave = self.amp1 * random.randn(n_points)
        p = npow.windowed_power(wave, window)
        power += npow.prune_power(p).real
    power /= n_trials
    self.assertTrue(sp.allclose(power / self.amp1**2, 1.0,
                                atol=4.0 * (2.0 / sp.sqrt(n_trials))))
    # Expect this to fail ~1/100 times.
    self.assertFalse(sp.allclose(power / self.amp1**2, 1.0,
                                 atol=0.01 * (2.0 / sp.sqrt(n_trials))))
def test_different_windows(self):
    window1 = sp.ones_like(self.wave1)
    window1 += sp.sin(sp.arange(self.n) / 40.0)
    window2 = 2 * sp.ones_like(self.wave1)
    window2 += 2 * sp.sin(sp.arange(self.n) / 62.0)
    window2[window2 < 0.5] = 0
    wave1 = self.wave1 * window1
    wave2 = self.wave1 * window2
    power = npow.windowed_power(wave1, window1, wave2, window2)
    power = npow.prune_power(power)
    self.assertAlmostEqual(power[self.mode1] / self.amp1**2 / self.n * 4,
                           1.0, 3)
    self.assertTrue(sp.allclose(power[:self.mode1], 0,
                                atol=self.amp1**2 * self.n / 1e3))
    self.assertTrue(sp.allclose(power[self.mode1 + 1:], 0,
                                atol=self.amp1**2 * self.n / 1e3))
def test_statistical_physical_units(self):
    n_trials = 1000
    n_points = 200
    dt = 0.001
    window = sp.ones(n_points, dtype=float)
    power = sp.zeros(n_points // 2)
    for ii in range(n_trials):
        wave = self.amp1 * random.randn(n_points)
        power += npow.prune_power(npow.calculate_power(wave)).real
    power /= n_trials
    power = npow.make_power_physical_units(power, dt)
    freqs = npow.ps_freq_axis(dt, n_points)
    df = abs(sp.mean(sp.diff(freqs)))
    # The integral of the power spectrum should be the variance.  The factor
    # of 2 accounts for the negative frequencies.
    integrated_power = sp.sum(power) * df * 2
    self.assertTrue(sp.allclose(integrated_power / self.amp1**2, 1.0,
                                atol=4.0 * (2.0 / sp.sqrt(n_trials
                                                          * n_points))))
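# Stand-alone numpy sketch (illustration only, independent of the npow
# helpers) of the normalization checked above: with the spectrum in physical
# units (variance per Hz), the sum over positive frequencies times df,
# doubled for the negative frequencies, recovers the time-domain variance.
def _demo_integrated_power_is_variance():
    import numpy as np

    dt = 0.001
    n = 200000
    x = np.random.standard_normal(n)              # unit-variance white noise
    power = np.abs(np.fft.fft(x))**2 / n * dt     # variance per Hz
    df = 1.0 / (n * dt)
    # Positive frequencies only; the DC and Nyquist bins are negligible here.
    integrated = 2 * np.sum(power[1:n // 2]) * df
    print(integrated, np.var(x))                  # both close to 1.0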
def get_freq_modes_over_f(power_mat, window_function, frequency, n_modes,
                          plots=False):
    """Finds the most correlated frequency modes and fits their noise."""
    n_f = len(frequency)
    d_f = sp.mean(sp.diff(frequency))
    dt = 1. / 2. / frequency[-1]
    n_chan = power_mat.shape[-1]
    n_time = window_function.shape[0]
    # The threshold for assuming there isn't enough data to measure anything.
    no_data_thres = 10. / n_time
    # Initialize the dictionary that will hold all the parameters.
    output_params = {}
    # First take the low frequency part of the spectrum matrix and average
    # over enough bins to get a well conditioned matrix.
    low_f_mat = sp.mean(power_mat[:4 * n_chan, :, :].real, 0)
    # Factor the matrix to get the most correlated modes.
    e, v = linalg.eigh(low_f_mat)
    # Make sure they are sorted.
    if not sp.alltrue(sp.diff(e) >= 0):
        raise RuntimeError("Eigenvalues not sorted")
    # Power matrix stripped of the biggest modes.
    reduced_power = sp.copy(power_mat)
    mode_list = []
    # Solve for the spectra of these modes.
    for ii in range(n_modes):
        this_mode_params = {}
        # Get power spectrum and window function for this mode.
        mode = v[:, -1 - ii]
        mode_power = sp.sum(mode * power_mat.real, -1)
        mode_power = sp.sum(mode * mode_power, -1)
        mode_window = sp.sum(mode[:, None]**2 * window_function, 1)
        mode_window = sp.sum(mode_window * mode[None, :]**2, 1)
        # Protect against no data.
        if sp.mean(mode_window).real < no_data_thres:
            this_mode_params['amplitude'] = 0.
            this_mode_params['index'] = 0.
            this_mode_params['f_0'] = 1.
            this_mode_params['thermal'] = T_infinity**2 * dt
        else:
            # Fit the spectrum.
            p = fit_overf_const(mode_power, mode_window, frequency)
            # Put all the parameters we measured into the output.
            this_mode_params['amplitude'] = p[0]
            this_mode_params['index'] = p[1]
            this_mode_params['f_0'] = p[2]
            this_mode_params['thermal'] = p[3]
        this_mode_params['mode'] = mode
        output_params['over_f_mode_' + str(ii)] = this_mode_params
        # Remove the mode from the power matrix.
        tmp_amp = sp.sum(reduced_power * mode, -1)
        tmp_amp2 = sp.sum(reduced_power * mode[:, None], -2)
        tmp_amp3 = sp.sum(tmp_amp2 * mode, -1)
        reduced_power -= tmp_amp[:, :, None] * mode
        reduced_power -= tmp_amp2[:, None, :] * mode[:, None]
        reduced_power += tmp_amp3[:, None, None] * mode[:, None] * mode
        mode_list.append(mode)
    # Initialize the compensation matrix, which will be used to restore
    # thermal noise that gets subtracted out.  See Jan 29, Feb 17th, 2012 of
    # Kiyo's notes.
    compensation = sp.eye(n_chan, dtype=float)
    for mode1 in mode_list:
        compensation.flat[::n_chan + 1] -= 2 * mode1**2
        for mode2 in mode_list:
            mode_prod = mode1 * mode2
            compensation += mode_prod[:, None] * mode_prod[None, :]
    # Now that we've stripped the noisiest modes, measure the auto power
    # spectrum, averaged over channels.
    auto_spec_mean = reduced_power.view()
    auto_spec_mean.shape = (n_f, n_chan**2)
    auto_spec_mean = auto_spec_mean[:, ::n_chan + 1].real
    auto_spec_mean = sp.mean(auto_spec_mean, -1)
    diag_window = window_function.view()
    diag_window.shape = (n_time, n_chan**2)
    diag_window = diag_window[:, ::n_chan + 1]
    auto_spec_window = sp.mean(diag_window, -1)
    if sp.mean(auto_spec_window).real < no_data_thres:
        auto_cross_over = 0.
        auto_index = 0.
        auto_thermal = 0
    else:
        auto_spec_params = fit_overf_const(auto_spec_mean, auto_spec_window,
                                           frequency)
        auto_thermal = auto_spec_params[3]
        if (auto_spec_params[0] <= 0 or auto_spec_params[3] <= 0
                or auto_spec_params[1] > -0.599):
            auto_cross_over = 0.
            auto_index = 0.
        else:
            auto_index = auto_spec_params[1]
            auto_cross_over = auto_spec_params[2] * (
                auto_spec_params[0] / auto_spec_params[3])**(-1. / auto_index)
    #if auto_cross_over < d_f:
    #    auto_index = 0.
    #    auto_cross_over = 0.
    # Plot the mean auto spectrum if desired.
    if plots:
        h = plt.gcf()
        a = h.add_subplot(*h.current_subplot)
        norm = sp.mean(auto_spec_window).real
        auto_plot = auto_spec_mean / norm
        plotable = auto_plot > 0
        lines = a.loglog(frequency[plotable], auto_plot[plotable])
        c = lines[-1].get_color()
        # And plot the fit in a light color.
        if auto_cross_over > d_f / 4.:
            spec = npow.overf_power_spectrum(auto_thermal, auto_index,
                                             auto_cross_over, dt, n_time)
        else:
            spec = sp.zeros(n_time, dtype=float)
        spec += auto_thermal
        spec[0] = 0
        spec = npow.convolve_power(spec, auto_spec_window)
        spec = npow.prune_power(spec)
        spec = spec[1:].real
        if norm > no_data_thres:
            spec /= norm
        plotable = spec > 0
        a.loglog(frequency[plotable], spec[plotable], c=c, alpha=0.4,
                 linestyle=':')
    output_params['all_channel_index'] = auto_index
    output_params['all_channel_corner_f'] = auto_cross_over
    # Finally measure the thermal part of the noise in each channel.
    cross_over_ind = sp.digitize([auto_cross_over * 4], frequency)[0]
    cross_over_ind = max(cross_over_ind, n_f // 2)
    cross_over_ind = min(cross_over_ind, int(9. * n_f / 10.))
    thermal = reduced_power[cross_over_ind:, :, :].real
    n_high_f = thermal.shape[0]
    thermal.shape = (n_high_f, n_chan**2)
    thermal = sp.mean(thermal[:, ::n_chan + 1], 0)
    thermal_norms = sp.mean(diag_window, 0).real
    bad_inds = thermal_norms < no_data_thres
    thermal_norms[bad_inds] = 1.
    # Compensate for power lost in mode subtraction.
    compensation[:, bad_inds] = 0
    compensation[bad_inds, :] = 0
    for ii in range(n_chan):
        if bad_inds[ii]:
            compensation[ii, ii] = 1.
    thermal = linalg.solve(compensation, thermal)
    # Normalize.
    thermal /= thermal_norms
    thermal[bad_inds] = T_infinity**2 * dt
    # Occasionally the compensation fails horribly on a few channels.
    # When this happens, zero out the offending indices.
    thermal[thermal < 0] = 0
    output_params['thermal'] = thermal
    # Now that we know what thermal is, we can subtract it out of the modes
    # we already measured.
    for ii in range(n_modes):
        mode_params = output_params['over_f_mode_' + str(ii)]
        thermal_contribution = sp.sum(mode_params['mode']**2 * thermal)
        # Subtract a maximum of 90% of the white noise to keep things
        # positive definite.
        new_white = max(mode_params['thermal'] - thermal_contribution,
                        0.1 * mode_params['thermal'])
        if mode_params['thermal'] < 0.5 * T_infinity**2 * dt:
            mode_params['thermal'] = new_white
    return output_params
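# Stand-alone numpy check (illustration only) of the mode-removal algebra in
# get_freq_modes_over_f: the three tmp_amp correction terms are exactly the
# expansion of sandwiching the power matrix with the projector (I - v v^T),
# assuming v is a unit-norm eigenvector.
def _demo_mode_removal_is_projection():
    import numpy as np

    rng = np.random.RandomState(1)
    n_chan = 6
    P = rng.randn(n_chan, n_chan)
    P = P + P.T                              # symmetric, like a power matrix
    v = rng.randn(n_chan)
    v /= np.linalg.norm(v)                   # unit-norm mode

    proj = np.eye(n_chan) - np.outer(v, v)
    direct = proj.dot(P).dot(proj)

    expanded = (P
                - np.outer(P.dot(v), v)              # tmp_amp term
                - np.outer(v, v.dot(P))              # tmp_amp2 term
                + v.dot(P).dot(v) * np.outer(v, v))  # tmp_amp3 term
    print(np.allclose(direct, expanded))     # True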
def measure_noise_parameters(Blocks, parameters, split_scans=False,
                             plots=False):
    """Given a set of data blocks, measure noise parameters.

    Measurement done for all polarizations but only the first cal state.
    """
    # Initialize the output.
    out_parameters = {}
    if set(parameters) == {"channel_var"}:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_diag(
            Blocks, window="hanning", deconvolve=False, n_time=-1.05,
            normalize=False, split_scans=split_scans, subtract_slope=True)
        # This shouldn't be necessary, since I've tried to keep things finite
        # in the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = ("Non-finite power spectrum calculated.  Offending data in "
                   "file starting with scan %d." % (Blocks[0].field['SCAN']))
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :]
            this_pol_window = window_function[:, ii, cal_ind, :]
            this_pol = Blocks[0].field['CRVAL4'][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization
            # and this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = (current_subplot[:2]
                                   + (current_subplot[2] + 1,))
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                window_function_diag = this_pol_window.view()
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10. / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving it
                # a high variance.
                channel_var[bad_inds] = T_infinity**2
                this_pol_parameters["channel_var"] = channel_var
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
    else:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_mat(
            Blocks, window="hanning", deconvolve=False, n_time=-1.05,
            normalize=False, split_scans=split_scans, subtract_slope=True)
        # This shouldn't be necessary, since I've tried to keep things finite
        # in the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = ("Non-finite power spectrum calculated.  Offending data in "
                   "file starting with scan %d." % (Blocks[0].field['SCAN']))
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :, :]
            this_pol_window = window_function[:, ii, cal_ind, :, :]
            this_pol = Blocks[0].field['CRVAL4'][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization
            # and this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = (current_subplot[:2]
                                   + (current_subplot[2] + 1,))
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                power_diag.shape = (n_f, n_chan**2)
                power_diag = power_diag[:, ::n_chan + 1].real
                window_function_diag = this_pol_window.view()
                window_function_diag.shape = (n_time, n_chan**2)
                window_function_diag = window_function_diag[:, ::n_chan + 1]
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10. / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving it
                # a high variance.
                channel_var[bad_inds] = T_infinity**2
                this_pol_parameters["channel_var"] = channel_var
            for noise_model in parameters:
                if noise_model[:18] == "freq_modes_over_f_":
                    n_modes = int(noise_model[18:])
                    this_pol_parameters[noise_model] = get_freq_modes_over_f(
                        this_pol_power, this_pol_window, frequency, n_modes,
                        plots=plots)
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
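# Stand-alone numpy sketch (illustration only) of the strided-diagonal idiom
# used above: flattening the last two axes of an (n_f, n_chan, n_chan) power
# matrix and striding by n_chan + 1 walks down the diagonal of every
# frequency slice, giving the per-channel auto-spectra without a copy.
def _demo_strided_diagonal():
    import numpy as np

    n_f, n_chan = 5, 4
    power = np.arange(n_f * n_chan * n_chan, dtype=float)
    power.shape = (n_f, n_chan, n_chan)

    diag = power.view()
    diag.shape = (n_f, n_chan**2)
    diag = diag[:, ::n_chan + 1]

    print(np.allclose(diag, power.diagonal(axis1=1, axis2=2)))  # True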