def test_estimate_welch(self):
    """Test estimating PSDs from data using Welch's method"""
    for seg_len in (2048, 4096, 8192):
        noise_model = numpy.linspace(1., 100., seg_len // 2 + 1) ** (-2)
        for seg_stride in (seg_len, seg_len // 2):
            for method in ('mean', 'median', 'median-mean'):
                with self.context:
                    psd = pycbc.psd.welch(self.noise, seg_len=seg_len,
                                          seg_stride=seg_stride,
                                          avg_method=method)
                    error = (psd.numpy() - noise_model) / noise_model
                    err_rms = numpy.sqrt(numpy.mean(error ** 2))
                    self.assertTrue(
                        err_rms < 0.2,
                        msg='seg_len=%d seg_stride=%d method=%s -> rms=%.3f'
                            % (seg_len, seg_stride, method, err_rms))
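
The test above exercises pycbc.psd.welch against a reference noise model supplied by the test fixture. For readers who want to call the estimator directly, a minimal standalone sketch follows; the white-noise input, sample rate, and segment parameters are illustrative assumptions, not values taken from the test.

import numpy
import pycbc.psd
from pycbc.types import TimeSeries

# Assumed input: 256 s of white Gaussian noise at 4096 Hz (the test's
# actual fixture noise is not reproduced here).
sample_rate = 4096
noise = TimeSeries(numpy.random.normal(size=256 * sample_rate),
                   delta_t=1.0 / sample_rate)

# Welch estimate: 4 s segments, 50% overlap, median averaging.
psd = pycbc.psd.welch(noise, seg_len=4 * sample_rate,
                      seg_stride=2 * sample_rate, avg_method='median')
print(psd.delta_f, len(psd))
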
def trigger_list_from_map(tfmap, event_list, threshold, start_time,
                          start_freq, duration, band, df, dt, psd=None):
    """Convert pixels of the time-frequency map `tfmap` that exceed
    `threshold` into burst trigger rows appended to `event_list`."""

    # FIXME: If we don't convert this the calculation takes forever ---
    # but we should convert it once and handle deltaF better later
    if psd is not None:
        npy_psd = psd.numpy()

    start_time = LIGOTimeGPS(float(start_time))
    ndof = 2 * duration * band

    for i, j in zip(*numpy.where(tfmap > threshold)):
        event = event_list.RowType()

        # The points are summed forward in time, and thus a 'summed point' is
        # the sum of the previous N points. If this point is above threshold,
        # it corresponds to a tile which spans the previous N points. However,
        # the 0th point (due to the convolution specifier 'valid') is actually
        # already a duration from the start time. All of this means the
        # + duration and the - duration cancel, and the tile 'start' is, by
        # definition, the start of the time-frequency map if j = 0.
        # FIXME: I think this needs a + dt/2 to center the tile properly
        event.set_start(start_time + float(j * dt))
        event.set_stop(start_time + float(j * dt) + duration)
        event.set_peak(event.get_start() + duration / 2)
        event.central_freq = start_freq + i * df + 0.5 * band

        event.duration = duration
        event.bandwidth = band
        event.chisq_dof = ndof

        event.snr = math.sqrt(tfmap[i, j] / event.chisq_dof - 1)
        # FIXME: Magic number 0.62 should be determined empirically
        event.confidence = -lal.LogChisqCCDF(event.snr * 0.62,
                                             event.chisq_dof * 0.62)
        if psd is not None:
            # NOTE: I think the pycbc PSDs always start at 0 Hz --- check
            psd_idx_min = int((event.central_freq - event.bandwidth / 2) /
                              psd.delta_f)
            psd_idx_max = int((event.central_freq + event.bandwidth / 2) /
                              psd.delta_f)

            # FIXME: heuristically this works better with E - D -- it's all
            # going away with the better h_rss calculation soon anyway
            event.amplitude = measure_hrss_poorly(
                tfmap[i, j] - event.chisq_dof,
                npy_psd[psd_idx_min:psd_idx_max])
        else:
            event.amplitude = None

        event.process_id = None
        event.event_id = event_list.get_next_id()
        event_list.append(event)
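
The loop above visits every time-frequency pixel that exceeds the threshold and turns it into a trigger row. The core indexing can be illustrated without the lal/ligolw dependencies; the toy map, threshold, and tile parameters below are assumptions for illustration only.

import numpy

# Toy time-frequency map: rows are frequency bins (width df), columns are
# time bins (width dt); values stand in for the summed tile energies.
df, dt = 1.0, 0.25
start_freq, band = 16.0, 4.0
tfmap = numpy.random.chisquare(8, size=(32, 128))
threshold = 20.0

for i, j in zip(*numpy.where(tfmap > threshold)):
    tile_start = j * dt                              # offset from map start
    tile_central_freq = start_freq + i * df + 0.5 * band
    print('tile at t=%.2f s, f=%.1f Hz, energy=%.1f'
          % (tile_start, tile_central_freq, tfmap[i, j]))
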
def test_truncation(self):
    """Test inverse PSD truncation"""
    for seg_len in (2048, 4096, 8192):
        noise_model = numpy.linspace(1., 100., seg_len // 2 + 1) ** (-2)
        for max_len in (1024, 512, 256):
            with self.context:
                psd = pycbc.psd.welch(self.noise, seg_len=seg_len,
                                      seg_stride=seg_len // 2,
                                      avg_method='mean')
                psd_trunc = pycbc.psd.inverse_spectrum_truncation(
                    psd, max_len,
                    low_frequency_cutoff=self.psd_low_freq_cutoff)
                freq = psd.sample_frequencies.numpy()
                error = (psd.numpy() - noise_model) / noise_model
                error = error[freq > self.psd_low_freq_cutoff]
                err_rms = numpy.sqrt(numpy.mean(error ** 2))
                self.assertTrue(
                    err_rms < 0.1,
                    msg='seg_len=%d max_len=%d -> rms=%.3f'
                        % (seg_len, max_len, err_rms))
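
The truncation test above calls pycbc.psd.inverse_spectrum_truncation on a Welch estimate. As a standalone sketch, the same call can be made on an analytic PSD; the model, resolution, filter length, and cutoff below are assumed values for illustration.

import pycbc.psd

delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 10.0)

# Limit the inverse PSD to a 4 s (4 * 4096 sample) impulse response, which
# smooths sharp spectral features before matched filtering.
psd_trunc = pycbc.psd.inverse_spectrum_truncation(
    psd, 4 * 4096, low_frequency_cutoff=15.0)
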
import numpy
import matplotlib.pyplot as plt

import pycbc.psd
import pycbc.types.frequencyseries

# List the available analytic PSDs
print(pycbc.psd.get_lalsim_psd_list())

delta_f = 1.0 / 4
flen = int(1024 / delta_f)
low_frequency_cutoff = 30.0

# PSD models of the detector
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, low_frequency_cutoff)
psd_1 = pycbc.psd.aLIGOZeroDetLowPower(flen, delta_f, low_frequency_cutoff)
psd_2 = pycbc.psd.eLIGOModel(flen, delta_f, low_frequency_cutoff)
psd_3 = pycbc.psd.iLIGOModel(flen, delta_f, low_frequency_cutoff)

# ASD of each PSD
asd = pycbc.types.frequencyseries.FrequencySeries(
    numpy.sqrt(psd.numpy()), delta_f=psd.delta_f)
asd_1 = pycbc.types.frequencyseries.FrequencySeries(
    numpy.sqrt(psd_1.numpy()), delta_f=psd_1.delta_f)
asd_2 = pycbc.types.frequencyseries.FrequencySeries(
    numpy.sqrt(psd_2.numpy()), delta_f=psd_2.delta_f)
asd_3 = pycbc.types.frequencyseries.FrequencySeries(
    numpy.sqrt(psd_3.numpy()), delta_f=psd_3.delta_f)

# Plotting
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.loglog(asd.sample_frequencies, asd, linewidth=3,
          label='aLIGO High Power Design')
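
The script stops after drawing the first curve. A plausible continuation, assuming the remaining models belong on the same axes (the label strings and axis limits below are assumptions, not part of the original), would be:

# Hypothetical continuation: remaining curves, labels, and legend are assumed.
ax.loglog(asd_1.sample_frequencies, asd_1, linewidth=3,
          label='aLIGO Low Power Design')
ax.loglog(asd_2.sample_frequencies, asd_2, linewidth=3, label='eLIGO Model')
ax.loglog(asd_3.sample_frequencies, asd_3, linewidth=3, label='iLIGO Model')
ax.set_xlim(low_frequency_cutoff, 1024)
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Strain ASD (1/sqrt(Hz))')
ax.legend()
plt.show()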