def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    The filter is FIR: the coefficients are the numerator and the
    denominator is 1.0 (see the scipy branch below).

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.filter import correlate
    fillen = len(coefficients)

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return TimeSeries(series,
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
        # Moderate size: do the filtering in the frequency domain as a
        # correlation against the time-reversed, zero-padded coefficients.
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - fillen + 1)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        # BUGFIX: pass the dtype itself, not the series object; numpy < 1.20
        # cannot interpret an array instance as a dtype argument (newer numpy
        # happens to read its ``.dtype`` attribute, so this is equivalent there).
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        # The inverse FFT is unnormalized, hence the division by the length
        return TimeSeries(out.numpy() / len(out),
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    else:
        # recursively perform which saves a bit on memory usage
        # but must keep within recursion limit
        chunksize = max(fillen * 5, len(timeseries) // 128)
        part1 = lfilter(coefficients, timeseries[0:chunksize])
        # Overlap the second chunk by fillen samples so its start-up
        # transient can be discarded when stitching the results together
        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
        out = timeseries.copy()
        out[:len(part1)] = part1
        out[len(part1):] = part2[fillen:]
        return out
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    The filter is FIR: the coefficients are the numerator and the
    denominator is 1.0 (see the scipy branch below).

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        # Filter in the frequency domain as a correlation against the
        # time-reversed, zero-padded coefficients.
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        # BUGFIX: use floor division; under Python 3 the ``/`` operator
        # yields a float, which is not a valid length for ``zeros``.
        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        # BUGFIX: pass the dtype itself, not the series object; numpy < 1.20
        # cannot interpret an array instance as a dtype argument.
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        # The inverse FFT is unnormalized, hence the division by the length
        return out.numpy() / len(out)
# NOTE(review): benchmark script fragment. It relies on names bound earlier
# in the file (outside this view): time, niter, corr, b, N, zeros, Array,
# uniform, correlate, Correlator, complex64, complex128 — verify against the
# full file.

# Time the batched correlate engine set up above (presumably inside a
# preceding loop over sizes N — TODO confirm against the full file).
t1 = time()
for i in range(niter):
    corr.execute(b)
t2 = time()
# Report average wall-clock time per call in milliseconds
print("Batch Correlate Perf Size:{} Time:{:3.3f}".format(
      N, (t2-t1)*1000 / niter))

# Benchmark the plain correlate call for each precision and several sizes
for dtp in [complex64, complex128]:
    for N in [2**10, 2**15, 2**20]:
        a = zeros(N, dtype=dtp)
        # Fill one input with deterministic-length random complex data;
        # b and c stay zero (only throughput is measured, not results)
        a += Array(uniform(-1, 1, size=N) * (1 + -.5j), dtype=a.dtype)
        b = zeros(N, dtype=dtp)
        c = zeros(N, dtype=dtp)
        # Warm-up call before timing
        correlate(a, b, c)
        t1 = time()
        for i in range(niter):
            correlate(a, b, c)
        t2 = time()
        print("Correlate Perf Type:{} Size:{} Time:{:3.3f}".format(repr(dtp),
              N, (t2-t1)*1000 / niter))
        # The Correlator class is only exercised for single precision here
        if dtp is complex64:
            corr = Correlator(a, b, c)
            t1 = time()
            for i in range(niter):
                corr.correlate()
            t2 = time()
            # NOTE(review): the source chunk is truncated here — the closing
            # arguments of this print call lie outside the visible source.
            print("Correlator Perf Type:{} Size:{} Time:{:3.3f}".format(repr(dtp),
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    The filter is FIR: the coefficients are the numerator and the
    denominator is 1.0 (see the scipy branch below).

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.filter import correlate
    fillen = len(coefficients)

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return TimeSeries(series,
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
        # Moderate size: filter in the frequency domain as a correlation
        # against the time-reversed, zero-padded coefficients.
        from pycbc.strain.strain import create_memory_and_engine_for_class_based_fft
        from pycbc.strain.strain import execute_cached_fft

        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - fillen + 1)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        if not USE_CACHING_FOR_LFILTER:
            cfreq = zeros(flen, dtype=ftype)
            tfreq = zeros(flen, dtype=ftype)
            fft(Array(cseries), cfreq)
            fft(Array(timeseries), tfreq)
            cout = zeros(flen, ftype)
            correlate(cfreq, tfreq, cout)
            # BUGFIX: pass the dtype itself, not the series object;
            # numpy < 1.20 cannot interpret an array instance as a dtype.
            out = zeros(len(timeseries), dtype=timeseries.dtype)
            ifft(cout, out)
        else:
            npoints = len(cseries)
            # NOTE: This function is cached!
            ifftouts = create_memory_and_engine_for_class_based_fft(
                npoints,
                timeseries.dtype,
                ifft=True,
                uid=LFILTER_UNIQUE_ID_1
            )

            # FFT contents of cseries into cfreq
            cfreq = execute_cached_fft(cseries, uid=LFILTER_UNIQUE_ID_2,
                                       copy_output=False,
                                       normalize_by_rate=False)

            # FFT contents of timeseries into tfreq
            tfreq = execute_cached_fft(timeseries, uid=LFILTER_UNIQUE_ID_3,
                                       copy_output=False,
                                       normalize_by_rate=False)

            cout, out, fft_class = ifftouts

            # Correlate cfreq and tfreq
            correlate(cfreq, tfreq, cout)
            # IFFT correlation output into out
            fft_class.execute()

        # The inverse FFT is unnormalized, hence the division by the length
        return TimeSeries(out.numpy() / len(out),
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    else:
        # recursively perform which saves a bit on memory usage
        # but must keep within recursion limit
        chunksize = max(fillen * 5, len(timeseries) // 128)
        part1 = lfilter(coefficients, timeseries[0:chunksize])
        # Overlap the second chunk by fillen samples so its start-up
        # transient can be discarded when stitching the results together
        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
        out = timeseries.copy()
        out[:len(part1)] = part1
        out[len(part1):] = part2[fillen:]
        return out
def __init__(self, ifos, coinc_results, **kwargs):
    """Initialize a ligolw xml representation of a zerolag trigger
    for upload from pycbc live to gracedb.

    Parameters
    ----------
    ifos: list of strs
        A list of the ifos participating in this trigger.
    coinc_results: dict of values
        A dictionary of values. The format is defined in
        pycbc/events/coinc.py and matches the on disk representation
        in the hdf file for this time.
    \**kwargs
        ``upload_snr_series`` (bool) is required. When it is True,
        ``data_readers`` (dict keyed by ifo) and ``bank`` (indexable by
        template id) must also be supplied so the SNR time series can be
        computed and embedded in the document.
    """
    self.ifos = ifos
    # All participating ifos share one template; read its id from the first
    self.template_id = coinc_results['foreground/%s/template_id'
                                     % self.ifos[0]]

    # remember if this should be marked as HWINJ
    self.is_hardware_injection = False
    if 'HWINJ' in coinc_results:
        self.is_hardware_injection = True

    # Set up the bare structure of the xml document
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())

    # Record this process in the document and keep its id so rows
    # created below can point back to it
    proc_id = ligolw_process.register_to_xmldoc(
        outdoc, 'pycbc', {}, ifos=ifos, comment='',
        version=pycbc_version.git_hash,
        cvs_repository='pycbc/' + pycbc_version.git_branch,
        cvs_entry_time=pycbc_version.date).process_id

    # Set up coinc_definer table
    coinc_def_table = lsctables.New(lsctables.CoincDefTable)
    coinc_def_id = lsctables.CoincDefID(0)
    coinc_def_row = lsctables.CoincDef()
    coinc_def_row.search = "inspiral"
    coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs"
    coinc_def_row.coinc_def_id = coinc_def_id
    coinc_def_row.search_coinc_type = 0
    coinc_def_table.append(coinc_def_row)
    outdoc.childNodes[0].appendChild(coinc_def_table)

    # Set up coinc inspiral and coinc event tables
    coinc_id = lsctables.CoincID(0)
    coinc_event_table = lsctables.New(lsctables.CoincTable)
    coinc_event_row = lsctables.Coinc()
    coinc_event_row.coinc_def_id = coinc_def_id
    coinc_event_row.nevents = len(ifos)
    coinc_event_row.instruments = ','.join(ifos)
    coinc_event_row.time_slide_id = lsctables.TimeSlideID(0)
    coinc_event_row.process_id = proc_id
    coinc_event_row.coinc_event_id = coinc_id
    coinc_event_row.likelihood = 0.
    coinc_event_table.append(coinc_event_row)
    outdoc.childNodes[0].appendChild(coinc_event_table)

    # Set up sngls: one sngl_inspiral row per ifo, each linked to the
    # coinc event through a coinc_event_map row
    sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
    coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)

    sngl_id = 0
    sngl_event_id_map = {}
    for ifo in ifos:
        # Collect the per-ifo field names present under 'foreground/<ifo>/'
        names = [n.split('/')[-1] for n in coinc_results
                 if 'foreground/%s' % ifo in n]
        sngl_id += 1
        sngl = return_empty_sngl()
        sngl.event_id = lsctables.SnglInspiralID(sngl_id)
        # Remember the event id so the SNR series param below can refer to it
        sngl_event_id_map[ifo] = sngl.event_id
        sngl.ifo = ifo
        for name in names:
            val = coinc_results['foreground/%s/%s' % (ifo, name)]
            if name == 'end_time':
                sngl.set_end(lal.LIGOTimeGPS(val))
            else:
                # Silently skip fields with no matching sngl_inspiral column
                try:
                    setattr(sngl, name, val)
                except AttributeError:
                    pass
        # Derived mass columns computed from mass1/mass2
        sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                sngl.mass1, sngl.mass2)
        sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                sngl.mass1, sngl.mass2)
        # Effective distance estimated as sigma (sqrt of sigmasq) over SNR
        sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
        sngl_inspiral_table.append(sngl)

        # Set up coinc_map entry
        coinc_map_row = lsctables.CoincMap()
        coinc_map_row.table_name = 'sngl_inspiral'
        coinc_map_row.coinc_event_id = coinc_id
        coinc_map_row.event_id = sngl.event_id
        coinc_event_map_table.append(coinc_map_row)

    outdoc.childNodes[0].appendChild(coinc_event_map_table)
    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # Set up the coinc inspiral table
    coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
    coinc_inspiral_row = lsctables.CoincInspiral()
    # This seems to be used as FAP, which should not be in gracedb
    coinc_inspiral_row.false_alarm_rate = 0
    coinc_inspiral_row.minimum_duration = 0.
    coinc_inspiral_row.set_ifos(ifos)
    coinc_inspiral_row.coinc_event_id = coinc_id
    # Mass/time columns taken from the last sngl built above
    # NOTE(review): 'sngl' is the final loop iteration's row — presumably
    # the per-coinc values agree across ifos; verify against the caller
    coinc_inspiral_row.mchirp = sngl.mchirp
    coinc_inspiral_row.mass = sngl.mtotal
    coinc_inspiral_row.end_time = sngl.end_time
    coinc_inspiral_row.end_time_ns = sngl.end_time_ns
    coinc_inspiral_row.snr = coinc_results['foreground/stat']
    # Convert IFAR in years to a combined FAR in Hz
    far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar'])
    coinc_inspiral_row.combined_far = far
    coinc_inspiral_table.append(coinc_inspiral_row)
    outdoc.childNodes[0].appendChild(coinc_inspiral_table)

    self.outdoc = outdoc
    self.time = sngl.get_end()

    # compute SNR time series
    self.upload_snr_series = kwargs['upload_snr_series']
    if self.upload_snr_series:
        data_readers = kwargs['data_readers']
        bank = kwargs['bank']
        htilde = bank[self.template_id]
        self.snr_series = {}
        self.snr_series_psd = {}
        for ifo in self.ifos:
            stilde = data_readers[ifo].overwhitened_data(htilde.delta_f)
            # Matched-filter normalization for the SNR series
            norm = 4.0 * htilde.delta_f / (htilde.sigmasq(stilde.psd)**0.5)
            qtilde = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype)
            correlate(htilde, stilde, qtilde)
            # Allocate the output with the same type/length as qtilde
            snr = qtilde * 0
            ifft(qtilde, snr)

            # Keep only the part of the series not affected by the
            # reader's padding
            valid_end = int(len(qtilde) - data_readers[ifo].trim_padding)
            valid_start = int(valid_end -
                              data_readers[ifo].blocksize *
                              data_readers[ifo].sample_rate)
            seg = slice(valid_start, valid_end)
            snr = snr[seg]
            snr *= norm
            delta_t = 1.0 / data_readers[ifo].sample_rate
            start = data_readers[ifo].start_time
            snr = TimeSeries(snr, delta_t=delta_t, epoch=start)
            self.snr_series[ifo] = snr
            self.snr_series_psd[ifo] = stilde.psd

            # store the on-source slice of the series into the XML doc
            snr_onsource_time = coinc_results['foreground/%s/end_time'
                                              % ifo] - snr.start_time
            # Window length set by the light-travel time across one Earth
            # radius (REARTH_SI / C_SI), centered on the trigger time
            snr_onsource_dur = lal.REARTH_SI / lal.C_SI
            onsource_idx = round(snr_onsource_time * snr.sample_rate)
            onsource_start = onsource_idx - int(
                    snr.sample_rate * snr_onsource_dur / 2)
            onsource_end = onsource_idx + int(
                    snr.sample_rate * snr_onsource_dur / 2)
            onsource_slice = slice(onsource_start, onsource_end + 1)
            snr_lal = snr[onsource_slice].lal()
            snr_lal.name = 'snr'
            snr_lal.sampleUnits = ''
            snr_xml = _build_series(snr_lal,
                                    (u'Time', u'Time,Real,Imaginary'),
                                    None, 'deltaT', 's')
            # Attach the series to the document and tag it with the
            # matching sngl_inspiral event id
            snr_node = outdoc.childNodes[-1].appendChild(snr_xml)
            # NOTE(review): 'unicode' is a Python-2-only builtin — confirm
            # this module targets Python 2 or provides a compat alias
            eid_param = ligolw_param.new_param(
                    u'event_id', u'ilwd:char',
                    unicode(sngl_event_id_map[ifo]))
            snr_node.appendChild(eid_param)