def qtransform(fseries, Q, f0):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency

    Returns
    -------
    norm_energy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    cenergy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the complex energy from the Q-transform of
        this tile against the data.
    """
    # q-transform data for each (Q, frequency) tile

    # initialize parameters
    qprime = Q / 11**(1 / 2.)  # ... self.qprime
    dur = 1.0 / fseries.delta_f

    # check for sampling rate
    sampling = (len(fseries) - 1) * 2 * fseries.delta_f

    # choice of output sampling rate
    output_sampling = sampling  # Can lower this to highest bandwidth
    output_samples = int(dur * output_sampling)

    # window fft
    window_size = 2 * int(f0 / qprime * dur) + 1

    # get start and end indices
    start = int((f0 - (f0 / qprime)) * dur)
    end = int(start + window_size)

    # apply window to fft
    # normalize and generate bi-square window
    norm = np.sqrt(315. * qprime / (128. * f0))
    windowed = fseries[start:end].numpy() * (bisquare(window_size) * norm)

    # pad data, move negative frequencies to the end, and IFFT
    padded = np.pad(windowed, padding(window_size, output_samples),
                    mode='constant')
    wenergy = npfft.ifftshift(padded)

    # return a 'TimeSeries'
    wenergy = FrequencySeries(wenergy, delta_f=1. / dur)
    cenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),
                         delta_t=1. / sampling)
    ifft(wenergy, cenergy)
    energy = cenergy.squared_norm()
    medianenergy = np.median(energy.numpy())
    norm_energy = energy / float(medianenergy)
    return norm_energy, cenergy

def match_func(data, temp, psd, delta_t, f_min, f_max):
    data = TimeSeries(data, delta_t=delta_t, copy=True)
    temp = TimeSeries(temp, delta_t=delta_t, copy=True)
    amplitude1 = filter.matchedfilter.sigmasq(data, psd=psd,
                                              low_frequency_cutoff=f_min,
                                              high_frequency_cutoff=f_max)
    amplitude2 = filter.matchedfilter.sigmasq(temp, psd=psd,
                                              low_frequency_cutoff=f_min,
                                              high_frequency_cutoff=f_max)
    match, n = filter.matchedfilter.match(data, temp, psd=psd,
                                          low_frequency_cutoff=f_min,
                                          high_frequency_cutoff=f_max,
                                          v1_norm=True, v2_norm=True)
    match = match / np.sqrt(amplitude1 * amplitude2)
    return match

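# Illustrative usage sketch for match_func (an assumption, not part of the
# snippet above): compare two slightly time-shifted sine-Gaussian arrays of
# equal length, with no PSD weighting. The snippet assumes `np` is numpy and
# `filter` is the imported `pycbc.filter` package.
import numpy as np
from pycbc import filter  # provides filter.matchedfilter used above

delta_t = 1.0 / 4096
t = np.arange(0, 4, delta_t)
data = np.sin(2 * np.pi * 100 * t) * np.exp(-(t - 2.0)**2 / 0.01)
temp = np.sin(2 * np.pi * 100 * (t - 0.001)) * np.exp(-(t - 2.001)**2 / 0.01)

# psd=None skips PSD weighting; f_min/f_max bound the overlap integral
m = match_func(data, temp, psd=None, delta_t=delta_t, f_min=20, f_max=1000)
print(m)
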
def evaluate_ts_pure_generator(net_path, generator, time_step=0.25,
                               batch_size=32):
    net = keras.models.load_model(net_path)
    res = net.predict_generator(generator, verbose=1, workers=0)
    # NOTE: `ts` is not defined in this function; it appears to rely on a
    # time-series list from the enclosing scope to supply the epoch.
    snr_ts = TimeSeries(res[0].flatten(), delta_t=time_step,
                        epoch=ts[0]._epoch)
    bool_ts = TimeSeries([pt[0] for pt in res[1]], delta_t=time_step)
    return (snr_ts, bool_ts)

def qseries(fseries, Q, f0, return_complex=False):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency
    return_complex: {False, bool}
        Return the raw complex series instead of the normalized power.

    Returns
    -------
    energy: '~pycbc.types.TimeSeries'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    """
    # normalize and generate bi-square window
    qprime = Q / 11**(1 / 2.)
    norm = numpy.sqrt(315. * qprime / (128. * f0))
    window_size = 2 * int(f0 / qprime * fseries.duration) + 1
    xfrequencies = numpy.linspace(-1., 1., window_size)

    start = int((f0 - (f0 / qprime)) * fseries.duration)
    end = int(start + window_size)
    # integer division so numpy.roll receives an int under Python 3
    center = (start + end) // 2

    windowed = fseries[start:end] * (1 - xfrequencies**2)**2 * norm

    tlen = (len(fseries) - 1) * 2
    windowed.resize(tlen)
    windowed = numpy.roll(windowed, -center)

    # calculate the time series for this q-value
    windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
                               epoch=fseries.start_time)
    ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
                          delta_t=fseries.delta_t)
    ifft(windowed, ctseries)

    if return_complex:
        return ctseries
    else:
        energy = ctseries.squared_norm()
        medianenergy = numpy.median(energy.numpy())
        return energy / float(medianenergy)

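# Illustrative usage sketch for qseries (assumed inputs, not from the snippet
# above): compute the normalized Q-transform energy of a single (Q, f0) tile
# for a white-noise frequency series. The imports below are what the snippet
# itself relies on.
import numpy
from pycbc.types import TimeSeries, FrequencySeries, zeros
from pycbc.fft import ifft

rng = numpy.random.default_rng(0)
ts = TimeSeries(rng.normal(size=4 * 1024), delta_t=1.0 / 1024)
fseries = ts.to_frequencyseries()

energy = qseries(fseries, Q=20.0, f0=64.0)                      # normalized power
ctseries = qseries(fseries, Q=20.0, f0=64.0, return_complex=True)
print(energy.max(), len(ctseries))
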
def fade_on(timeseries, alpha=0.25):
    """
    Take a PyCBC time series and use a one-sided Tukey window to "fade
    on" the waveform (to reduce discontinuities in the amplitude).

    Args:
        timeseries (pycbc.types.timeseries.TimeSeries): The PyCBC
            TimeSeries object to be faded on.
        alpha (float): The alpha parameter for the Tukey window.

    Returns:
        The `timeseries` which has been faded on.
    """

    # Save the parameters from the time series we are about to fade on
    delta_t = timeseries.delta_t
    epoch = timeseries.start_time
    duration = timeseries.duration
    sample_rate = timeseries.sample_rate

    # Create a one-sided Tukey window for the turn on
    window = tukey(M=int(duration * sample_rate), alpha=alpha)
    window[int(0.5 * len(window)):] = 1

    # Apply the one-sided Tukey window for the fade-on
    ts = window * np.array(timeseries)

    # Create and return a TimeSeries object again from the resulting array
    # using the original parameters (delta_t and epoch) of the time series
    return TimeSeries(initial_array=ts, delta_t=delta_t, epoch=epoch)

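# Illustrative usage sketch for fade_on (an assumption, not part of the
# snippet above): fade on a synthetic sine wave so its amplitude ramps up
# smoothly. The snippet assumes `np` is numpy and `tukey` comes from
# scipy.signal.windows.
import numpy as np
from scipy.signal.windows import tukey
from pycbc.types import TimeSeries

delta_t = 1.0 / 2048
t = np.arange(0, 2, delta_t)
raw = TimeSeries(np.sin(2 * np.pi * 50 * t), delta_t=delta_t)

faded = fade_on(raw, alpha=0.25)
print(faded.duration, float(faded[0]), float(faded[-1]))
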
def get_td_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a
        row in an xml table.
    {params}

    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    hplus = None
    hcross = None
    if not template:
        initial_array = np.ones(100)
        hplus = TimeSeries(initial_array, delta_t=kwargs['delta_t'],
                           epoch='', dtype=None, copy=True)
        hcross = TimeSeries(initial_array, delta_t=kwargs['delta_t'],
                            epoch='', dtype=None, copy=True)
    else:
        hplus = TimeSeries(template.hplus, delta_t=template.delta_t,
                           epoch='', dtype=None, copy=True)
        hcross = TimeSeries(template.hcross, delta_t=template.delta_t,
                            epoch='', dtype=None, copy=True)
    return hplus, hcross

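# Tiny usage sketch of this stub (an assumption, not from the snippet above):
# with no template, it returns two constant 100-sample TimeSeries. The
# snippet's module is assumed to import numpy as np and pycbc's TimeSeries.
hp, hc = get_td_waveform(delta_t=1.0 / 4096)
print(len(hp), hp.delta_t, len(hc))
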
def set_temp_offset(sig_list, t_len, t_offset, t_from_right):
    # Sanity checks
    if not isinstance(sig_list, list) or not isinstance(
            sig_list[0], type(TimeSeries([0], 0.1))):
        raise TypeError("A list of pycbc.types.timeseries.TimeSeries "
                        "objects must be provided.")

    dt = sig_list[0].delta_t
    for pt in sig_list:
        if not pt.delta_t == dt:
            raise ValueError("The timeseries must have the same delta_t!")

    # Take the first signal as reference and apply the timeshift with respect
    # to this signal (so only this template will be centered at t=0 for a
    # timeshift of 0)
    ref = sig_list[0].sample_times[0]

    # Calculate the temporal offset between the templates
    prep_list = [ref - dat.sample_times[0] for dat in sig_list]

    # Find the signal that happens the earliest and store its offset to the
    # reference signal (the earliest signal will have a negative offset)
    min_val = min(prep_list)

    # Calculate the offset of every template with respect to this earliest one
    prep_list = [dat - min_val for dat in prep_list]

    # Convert time to samples
    prep_list = [int(dat / dt) for dat in prep_list]

    # Calculate for every signal how many zeros have to be prepended for the
    # first signal to have a temporal offset of T_OFFSET and the other signals
    # to stay in relation to this first signal
    prep_list = [
        int(t_len / dt) - len(sig_list[0]) - dat + int(t_offset / dt)
        - int(t_from_right / dt) for dat in prep_list
    ]

    for i, dat in enumerate(sig_list):
        # Prepend the zeros calculated before
        if prep_list[i] < 0 and abs(prep_list[i]) > len(dat) / 2:
            # This already deals with setting the epoch correctly by default
            sig_list[i] = sig_list[i][abs(prep_list[i]):]
        else:
            sig_list[i].prepend_zeros(prep_list[i])
        # Append as many zeros as needed to get to a final length of T_LEN
        # seconds
        sig_list[i].append_zeros(int(t_len / dt) - len(sig_list[i]))
    return

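# Illustrative usage sketch (assumed parameters, not from the snippet above):
# embed two 1 s templates into 4 s of zeros while preserving their relative
# timing; the second template starts 0.1 s earlier than the first.
import numpy as np
from pycbc.types import TimeSeries

dt = 1.0 / 256
sine = np.sin(2 * np.pi * 30 * np.arange(0, 1, dt))
sig_list = [TimeSeries(sine, delta_t=dt, epoch=0.0),
            TimeSeries(sine, delta_t=dt, epoch=-0.1)]

set_temp_offset(sig_list, t_len=4.0, t_offset=0.0, t_from_right=0.5)
print([len(s) for s in sig_list])  # both now 4 s long (1024 samples)
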
def resample(ts):
    time_slices = [
        TimeSeries(ts.data[len(ts) - int(i * ts.sample_rate):],
                   delta_t=ts.delta_t)
        for i in [1, 2, 4, 8, 16, 32, 64]
    ]
    res_slices = []
    for i, t in enumerate(time_slices):
        res_slices.append(list(resample_to_delta_t(t, 1.0 / 2**(12 - i))))
        # The following line was added to make it suit my current setup
        res_slices.append(list(np.zeros(len(res_slices[-1]))))
    del time_slices
    return res_slices

def detector_projection(hp, hc, **kwargs):
    """Returns the waveform projected onto different detectors.

    Arguments
    ---------
    hp : TimeSeries
        TimeSeries object containing the "plus" polarization of a GW
    hc : TimeSeries
        TimeSeries object containing the "cross" polarization of a GW

    Returns
    -------
    list
        A list containing the signals projected onto the detectors
        specified in kwargs['detectors'].
    """
    end_time = kwargs['end_time']
    detectors = kwargs['detectors']
    declination = kwargs['declination']
    right_ascension = kwargs['right_ascension']
    polarization = kwargs['polarization']

    del kwargs['end_time']
    del kwargs['detectors']
    del kwargs['declination']
    del kwargs['right_ascension']
    del kwargs['polarization']

    detectors = [Detector(d) for d in detectors]

    hp.start_time += end_time
    hc.start_time += end_time

    ret = [
        d.project_wave(TimeSeries(hp), TimeSeries(hc), right_ascension,
                       declination, polarization) for d in detectors
    ]
    return ret

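# Illustrative usage sketch (assumed waveform and sky parameters, not from the
# snippet above): project a SEOBNRv4 waveform onto H1 and L1 using pycbc's
# waveform generator. The `Detector` import is what the snippet itself needs.
from pycbc.waveform import get_td_waveform
from pycbc.detector import Detector
from pycbc.types import TimeSeries

hp, hc = get_td_waveform(approximant='SEOBNRv4', mass1=30, mass2=30,
                         delta_t=1.0 / 2048, f_lower=30)

kwargs = {'end_time': 1126259462,   # GPS time at which to place the merger
          'detectors': ['H1', 'L1'],
          'declination': 0.5,
          'right_ascension': 1.3,
          'polarization': 0.0}
h1_signal, l1_signal = detector_projection(hp, hc, **kwargs)
print(len(h1_signal), len(l1_signal))
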
def get_strain_from_hdf_file(hdf_file_paths,
                             gps_time,
                             interval_width,
                             original_sampling_rate=4096,
                             target_sampling_rate=4096,
                             as_pycbc_timeseries=False):
    """
    For a given `gps_time`, select the interval of length
    `interval_width` (centered around `gps_time`) from the HDF files
    specified in `hdf_file_paths`, and resample them to the given
    `target_sampling_rate`.

    Args:
        hdf_file_paths (dict): A dictionary with keys `{'H1', 'L1'}`,
            which holds the paths to the HDF files containing the
            interval around `gps_time`.
        gps_time (int): A (valid) background noise time (GPS timestamp).
        interval_width (int): The length of the strain sample (in
            seconds) to be selected from the HDF files.
        original_sampling_rate (int): The original sampling rate (in
            Hertz) of the HDF files sample. Default is 4096.
        target_sampling_rate (int): The sampling rate (in Hertz) to
            which the strain should be down-sampled (if desired). Must
            be a divisor of the `original_sampling_rate`.
        as_pycbc_timeseries (bool): Whether to return the strain as a
            dict of numpy arrays or as a dict of objects of type
            `pycbc.types.timeseries.TimeSeries`.

    Returns:
        A dictionary with keys `{'H1', 'L1'}`. For each key, the
        dictionary contains a strain sample (as a numpy array) of the
        given length, centered around `gps_time`, (down)-sampled to the
        desired `target_sampling_rate`.
    """

    # -------------------------------------------------------------------------
    # Perform some basic sanity checks on the arguments
    # -------------------------------------------------------------------------

    assert isinstance(gps_time, int), \
        'time is not an integer!'
    assert isinstance(interval_width, int), \
        'interval_width is not an integer'
    assert isinstance(original_sampling_rate, int), \
        'original_sampling_rate is not an integer'
    assert isinstance(target_sampling_rate, int), \
        'target_sampling_rate is not an integer'
    assert original_sampling_rate % target_sampling_rate == 0, \
        'Invalid target_sampling_rate: Not a divisor of ' \
        'original_sampling_rate!'

    # -------------------------------------------------------------------------
    # Read out the strain from the HDF files
    # -------------------------------------------------------------------------

    # Compute the offset = half the interval width (intervals are centered
    # around the given gps_time)
    offset = int(interval_width / 2)

    # Compute the resampling factor
    sampling_factor = int(original_sampling_rate / target_sampling_rate)

    # Store the sample we have selected from the HDF files
    sample = dict()

    # Loop over both detectors
    for detector in ('H1', 'L1'):

        # Extract the path to the HDF file
        file_path = hdf_file_paths[detector]

        # Read in the HDF file and select the noise sample
        with h5py.File(file_path, 'r') as hdf_file:

            # Get the start_time and compute array indices
            start_time = int(hdf_file['meta']['GPSstart'][()])
            start_idx = \
                (gps_time - start_time - offset) * original_sampling_rate
            end_idx = \
                (gps_time - start_time + offset) * original_sampling_rate

            # Select the sample from the strain
            strain = np.array(hdf_file['strain']['Strain'])
            sample[detector] = strain[start_idx:end_idx]

        # Down-sample the selected sample to the target_sampling_rate
        sample[detector] = sample[detector][::sampling_factor]

    # -------------------------------------------------------------------------
    # Convert to PyCBC time series, if necessary
    # -------------------------------------------------------------------------

    # If we just want a plain numpy array, we can return it right away
    if not as_pycbc_timeseries:
        return sample

    # Otherwise we need to convert the numpy array to a time series first
    else:

        # Initialize an empty dict for the time series results
        timeseries = dict()

        # Convert strain of both detectors to a TimeSeries object
        for detector in ('H1', 'L1'):
            timeseries[detector] = \
                TimeSeries(initial_array=sample[detector],
                           delta_t=1.0 / target_sampling_rate,
                           epoch=LIGOTimeGPS(gps_time - offset))

        return timeseries

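# Illustrative usage sketch (the file paths and GPS time below are
# placeholders, not from the snippet above): read 16 s of strain centered on a
# GPS time and return it as PyCBC TimeSeries objects down-sampled to 2048 Hz.
# The function itself assumes h5py, numpy (np), pycbc's TimeSeries and
# LIGOTimeGPS are imported in its module.
hdf_file_paths = {'H1': '/path/to/H1_noise.hdf',
                  'L1': '/path/to/L1_noise.hdf'}

strain = get_strain_from_hdf_file(hdf_file_paths=hdf_file_paths,
                                  gps_time=1126259462,
                                  interval_width=16,
                                  original_sampling_rate=4096,
                                  target_sampling_rate=2048,
                                  as_pycbc_timeseries=True)
print(strain['H1'].duration, strain['H1'].sample_rate)
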
def overlap_func(data, temp, psd, delta_t, f_min, f_max):
    data = TimeSeries(data, delta_t=delta_t, copy=True)
    temp = TimeSeries(temp, delta_t=delta_t, copy=True)
    return filter.matchedfilter.overlap(data, temp, psd=psd,
                                        low_frequency_cutoff=f_min,
                                        high_frequency_cutoff=f_max,
                                        normalized=True)

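# Illustrative usage sketch for overlap_func (assumed inputs, not from the
# snippet above): normalized overlap between a signal and a copy shifted by a
# few samples, without PSD weighting.
import numpy as np
from pycbc import filter  # provides filter.matchedfilter used above

delta_t = 1.0 / 4096
t = np.arange(0, 4, delta_t)
sig = np.sin(2 * np.pi * 100 * t) * np.exp(-(t - 2.0)**2 / 0.01)
shifted = np.roll(sig, 8)

ov = overlap_func(sig, shifted, psd=None, delta_t=delta_t,
                  f_min=20, f_max=1000)
print(ov)
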
f_lower['GW170608', 'H1'] = 30
f_higher = 1650

N = int(T * fs)
times = np.linspace(0, T, num=N, endpoint=False)          # [s]
freqs = np.linspace(0, fs / 2, int(fs / 2 / df) + 1)      # num must be an int

# Load data, compute PSD:
PSD = {}
for event in events:
    for i, det in enumerate(LIGO):
        s = np.loadtxt('../1-estimate_parameters/{0}/{0}.dat'.format(event),
                       usecols=i, skiprows=1)              # GW data
        psd = welch(TimeSeries(s, delta_t=dt), avg_method='median-mean')
        psd_freqs = psd.sample_frequencies
        PSD[event, det] = np.interp(freqs, psd_freqs, psd)  # [1/Hz]
        PSD[event, det][freqs < f_lower[event, det]] = np.inf
        PSD[event, det][freqs > f_higher] = np.inf
    PSD[event, 'total'] = (PSD[event, 'H1']**-2 + PSD[event, 'L1']**-2)**-.5

PSD['average'] = 1 / np.mean([1 / PSD[event, 'total'] for event in events],
                             axis=0)
ASD = {x: sqrt(PSD[x]) for x in PSD}

with open('ASD_average.dat', 'w') as outfile:
    outfile.write('\n'.join('{}\t{}'.format(f, a)
                            for f, a in zip(freqs, ASD['average'])))

# fmin = 20
# fmax = 1500

# ---------------------------------------------------------------------
# Re-sample to the desired target_sampling_rate
# ---------------------------------------------------------------------

print('Re-sampling to {} Hz...'.format(target_sampling_rate), end=' ')

# Compute the re-sampling factor
resampling_factor = int(static_arguments['original_sampling_rate'] /
                        static_arguments['target_sampling_rate'])

# Re-sample the time series for both detectors
for det in ('H1', 'L1'):
    strain[det] = \
        TimeSeries(initial_array=strain[det][::resampling_factor],
                   delta_t=1.0 / target_sampling_rate,
                   epoch=strain[det].start_time)

print('Done!')

# ---------------------------------------------------------------------
# Whiten and band-pass the data
# ---------------------------------------------------------------------

for det in ('H1', 'L1'):

    # Whiten the 512 second stretch with a 4 second window
    print('Whitening the data...', end=' ')
    strain[det] = \
        strain[det].whiten(segment_duration=segment_duration,
                           max_filter_duration=max_filter_duration,

    def run(self):
        proc_name = self.name

        while True:

            # Get next task to be completed from the queue
            next_task = self._task_queue.get()

            if next_task is None:
                # This poison pill means shutdown
                LOGGER.info("{}: Exiting".format(proc_name))
                break

            results = list()

            # Initialise parameters from task queue to generate SNR time-series
            mass1 = next_task["mass1"]
            mass2 = next_task["mass2"]
            spin1z = next_task["spin1z"]
            spin2z = next_task["spin2z"]
            ra = next_task["ra"]
            dec = next_task["dec"]
            coa_phase = next_task["coa_phase"]
            inclination = next_task["inclination"]
            polarization = next_task["polarization"]
            injection_snr = next_task["injection_snr"]
            f_low = next_task["f_low"]
            approximant = next_task["approximant"]
            delta_t = next_task["delta_t"]
            index = next_task["index"]
            det_string = next_task["det_string"]
            strain_sample = next_task["strain_sample"]

            print("Generating optimal SNR time series: " + det_string +
                  " - sample" + str(index))

            # Convert sample to PyCBC time series
            strain_time_series = TimeSeries(strain_sample, delta_t=delta_t,
                                            epoch=0, dtype=None, copy=True)

            # Convert sample to PyCBC frequency series
            strain_freq_series = strain_time_series.to_frequencyseries()

            # Generate optimal matched filtering template
            template_hp, template_hc = get_td_waveform(
                approximant=approximant,
                mass1=mass1,
                mass2=mass2,
                spin1z=spin1z,
                spin2z=spin2z,
                ra=ra,
                dec=dec,
                coa_phase=coa_phase,
                inclination=inclination,
                f_lower=f_low,
                delta_t=delta_t,
            )

            # Convert template to PyCBC frequency series
            template_freq_series_hp = template_hp.to_frequencyseries(
                delta_f=strain_freq_series.delta_f)

            # Resize template to work with the sample
            template_freq_series_hp.resize(len(strain_freq_series))

            # Time shift the template so that the SNR peak matches the
            # merger time
            template_freq_series_hp = template_freq_series_hp.cyclic_time_shift(
                template_freq_series_hp.start_time)

            # Compute SNR time-series from optimal matched filtering template
            snr_series = matched_filter(template_freq_series_hp,
                                        strain_freq_series.astype(complex),
                                        psd=None,
                                        low_frequency_cutoff=f_low)

            results.append({
                "snr_strain": np.array(abs(snr_series)),
                "mass1": mass1,
                "mass2": mass2,
                "spin1z": spin1z,
                "spin2z": spin2z,
                "ra": ra,
                "dec": dec,
                "coa_phase": coa_phase,
                "inclination": inclination,
                "polarization": polarization,
                "injection_snr": injection_snr,
                "index": index,
                "det_string": det_string,
                # Unsure if this is needed when storing files; used together
                # with `index` to provide store locations later on
            })

            # Put the results on the results queue
            if len(results) >= 1:
                for result in results:
                    self._result_queue.put(result)

        # Add a poison pill
        self._result_queue.put(None)

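# Minimal stand-alone sketch of the core step inside run() above (assumed
# masses, sampling rate, and PSD model, not from the snippet): matched-filter
# an optimally placed template against simulated noise and read off the peak
# SNR, following the standard PyCBC pattern.
import numpy as np
from pycbc.types import TimeSeries
from pycbc.waveform import get_td_waveform
from pycbc.filter import matched_filter
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.noise import noise_from_psd

delta_t = 1.0 / 2048
tlen = 8 * 2048                                   # 8 s of data
psd = aLIGOZeroDetHighPower(tlen // 2 + 1, 1.0 / 8, 15.0)
strain = noise_from_psd(tlen, delta_t, psd, seed=0)

hp, _ = get_td_waveform(approximant='SEOBNRv4', mass1=36, mass2=29,
                        f_lower=20, delta_t=delta_t)
hp.resize(tlen)
# rotate the merger to the first sample so the SNR peak lands at merger time
template = hp.cyclic_time_shift(hp.start_time).to_frequencyseries()

snr = matched_filter(template, strain, psd=psd, low_frequency_cutoff=20)
print(abs(snr).numpy().max())
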
    def run(self):
        proc_name = self.name

        while True:

            next_task = self._template_task_queue.get()

            if next_task is None:
                # This poison pill means shutdown
                LOGGER.info("{}: Exiting".format(proc_name))
                break

            template_results = list()

            f_low = next_task["f_low"]
            delta_t = next_task["delta_t"]
            template = next_task["template"]
            sample_index = next_task["sample_index"]
            template_index = next_task["template_index"]
            det_string = next_task["det_string"]
            strain_sample = next_task["strain_sample"]
            sample_type = next_task["sample_type"]
            delta_f = next_task["delta_f"]
            template_start_time = next_task["template_start_time"]

            print("Generating SNR time series: " + det_string + " - sample" +
                  str(sample_index) + ", template" + str(template_index))

            template_time_series = TimeSeries(template, delta_t=delta_t,
                                              epoch=0, dtype=None, copy=True)
            template_freq_series = template_time_series.to_frequencyseries(
                delta_f=delta_f)

            strain_sample_time_series = TimeSeries(strain_sample,
                                                   delta_t=delta_t, epoch=0,
                                                   dtype=None, copy=True)
            strain_freq_series = strain_sample_time_series.to_frequencyseries(
                delta_f=delta_f)

            template_freq_series.resize(len(strain_freq_series))

            # Time shift the template so that the SNR peak matches the
            # merger time
            template_freq_series = template_freq_series.cyclic_time_shift(
                template_start_time)

            # Compute SNR time-series from optimal matched filtering template
            snr_series = matched_filter(template_freq_series,
                                        strain_freq_series.astype(complex),
                                        psd=None,
                                        low_frequency_cutoff=f_low)

            template_results.append({
                "snr_strain": np.array(abs(snr_series)),
                "sample_index": sample_index,
                "template_index": template_index,
                "det_string": det_string,
                "sample_type": sample_type,
            })

            if len(template_results) >= 1:
                for result in template_results:
                    self._template_result_queue.put(result)

        # Add a poison pill
        self._template_result_queue.put(None)

detector = "L1" # fetch the open data from GWOSC data = TimeSeries.fetch_open_data( detector, gpsstart, gpsend, sample_rate=samplerate, format='hdf5', host='https://www.gw-openscience.org', verbose=False, cache=True, ) # convert the data to a PyCBC time series pycbcdata = PyCBCTimeSeries(data.data, delta_t=(1 / data.sample_rate.value)) # high-pass filter the data to only include the frequencies we're interested in lowcutoff = 1000 buffer = 50 # just allow a bit of a buffer at the edges pycbcdata = pycbcdata.highpass_fir(lowcutoff - buffer, 8) # 8 is the "order" of the filter # create the template bank frange = [1230, 1240] # frequency range (Hz) taurange = [0.06, 0.07] # Quality factor ranges mm = 0.03 # maximum mismatch tb = ringdown.RingdownTemplateBank(frange, taurange=taurange, mm=mm) flow = lowcutoff # get the initial chunk of data to matched filter - let's get four seconds
def qtransform(fseries, Q, f0):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency

    Returns
    -------
    norm_energy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    cenergy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the complex energy from the Q-transform of
        this tile against the data.
    """
    # q-transform data for each (Q, frequency) tile

    # initialize parameters
    qprime = Q / 11**(1 / 2.)  # ... self.qprime
    dur = fseries.duration

    # check for sampling rate
    sampling = fseries.sample_rate

    # window fft
    window_size = 2 * int(f0 / qprime * dur) + 1

    # get start and end indices
    start = int((f0 - (f0 / qprime)) * dur)
    end = int(start + window_size)

    # apply window to fft
    # normalize and generate bi-square window
    norm = np.sqrt(315. * qprime / (128. * f0))
    windowed = fseries[start:end].numpy() * bisquare(window_size) * norm

    # choice of output sampling rate
    output_sampling = sampling  # Can lower this to highest bandwidth
    output_samples = int(dur * output_sampling)

    # pad data, move negative frequencies to the end, and IFFT
    padded = np.pad(windowed, padding(window_size, output_samples),
                    mode='constant')
    wenergy = npfft.ifftshift(padded)

    # return a 'TimeSeries'
    wenergy = FrequencySeries(wenergy, delta_f=1. / dur)
    cenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),
                         delta_t=1. / sampling)
    ifft(wenergy, cenergy)
    energy = cenergy.squared_norm()
    medianenergy = np.median(energy.numpy())
    norm_energy = energy / float(medianenergy)
    return norm_energy, cenergy

def evaluate_ts(ts, net_path, time_step=0.25, preemptive_whiten=False,
                whiten_len=4., whiten_crop=4.):
    net = keras.models.load_model(net_path)

    if preemptive_whiten:
        for i in range(len(ts)):
            ts[i] = ts[i].whiten(whiten_len, whiten_crop,
                                 low_frequency_cutoff=20.0)

    mp_arr = mp.Array(c.c_double, len(ts) * (len(ts[0]) + 2))
    cache = tonumpyarray(mp_arr)
    numpy_array = cache.reshape((len(ts), len(ts[0]) + 2))
    for idx, d in enumerate(ts):
        numpy_array[idx][:len(d)] = d.data[:]
        numpy_array[idx][-2] = d.delta_t
        numpy_array[idx][-1] = d.start_time
    #print(numpy_array)

    aux_info = mp.Array(c.c_double, 5)
    aux_info[0] = len(ts)
    aux_info[1] = len(ts[0]) + 2
    aux_info[2] = 1 if preemptive_whiten else 0
    aux_info[3] = whiten_len
    aux_info[4] = whiten_crop

    time_shift_back = ts[0].duration - (64.0 if preemptive_whiten else
                                        (64.0 + whiten_crop))
    indexes = list(np.arange(time_shift_back, 0.0, -time_step))
    inp = []
    bar = progress_tracker(len(np.arange(time_shift_back, 0.0, -time_step)),
                           name='Generating slices')

    with closing(mp.Pool(initializer=init,
                         initargs=(mp_arr, aux_info))) as pool:
        #inp = list(pool.imap(get_slice, np.arange(time_shift_back, 0.0, -time_step)))
        for idx, l in enumerate(
                pool.imap(get_slice,
                          np.arange(time_shift_back, 0.0, -time_step))):
            inp.append(l)
            bar.iterate()
    pool.join()

    #print("Inp")
    #print(inp)
    inp = np.array(inp)
    inp = inp.transpose((1, 0, 2))

    real_inp = [np.zeros((2, inp.shape[1], inp.shape[2])) for i in range(14)]
    for i in range(14):
        real_inp[i][0] = inp[i]
        real_inp[i][1] = inp[i + 14]
        real_inp[i] = real_inp[i].transpose(1, 2, 0)

    true_pred = net.predict(real_inp, verbose=1)

    snrs = list(true_pred[0].flatten())
    bools = [pt[0] for pt in true_pred[1]]

    snr_ts = TimeSeries(snrs, delta_t=time_step)
    bool_ts = TimeSeries(bools, delta_t=time_step)

    snr_ts.start_time = ts[0].start_time + (64.0 if preemptive_whiten else
                                            (64.0 + whiten_crop / 2.0))
    bool_ts.start_time = ts[0].start_time + (64.0 if preemptive_whiten else
                                             (64.0 + whiten_crop / 2.0))
    print(snr_ts.sample_times)

    return (snr_ts.copy(), bool_ts.copy())

# Gauss-Bonnet Theory
b, beta = GB_to_ppE(m1, m2, chi1, chi2, alpha_GB)

# Chern-Simons Theory
#b, beta = dCS_to_ppE(m1,m2,chi1,chi2,alpha_CS)

hp, hc = ppE_to_h(m1, m2, b, beta, freq_sp, freq_sc, sp_array, sc_array)
tp, tc = IFFT_to_TD(hp, hc)

noise = np.array(Noise(flow, delta_f, delta_t, tlen))
tp_long, tc_long = Array_Match(tp, tc, noise)

signal_p = noise + tp_long
signal_c = noise + tc_long

signal_pTS = TimeSeries(np.real(signal_p), delta_t=delta_t,
                        dtype=noise.dtype)
signal_cTS = TimeSeries(np.real(signal_c), delta_t=delta_t,
                        dtype=noise.dtype)

# integer division so np.zeros receives an int length under Python 3
template_pFS = FrequencySeries(np.zeros(len(noise) // 2 + 1),
                               delta_f=delta_f, dtype=np.complex_)
template_cFS = FrequencySeries(np.zeros(len(noise) // 2 + 1),
                               delta_f=delta_f, dtype=np.complex_)
template_pFS[:len(sp)] = sp
template_cFS[:len(sc)] = sc

noise_TS = TimeSeries(noise, delta_t=delta_t, dtype=noise.dtype)

Signal_OFF = Matched_Filter(template_pFS, template_cFS, noise_TS, noise_TS,
                            flen, delta_f, flow)

def get_slice(offset):
    #print("Offset: {}".format(offset))
    #print("Offset: {} | Before first access".format(offset))
    whiten_here = aux_info[2] < 0.5
    #print("Offset: {} | After first access".format(offset))
    #cache = tonumpyarray(mp_arr)
    #print("Offset: {} | After numpy".format(offset))
    #numpy_array = cache.reshape((int(aux_info[0]), int(aux_info[1])))
    #print("Offset: {} | After reshape: {}".format(offset, numpy_array.shape))
    #print("Offset: {} | Array: {}".format(offset, mp_arr.shape))
    numpy_array = mp_arr.reshape((int(aux_info[0]), int(aux_info[1])))
    #numpy_array = numpy_array.reshape((int(aux_info[0]), int(aux_info[1])))
    #print("Offset: {} | Numpy Array: {}".format(offset, numpy_array[0]))
    sample_list = []
    #print("Offset: {} | aux[0] = {}".format(offset, int(aux_info[0])))
    for i in range(int(aux_info[0])):
        #print("Offset: {} | i: {}".format(offset, i))
        dt = numpy_array[i][-2]
        epoch = numpy_array[i][-1]
        sample_rate = int(round(1.0 / dt))
        endpoint = int(len(numpy_array[i]) - 2 - offset * sample_rate)
        #print("Offset: {} | endpoint: {}".format(offset, endpoint))
        if whiten_here:
            #print("Offset: {} | in if".format(offset))
            #print("Offset: {} | Trying to access: {}".format(offset, (i, endpoint-int((64.0+aux_info[4])*sample_rate), endpoint)))
            #print("Offset: {} | Array: {}".format(offset, numpy_array))
            #print("aux_info[4]: {}".format(aux_info[4]))
            white = TimeSeries(
                numpy_array[i][endpoint - int((64.0 + aux_info[4])
                                              * sample_rate):endpoint],
                delta_t=dt,
                epoch=epoch).whiten(aux_info[3], aux_info[4],
                                    low_frequency_cutoff=20.0)
            #print("Offset: {} | after white".format(offset))
        else:
            #print("Offset: {} | in else".format(offset))
            white = TimeSeries(
                numpy_array[i][endpoint - int(64. * sample_rate):endpoint],
                delta_t=dt, epoch=epoch)
        sample_list.append(resample(white))

    ret = []
    for d in sample_list:
        ret += d
    #print("Will return now! | Offset: {}".format(offset))
    return ret

def load_data():
    # h5py does not expand '~', and `Dataset.value` was removed in h5py 3;
    # use os.path.expanduser and `[()]` instead.
    path = os.path.expanduser('~/Downloads/tseries.hdf')
    with h5py.File(path, 'r') as FILE:
        raw = [FILE['L1'][()], FILE['H1'][()]]
    return [TimeSeries(d, delta_t=1.0 / 4096) for d in raw]

def worker(kwargs):
    #print("Worker here!")
    #print("Args: {}".format(kwargs))
    full_kwargs = dict(kwargs)
    kwargs = dict(kwargs)
    opt_arg = {}
    opt_keys = [
        'snr', 'gw_prob', 'random_starting_time', 'resample_delta_t', 't_len',
        'resample_t_len', 'time_offset', 'whiten_len', 'whiten_cutoff',
        't_from_right', 'no_gw_snr'
    ]

    for key in opt_keys:
        try:
            opt_arg[key] = kwargs.get(key)
            del kwargs[key]
        except KeyError:
            print("The necessary argument '%s' was not supplied in '%s'" %
                  (key, str(__file__)))

    projection_arg = {}
    projection_arg['end_time'] = 1337 * 137 * 42
    projection_arg['declination'] = 0.0
    projection_arg['right_ascension'] = 0.0
    projection_arg['polarization'] = 0.0
    projection_arg['detectors'] = ['L1', 'H1']
    projection_arg, kwargs = filter_keys(projection_arg, kwargs)

    T_SAMPLES = int(opt_arg['t_len'] / kwargs['delta_t'])
    DELTA_F = 1.0 / opt_arg['t_len']
    F_LEN = int(2.0 / (DELTA_F * kwargs['delta_t']))

    gw_present = bool(random() < opt_arg['gw_prob'])

    psd = generate_psd(**full_kwargs)
    #TODO: Generate the seed for this prior to parallelizing
    noise_list = [
        noise_from_psd(length=T_SAMPLES, delta_t=kwargs['delta_t'], psd=psd,
                       seed=randint(0, 100000))
        for d in projection_arg['detectors']
    ]

    #print("Pre GW generation")
    if gw_present:
        # Generate waveform
        #print("Pre waveform")
        hp, hc = get_td_waveform(**kwargs)

        # Project it onto the considered detectors (this could be handled
        # using a list, to make room for more detectors)
        #print("Pre projection")
        strain_list = detector_projection(TimeSeries(hp), TimeSeries(hc),
                                          **projection_arg)

        # Enlarge the signals by adding zeros in front and after. Take care
        # of a random timeshift while still keeping the relative timeshift
        # between detectors
        #TODO: This should also be set outside
        if opt_arg['random_starting_time']:
            t_offset = opt_arg['time_offset']
        else:
            t_offset = 0.0

        #print("Pre embedding in zero")
        set_temp_offset(strain_list, opt_arg['t_len'], t_offset,
                        opt_arg['t_from_right'])

        # Rescale the templates to match the wanted SNR
        strain_list = rescale_to_snr(strain_list, opt_arg['snr'], psd,
                                     kwargs['f_lower'])
    else:
        strain_list = [
            TimeSeries(np.zeros(len(n)), n.delta_t) for n in noise_list
        ]
        opt_arg['snr'] = opt_arg['no_gw_snr']

    #print("post generating")
    total_white = []
    matched_snr_sq = []

    #print("Pre loop")
    tmp_white = []
    for i, noise in enumerate(noise_list):
        #print("Loop i: {}".format(i))
        # Add strain to noise
        noise._epoch = strain_list[i]._epoch
        #print("Post epoch, pre adding")
        total = TimeSeries(noise + strain_list[i])
        #print("Post adding, pre whiten")

        # Whiten the total data, downsample and crop the data
        total_white.append(
            total.whiten(opt_arg['whiten_len'], opt_arg['whiten_cutoff'],
                         low_frequency_cutoff=kwargs['f_lower']))
        #print("Post whiten and appending, pre resampling")

        if isinstance(opt_arg['resample_delta_t'], tuple):
            if isinstance(opt_arg['resample_t_len'], tuple) and len(
                    opt_arg['resample_delta_t']) == len(
                        opt_arg['resample_t_len']):
                rdt = opt_arg['resample_delta_t']
                resample_samples = [
                    int(opt_arg['resample_t_len'][idx] / kwargs['delta_t'])
                    for idx in range(len(rdt))
                ]
                for idx, sam in enumerate(resample_samples):
                    #print("Sam: {}".format(sam))
                    #print("Len list: {}".format(len(total_white[i])))
                    #print(i)
                    #print("Resample delta t: {}".format(rdt[idx]))
                    #print("resample_t_len: {}".format(opt_arg['resample_t_len'][idx]))
                    #print("")
                    tmp_white.append(
                        resample_to_delta_t(
                            total_white[i][len(total_white[i]) - sam:],
                            rdt[idx]))
            else:
                raise ValueError(
                    "The options 'resample_delta_t' and 'resample_t_len' "
                    "have to either both be floats or tuples of floats of "
                    "the same length.")
                # Error or handle
        else:
            total_white[i] = resample_to_delta_t(total_white[i],
                                                 opt_arg['resample_delta_t'])
            #print("Post resampling, pre cropping")
            mid_point = (total_white[i].end_time +
                         total_white[i].start_time) / 2
            total_white[i] = total_white[i].time_slice(
                mid_point - opt_arg['resample_t_len'] / 2,
                mid_point + opt_arg['resample_t_len'] / 2)
            #print("Post cropping, pre matched filtering")

        #print("Strain list: {}\ntotal: {}\nPSD: {}".format(strain_list[i], total, psd))
        #test = matched_filter(strain_list[i], total, psd=psd, low_frequency_cutoff=kwargs['f_lower'])
        #print("Can calc")

        # Calculate matched filter snr
        if gw_present:
            matched_snr_sq.append(
                max(abs(matched_filter(
                    strain_list[i], total, psd=psd,
                    low_frequency_cutoff=kwargs['f_lower'])))**2)
        else:
            #TODO: Implement matched filtering against a template bank
            matched_snr_sq.append(opt_arg['no_gw_snr']**2 / len(noise_list))
        #print("Post matched filtering, WTF!")

    if not len(tmp_white) == 0:
        total_white = tmp_white

    del total
    del strain_list

    # Calculate the total SNR over all detectors
    calc_snr = np.sqrt(sum(matched_snr_sq))

    del matched_snr_sq

    #out_wav = []
    #for i, dat in enumerate(total_white):
        #print("Length of total_white[{}]: {}".format(i, len(dat)))
    #for i in range(len(total_white[0])):
        ##print("Length of {}: {}".format(i, len(total_white[i])))
        #tmp = []
        #for j, dat in enumerate(total_white):
            #sys.stdout.write("\ri = {}| j = {} ".format(i, j))
            #sys.stdout.flush()
            #tmp.append(dat[i])
        #sys.stdout.write('\n')
        #sys.stdout.flush()
        ##print("\n")
        #out_wav.append(tmp)
    out_wav = [[dat[i] for dat in total_white]
               for i in range(len(total_white[0]))]

    #print("Pre return")
    return (np.array(out_wav),
            np.array([opt_arg['snr'], int(gw_present)]),
            np.array(calc_snr),
            np.array(str(kwargs)),
            np.array(str(opt_arg)))

from pycbc.types.timeseries import TimeSeries
from pycbc.types.frequencyseries import FrequencySeries
from pycbc.filter import match
import numpy
import matplotlib.pyplot as plt

data = numpy.sin(numpy.arange(0, 100, 100 / (4096.0 * 64)))
# data += numpy.random.normal(scale=.01, size=data.shape)
# plt.plot(data)
# plt.show()

filtD = TimeSeries(data, dtype=numpy.float64, delta_t=1.0 / 4096)
frequency_series_filt = filtD.to_frequencyseries()

dt_fraction = .5
filtD_offset_subsample = (
    frequency_series_filt *
    numpy.exp(2j * numpy.pi * frequency_series_filt.sample_frequencies *
              frequency_series_filt.delta_t * dt_fraction))

o, _ = match(filtD, filtD_offset_subsample, subsample_interpolation=True)
print(1 - o)
# assert numpy.isclose(1, o, rtol=0, atol=1e-8)