def rescale_to_netsnr(det1_TimeSeries, det2_TimeSeries, targetsnr):

    flen = len(det1_TimeSeries.to_frequencyseries())
    delta_f = \
        np.diff(det1_TimeSeries.to_frequencyseries().sample_frequencies)[0]
    f_low = 10.0
    psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)

    sigma1sq = pycbc.filter.sigmasq(det1_TimeSeries,
                                    low_frequency_cutoff=f_low, psd=psd)
    sigma2sq = pycbc.filter.sigmasq(det2_TimeSeries,
                                    low_frequency_cutoff=f_low, psd=psd)

    snr_ratio = np.sqrt(targetsnr**2 / (sigma1sq + sigma2sq))

    det1_TimeSeries.data *= snr_ratio
    det2_TimeSeries.data *= snr_ratio

    sigma1 = pycbc.filter.sigma(det1_TimeSeries,
                                low_frequency_cutoff=f_low, psd=psd)
    sigma2 = pycbc.filter.sigma(det2_TimeSeries,
                                low_frequency_cutoff=f_low, psd=psd)

    return det1_TimeSeries, det2_TimeSeries, sigma1, sigma2
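# Usage sketch for rescale_to_netsnr (illustrative; not from the original
# module). The two "detector" series are just the plus and cross
# polarisations of a toy waveform, used as stand-ins, and the same imports
# the function relies on (numpy as np, pycbc.filter, aLIGOZeroDetHighPower)
# are assumed to be in scope.
from pycbc.waveform import get_td_waveform

hp, hc = get_td_waveform(approximant="EOBNRv2", mass1=30, mass2=30,
                         f_lower=10.0, delta_t=1.0/4096)
det1, det2, sig1, sig2 = rescale_to_netsnr(hp, hc, targetsnr=12.0)
print('network sigma: %.2f' % np.sqrt(sig1**2 + sig2**2))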
def test_likelihood_evaluator_init(self):

    # data args
    seglen = 4
    sample_rate = 2048
    N = seglen * sample_rate/2 + 1
    fmin = 30.

    # setup waveform generator and signal parameters
    m1, m2, s1z, s2z, tsig = 38.6, 29.3, 0., 0., 3.1
    ra, dec, pol, dist = 1.37, -1.26, 2.76, 3*500.
    generator = waveform.FDomainDetFrameGenerator(
        waveform.FDomainCBCGenerator, 0.,
        variable_args=["tc"], detectors=["H1", "L1"],
        delta_f=1./seglen, f_lower=fmin, approximant="TaylorF2",
        mass1=m1, mass2=m2, spin1z=s1z, spin2z=s2z,
        ra=ra, dec=dec, polarization=pol, distance=dist)
    signal = generator.generate(tsig)

    # get PSDs
    psd = pypsd.aLIGOZeroDetHighPower(N, 1./seglen, 20.)
    psds = {"H1": psd, "L1": psd}

    # get a prior evaluator
    uniform_prior = distributions.Uniform(tc=(tsig-0.2, tsig+0.2))
    prior_eval = inference.prior.PriorEvaluator(["tc"], uniform_prior)

    # setup likelihood evaluator
    likelihood_eval = inference.GaussianLikelihood(generator, signal, fmin,
                                                   psds=psds,
                                                   prior=prior_eval,
                                                   return_meta=False)
def create_noise_curve(noise_curve, f_low, delta_f, flen):

    if noise_curve == 'aLIGO':
        from pycbc.psd import aLIGOZeroDetHighPower
        psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)
    elif noise_curve == 'iLIGO':
        from pycbc.psd import iLIGOSRD
        psd = iLIGOSRD(flen, delta_f, f_low)
    elif noise_curve == 'eLIGO':
        from pycbc.psd import eLIGOModel
        psd = eLIGOModel(flen, delta_f, f_low)
    elif noise_curve == 'adVirgo':
        from pycbc.psd import AdvVirgo
        psd = AdvVirgo(flen, delta_f, f_low)
    else:
        print >> sys.stderr, "error: noise curve (%s) not"\
                " supported" % noise_curve
        sys.exit(-1)

    return psd
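# Usage sketch for create_noise_curve (illustrative; not from the original
# module). Builds a 16 s, 2048 Hz-Nyquist aLIGO PSD; `sys` must be imported
# for the error branch above.
f_low = 10.0
delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
aligo_psd = create_noise_curve('aLIGO', f_low, delta_f, flen)
print('PSD length: %d, delta_f: %.4f Hz' % (len(aligo_psd), aligo_psd.delta_f))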
def match_inc(inc, spin_1, mass2):

    # Allow masses to vary as parameters
    m1_1 = mass1
    m2_1 = mass2
    m1_2 = mass1
    m2_2 = mass2

    # Phases
    phase1 = 0
    phase2 = 0

    # Convert to precessing coords
    inc_1, s1x, s1y, s1z, s2x, s2y, s2z = \
        SimInspiralTransformPrecessingNewInitialConditions(
            inc,       # theta_JN
            phi_JL,    # phi_JL
            theta_z1,  # theta1
            theta_z2,  # theta2
            phi12,     # phi12
            spin_1,    # chi1 - this parameter varies
            spin_2,    # chi2
            m1_1, m2_1, f_low, phiRef=0)

    # This is our 'spin1=0' waveform that we match the precessing one with
    inc_2, s1x_2, s1y_2, s1z_2, s2x_2, s2y_2, s2z_2 = \
        SimInspiralTransformPrecessingNewInitialConditions(
            inc,       # theta_JN
            phi_JL,    # phi_JL
            theta_z1,  # theta1
            theta_z2,  # theta2
            phi12,     # phi12
            0,         # chi1
            spin_2,    # chi2
            m1_2, m2_2, f_low, phiRef=0)

    # Generate the two waveforms to compare
    hp, hc = get_td_waveform(approximant=approx1,
                             mass1=m1_1, mass2=m2_1,
                             spin1y=s1y, spin1x=s1x, spin1z=s1z,
                             spin2y=s2y, spin2x=s2x, spin2z=s2z,
                             f_lower=f_low, inclination=inc_1,
                             coa_phase=phase1,
                             delta_t=1.0 / sample_rate)

    sp, sc = get_td_waveform(approximant=approx2,
                             mass1=m1_2, mass2=m2_2,
                             spin1y=s1y_2, spin1x=s1x_2, spin1z=s1z_2,
                             spin2y=s2y_2, spin2x=s2x_2, spin2z=s2z_2,
                             f_lower=f_low, inclination=inc_2,
                             coa_phase=phase2,
                             delta_t=1.0 / sample_rate)

    # Resize the waveforms to the same length
    tlen = max(len(sc), len(hc))
    sc.resize(tlen)
    hc.resize(tlen)

    # Generate the aLIGO ZDHP PSD
    delta_f = 1.0 / sc.duration
    flen = tlen / 2 + 1
    psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)

    # Note: This takes a while the first time as an FFT plan is generated
    # subsequent calls are much faster.
    m, i = match(hc, sc, psd=psd, low_frequency_cutoff=f_low)
    #print 'The match is: %1.3f' % m

    return m
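# Sketch of the module-level names match_inc reads from the enclosing script
# (illustrative; these values and the unit conventions are assumptions, not
# taken from the excerpt above).
from lalsimulation import SimInspiralTransformPrecessingNewInitialConditions
from pycbc.waveform import get_td_waveform
from pycbc.filter import match
from pycbc.psd import aLIGOZeroDetHighPower

mass1 = 30.0                                            # primary mass
phi_JL, theta_z1, theta_z2, phi12 = 0.0, 0.5, 0.0, 0.0  # example angles (rad)
spin_2 = 0.0                                            # secondary spin magnitude
f_low, sample_rate = 20.0, 4096
approx1 = approx2 = 'IMRPhenomPv2'

m = match_inc(inc=0.4, spin_1=0.6, mass2=20.0)
print('match: %1.3f' % m)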
def assign_noise_curve(self):

    ligo3_curves = ['base', 'highNN', 'highSPOT', 'highST', 'highSei',
                    'highloss', 'highmass', 'highpow', 'highsqz', 'lowNN',
                    'lowSPOT', 'lowST', 'lowSei']

    if self.noise_curve == 'aLIGO':
        from pycbc.psd import aLIGOZeroDetHighPower
        self.psd = aLIGOZeroDetHighPower(self.flen, self.delta_f, self.f_low)

    elif self.noise_curve == 'adVirgo':
        from pycbc.psd import AdvVirgo
        self.psd = AdvVirgo(self.flen, self.delta_f, self.f_low)

    elif self.noise_curve == 'Green' or self.noise_curve == 'Red':

        # Load from ascii
        pmnspy_path = os.getenv('PMNSPY_PREFIX')
        psd_path = pmnspy_path + '/ligo3/PSD/%sPSD.txt' % self.noise_curve
        psd_data = np.loadtxt(psd_path)

        target_freqs = np.arange(0.0, self.flen * self.delta_f, self.delta_f)
        target_psd = np.zeros(len(target_freqs))

        # Interpolate existing psd data to target frequencies
        existing = \
            np.concatenate(np.argwhere(
                (target_freqs <= psd_data[-1, 0]) *
                (target_freqs >= psd_data[0, 0])))
        target_psd[existing] = \
            np.interp(target_freqs[existing], psd_data[:, 0], psd_data[:, 1])

        # Extrapolate to higher frequencies assuming f^2 for QN
        fit_idx = np.concatenate(np.argwhere(
            (psd_data[:, 0] > 2000) * (psd_data[:, 0] <= psd_data[-1, 0])))
        p = np.polyfit(x=psd_data[fit_idx, 0], y=psd_data[fit_idx, 1], deg=2)

        target_psd[existing[-1]+1:] = \
            p[0]*target_freqs[existing[-1]+1:]**2 + \
            p[1]*target_freqs[existing[-1]+1:] + \
            p[2]

        # After all that, reset everything below f_low to zero (this saves
        # significant time in noise generation if we only care about high
        # frequencies)
        target_psd[target_freqs < self.f_low] = 0.0

        # Create psd as standard frequency series object
        self.psd = pycbc.types.FrequencySeries(
            initial_array=target_psd, delta_f=np.diff(target_freqs)[0])

    elif self.noise_curve in ligo3_curves:

        # Load from ascii
        pmnspy_path = os.getenv('PMNSPY_PREFIX')
        psd_path = pmnspy_path + \
            '/ligo3/PSD/BlueBird_%s-PSD_20140904.txt' % self.noise_curve
        psd_data = np.loadtxt(psd_path)

        target_freqs = np.arange(0.0, self.flen * self.delta_f, self.delta_f)
        target_psd = np.zeros(len(target_freqs))

        # Interpolate existing psd data to target frequencies
        existing = \
            np.concatenate(np.argwhere(
                (target_freqs <= psd_data[-1, 0]) *
                (target_freqs >= psd_data[0, 0])))
        target_psd[existing] = \
            np.interp(target_freqs[existing], psd_data[:, 0], psd_data[:, 1])

        # Extrapolate to higher frequencies assuming f^2 for QN
        fit_idx = np.concatenate(np.argwhere(
            (psd_data[:, 0] > 2000) * (psd_data[:, 0] <= psd_data[-1, 0])))
        p = np.polyfit(x=psd_data[fit_idx, 0], y=psd_data[fit_idx, 1], deg=2)

        target_psd[existing[-1]+1:] = \
            p[0]*target_freqs[existing[-1]+1:]**2 + \
            p[1]*target_freqs[existing[-1]+1:] + \
            p[2]

        # After all that, reset everything below f_low to zero (this saves
        # significant time in noise generation if we only care about high
        # frequencies)
        target_psd[target_freqs < self.f_low] = 0.0

        # Create psd as standard frequency series object
        self.psd = pycbc.types.FrequencySeries(
            initial_array=target_psd, delta_f=np.diff(target_freqs)[0])

    else:
        print >> sys.stderr, "error: noise curve (%s) not"\
                " supported" % self.noise_curve
        sys.exit(-1)
def reconstruct_freqseries(self, freqseries, npcs=1, this_fpeak=None,
                           wfnum=None):
    """
    Reconstruct the waveform in freqseries using <npcs> principal components
    from the catalogue

    Procedure:
    1) Reconstruct the centered spectra (phase and mag) from the
       beta-weighted PCs
    2) Un-center the spectra (add the mean back on)
    """
    #print "Analysing reconstruction with %d PCs"%npcs

    if this_fpeak is None:
        # Locate fpeak
        # Note: we'll assume the peak we're aligning to is >2kHz.  This
        # avoids any low frequency stuff.
        high_idx = self.sample_frequencies >= 2000
        high_freq = self.sample_frequencies[high_idx]
        high_spec = freqseries[high_idx]
        this_fpeak = high_freq[np.argmax(abs(high_spec))]

    # Get projection:
    fd_projection = self.project_freqseries(freqseries)
    fd_reconstruction = dict()

    fd_reconstruction['fd_projection'] = fd_projection

    #
    # Original Waveforms
    #
    orimag = abs(freqseries)
    oriphi = phase_of(freqseries)

    orispec = orimag * np.exp(1j * oriphi)

    fd_reconstruction['original_spectrum'] = unit_hrss(orispec,
                                                       delta=self.delta_f,
                                                       domain='frequency')

    fd_reconstruction['sample_frequencies'] = np.copy(self.sample_frequencies)

    #
    # Magnitude and phase reconstructions
    #

    # Initialise reconstructions
    recmag = np.zeros(shape=np.shape(orimag))
    recphi = np.zeros(shape=np.shape(oriphi))

    # Sum contributions from PCs
    for i in xrange(npcs):
        recmag += \
            fd_projection['magnitude_betas'][i] * \
            self.pca['magnitude_pca'].components_[i, :]
        recphi += \
            fd_projection['phase_betas'][i] * \
            self.pca['phase_pca'].components_[i, :]

    #
    # De-center the reconstruction
    #
    recmag += self.pca['magnitude_pca'].mean_
    recphi += self.pca['phase_pca'].mean_

    # --- Raw reconstruction quality
    idx = (self.sample_frequencies > self.low_frequency_cutoff) \
        * (orimag > 0.01*max(orimag))

    fd_reconstruction['magnitude_euclidean_raw'] = \
        euclidean_distance(recmag[idx], fd_projection['magnitude_cent'][idx])

    fd_reconstruction['phase_euclidean_raw'] = \
        euclidean_distance(recphi[idx], fd_projection['phase_cent'][idx])

    #
    # Move the spectrum back to where it should be
    #
    recmag = shift_vec(recmag, self.sample_frequencies,
                       fcenter=this_fpeak, fpeak=self.fcenter).real
    # XXX: phase_align
    recphi = shift_vec(recphi, self.sample_frequencies,
                       fcenter=this_fpeak, fpeak=self.fcenter).real

    fd_reconstruction['recon_mag'] = np.copy(recmag)
    fd_reconstruction['recon_phi'] = np.copy(recphi)

    #
    # Fourier spectrum reconstructions
    #
    recon_spectrum = recmag * np.exp(1j * recphi)

    # --- Unit norm reconstruction
    fd_reconstruction['recon_spectrum'] = unit_hrss(recon_spectrum,
                                                    delta=self.delta_f,
                                                    domain='frequency')

    fd_reconstruction['recon_timeseries'] = \
        fd_reconstruction['recon_spectrum'].to_timeseries()

    # --- Match calculations for mag/phase reconstructions
    recon_spectrum = np.copy(fd_reconstruction['recon_spectrum'].data)

    # --- Match calculations for full reconstructions
    idx = (self.sample_frequencies > self.low_frequency_cutoff) \
        * (orimag > 0.01*max(orimag))

    fd_reconstruction['magnitude_euclidean'] = \
        euclidean_distance(recmag[idx], orimag[idx])

    fd_reconstruction['phase_euclidean'] = \
        euclidean_distance(recphi[idx], oriphi[idx])

    # make psd
    flen = len(self.sample_frequencies)
    psd = aLIGOZeroDetHighPower(flen, self.delta_f,
                                low_freq_cutoff=self.low_frequency_cutoff)

    fd_reconstruction['match_aligo'] = \
        pycbc.filter.match(fd_reconstruction['recon_spectrum'],
                           fd_reconstruction['original_spectrum'],
                           psd=psd,
                           low_frequency_cutoff=self.low_frequency_cutoff)[0]

    fd_reconstruction['match_noweight'] = \
        pycbc.filter.match(fd_reconstruction['recon_spectrum'],
                           fd_reconstruction['original_spectrum'],
                           low_frequency_cutoff=self.low_frequency_cutoff)[0]

    return fd_reconstruction
def generate(file_path, duration, seed=0, signal_separation=200,
             signal_separation_interval=20, min_mass=1.2, max_mass=1.6,
             f_lower=20, srate=4096, padding=256, tstart=0):
    """Function that generates test data with injections.

    Arguments
    ---------
    file_path : str
        The path at which the data should be stored.
    duration : int or float
        Duration of the output file in seconds.
    seed : {int, 0}, optional
        A seed to use for generating injection parameters and noise.
    signal_separation : {int or float, 200}, optional
        The average duration between two injections.
    signal_separation_interval : {int or float, 20}, optional
        The duration between two signals will be signal_separation + t,
        where t is drawn uniformly from the interval
        [-signal_separation_interval, signal_separation_interval].
    min_mass : {float, 1.2}, optional
        The minimal mass at which injections will be made (in solar
        masses).
    max_mass : {float, 1.6}, optional
        The maximum mass at which injections will be made (in solar
        masses).
    f_lower : {int or float, 20}, optional
        Noise will be generated down to the specified frequency. Below
        this frequency it will be set to zero. (The waveforms are
        generated with a lower frequency cutoff of 25 Hertz.)
    srate : {int, 4096}, optional
        The sample rate at which the data is generated.
    padding : {int or float, 256}, optional
        Duration in the beginning and end of the data that does not
        contain any injections.
    tstart : {int or float, 0}, optional
        The initial time of the data.
    """
    np.random.seed(seed)
    size = (duration // signal_separation)

    # Generate injection times
    random_time_samples = int(round(float(signal_separation_interval) *
                                    float(srate)))
    signal_separation_samples = int(round(float(signal_separation) *
                                          float(srate)))
    time_samples = randint(signal_separation_samples - random_time_samples,
                           signal_separation_samples + random_time_samples,
                           size=size)
    time_samples = time_samples.cumsum()
    times = time_samples / float(srate)
    times = times[np.where(np.logical_and(times > padding,
                                          times < duration - padding))[0]]
    size = len(times)

    # Generate parameters
    cphase = uniform(0, np.pi*2.0, size=size)

    ra = uniform(0, 2 * np.pi, size=size)
    dec = np.arccos(uniform(-1., 1., size=size)) - np.pi/2
    inc = np.arccos(uniform(-1., 1., size=size))
    pol = uniform(0, 2 * np.pi, size=size)
    dist = power(3, size) * 400

    m1 = uniform(min_mass, max_mass, size=size)
    m2 = uniform(min_mass, max_mass, size=size)

    # Save parameters to file.
    stat_file_path, ext = os.path.splitext(file_path)
    stat_file_path = stat_file_path + '_stats' + ext
    with h5py.File(stat_file_path, 'w') as f:
        f['times'] = times
        f['cphase'] = cphase
        f['ra'] = ra
        f['dec'] = dec
        f['inc'] = inc
        f['pol'] = pol
        f['dist'] = dist
        f['mass1'] = m1
        f['mass2'] = m2
        f['seed'] = seed

    p = aLIGOZeroDetHighPower(2 * int(duration * srate), 1.0/64, f_lower)

    # Generate noise
    data = {}
    for i, ifo in enumerate(['H1', 'L1']):
        data[ifo] = colored_noise(p,
                                  int(tstart),
                                  int(tstart + duration),
                                  seed=seed + i,
                                  low_frequency_cutoff=f_lower)
        data[ifo] = resample_to_delta_t(data[ifo], 1.0/srate)

    # Make waveforms and add them into the noise
    for i in range(len(times)):
        hp, hc = get_td_waveform(approximant="TaylorF2",
                                 mass1=m1[i],
                                 mass2=m2[i],
                                 f_lower=25,
                                 delta_t=1.0/srate,
                                 inclination=inc[i],
                                 coa_phase=cphase[i],
                                 distance=dist[i])
        hp.start_time += times[i] + int(tstart)
        hc.start_time += times[i] + int(tstart)

        for ifo in ['H1', 'L1']:
            ht = Detector(ifo).project_wave(hp, hc, ra[i], dec[i], pol[i])
            time_diff = float(ht.start_time - data[ifo].start_time)
            sample_diff = int(round(time_diff / data[ifo].delta_t))
            ht.prepend_zeros(sample_diff)
            ht.start_time = data[ifo].start_time
            data[ifo] = data[ifo].add_into(ht)

    # Save the data
    for ifo in ['H1', 'L1']:
        data[ifo].save(file_path, group='%s' % (ifo))
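# Usage sketch for generate() (illustrative; the output path is an assumed
# example). This writes roughly an hour of two-detector data with BNS
# injections to 'test_injections.hdf' and saves the drawn injection
# parameters alongside it as 'test_injections_stats.hdf'.
generate('test_injections.hdf', duration=4096, seed=1,
         signal_separation=200, signal_separation_interval=20,
         min_mass=1.2, max_mass=1.6, f_lower=20, srate=4096, padding=256)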
# Scale this to the smallest OBSERVED chirp mass
mtot_target = bnru.mtot_from_mchirp(smallest_observed_chirpmass,
                                    simulations.simulations[w]['q'])

print 'scaling to ', mtot_target

hplus_NR = bnru.scale_wave(hplus_NR, mtot_target, init_total_mass)
hplus_NR = pycbc.types.TimeSeries(hplus_NR, delta_t=SI_deltaT)

Hplus_NR = hplus_NR.to_frequencyseries()

axspecSI.loglog(Hplus_NR.sample_frequencies,
                2*abs(Hplus_NR)*np.sqrt(Hplus_NR.sample_frequencies),
                color='grey')

delta_f = Hplus_NR.delta_f
flen = len(Hplus_NR)
psd = aLIGOZeroDetHighPower(flen, delta_f, 0.1)
axspecSI.loglog(psd.sample_frequencies, np.sqrt(psd),
                label='aLIGO', color='k', linestyle='--')

axspecSI.set_xlim(5, 512)
axspecSI.axvline(30, color='r')
axspecSI.minorticks_on()
axspecSI.set_xlabel('Frequency [Hz]')
axspecSI.set_ylabel('2|H$_+$($f$)|$\sqrt{f}$ & $\sqrt{S(f)}$')

pl.tight_layout()
pl.show()

sys.exit()
def __init__(self, ts, time_step=0.25, batch_size=32, dt=None):
    self.batch_size = batch_size
    self.time_step = time_step

    if not isinstance(ts, list):
        ts = [ts]

    self.ts = []
    self.dt = []
    for t in ts:
        if isinstance(t, TimeSeries):
            self.dt.append(t.delta_t)
            self.ts.append(t)
        elif isinstance(t, type(np.array([]))):
            if dt is None:
                msg = 'If the provided data is not a pycbc.types.TimeSeries '
                msg += 'a value dt must be provided.'
                raise ValueError(msg)
            else:
                self.dt.append(dt)
                self.ts.append(TimeSeries(t, delta_t=dt))
        else:
            msg = 'The provided data needs to be either a list or a '
            msg += 'single instance of either a pycbc.types.TimeSeries '
            msg += 'or a numpy.array.'
            raise ValueError(msg)

    for delta_t in self.dt:
        if not delta_t == self.dt[0]:
            raise ValueError('All data must have the same delta_t.')

    # Number of samples in each channel
    self.final_data_samples = 2048

    # delta_t of all TimeSeries
    self.dt = self.dt[0]

    # How big is the window that is shifted over the data
    # (64s + 8s for cropping when whitening)
    self.window_size_time = 72.0

    # Window size in samples
    self.window_size = int(self.window_size_time / self.dt)

    # How many points are shifted each step
    self.stride = int(self.time_step / self.dt)

    # Total number of window shifts
    self.window_shifts = int(np.floor(
        float(len(self.ts[0]) - self.window_size + self.stride) / self.stride))

    # Different parts of the signal are re-sampled to different
    # delta_t. This lists the target delta_t.
    self.resample_dt = [1.0 / 4096, 1.0 / 2048, 1.0 / 1024,
                        1.0 / 512, 1.0 / 256, 1.0 / 128]

    # The inverse of the re-sample delta_t
    self.resample_rates = [4096, 4096, 2048, 1024, 512, 256, 128]

    # PSD used to whiten the data. Calculate once to save
    # computational resources.
    self.psd = aLIGOZeroDetHighPower(self.window_size // 2 + 1,
                                     delta_f=1. / self.window_size_time,
                                     low_freq_cutoff=18.)
def generate_psd(**kwargs):
    DELTA_F = 1.0 / kwargs['t_len']
    F_LEN = int(2.0 / (DELTA_F * kwargs['delta_t']))
    return aLIGOZeroDetHighPower(length=F_LEN,
                                 delta_f=DELTA_F,
                                 low_freq_cutoff=kwargs['f_lower'])
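# Usage sketch for generate_psd (illustrative; the keyword values are example
# choices). The kwargs follow the keys the function reads: t_len in seconds,
# delta_t in seconds, f_lower in Hz.
example_psd = generate_psd(t_len=64, delta_t=1.0/4096, f_lower=20.0)
print('PSD samples: %d' % len(example_psd))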
def Noise(flow, delta_f, delta_t, tlen):
    flen = int(1.0 / delta_t / delta_f) / 2 + 1
    p_s_d = psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
    Nt = int(tlen / delta_t)
    return noise.noise_from_psd(Nt, delta_t, p_s_d, seed=127)
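# Usage sketch for Noise() (illustrative): 16 s of simulated aLIGO noise at
# 4096 Hz, assuming `from pycbc import psd, noise` as the function above
# implies.
ts = Noise(flow=20.0, delta_f=1.0/16, delta_t=1.0/4096, tlen=16)
print('generated %d samples at delta_t = %s s' % (len(ts), ts.delta_t))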
test_catalogue = bwave.waveform_catalogue(test_simulations,
                                          ref_mass=total_mass,
                                          SI_deltaT=SI_deltaT,
                                          SI_datalen=SI_datalen,
                                          distance=distance)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Do the PCA
#
print '~~~~~~~~~~~~~~~~~~~~~'
print 'Performing PCA'
print ''

pca1 = bpca.waveform_pca(train1_catalogue, test_catalogue)
pca2 = bpca.waveform_pca(train2_catalogue, test_catalogue)

# Characterise reconstructions
# XXX build a PSD and call projection_fidelity()
psd = aLIGOZeroDetHighPower(pca1.SI_flen, pca1.SI_deltaF, pca1.fmin)

euclidean_distances1, projections1, matches1 = \
    pca1.compute_projection_fidelity(psd=psd)
euclidean_distances2, projections2, matches2 = \
    pca2.compute_projection_fidelity(psd=psd)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Plots
#
print '~~~~~~~~~~~~~~~~~~~~~'
print 'Plotting results'
print ''

#
# Matches
#

# Adopt the same approach as for explained variance here
fixed_parameters = injection_parameters.copy()
for key in priors:
    fixed_parameters.pop(key)

# These lines generate the `model` object - see
# https://gwin.readthedocs.io/en/latest/api/gwin.models.gaussian_noise.html
generator = FDomainDetFrameGenerator(
    FDomainCBCGenerator, 0.,
    variable_args=variable_parameters,
    detectors=['H1', 'L1'],
    delta_f=1. / seglen,
    f_lower=fmin,
    approximant='IMRPhenomPv2',
    **fixed_parameters)
signal = generator.generate(**injection_parameters)
psd = pypsd.aLIGOZeroDetHighPower(int(N), 1. / seglen, 20.)
psds = {'H1': psd, 'L1': psd}
model = gwin.models.GaussianNoise(variable_parameters,
                                  signal, generator, fmin, psds=psds)
model.update(**injection_parameters)


# This creates a dummy class to convert the model into a bilby.likelihood
# object
class GWINLikelihood(bilby.core.likelihood.Likelihood):

    def __init__(self, model):
        """ A likelihood to wrap around GWIN model objects

        Parameters
hp, hc = pycbc.waveform.waveform.get_td_waveform(**q)  # Generate GR waveform

tlen = len(hp)              # Create parameter tlen
tlen = math.log(tlen, 2)    # Take the log base 2 of tlen
tlen = math.ceil(tlen)      # Round up to the nearest integer (result is a float)
tlen = 2.0**tlen            # Raise 2 to that integer so the FFT length is a power of two (faster FFT)
tlen = 2.0 * tlen
tlen = int(tlen)

hp.resize(tlen)             # Resize hp

f_low = 20                  # Lowest frequency

# Generate the aLIGO ZDHP PSD
delta_f = 1.0 / hp.duration  # Frequency increment
flen = tlen / 2 + 1
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)  # Creating PSD

#########################################################
##--------------------NON GR PARAMS--------------------##
#########################################################

p = default_args_ngr

p['mass1'] = 20
p['mass2'] = 30
#p['spinx1'] = 0.5
#p['spinx2'] = -0.5
p['delta_t'] = 1. / 4096
p['f_lower'] = 20
p['approximant'] = 'IMRPhenomPv2'

nGR = ['dalpha4']
def whiten_data(strain_list, low_freq_cutoff=20., max_filter_duration=4.,
                psd=None):
    """Returns the data whitened by the PSD.

    Arguments
    ---------
    strain_list : list of pycbc.TimeSeries or pycbc.TimeSeries
        The data that should be whitened.
    low_freq_cutoff : {float, 20.}
        The lowest frequency that is considered during calculations. It
        must be >= the lowest frequency where the PSD is not zero.
        Unit: hertz
    max_filter_duration : {float, 4.}
        The duration to which the PSD is truncated in the time domain.
        This amount of time is removed from both the beginning and end
        of the input data to avoid wrap-around errors.
        Unit: seconds
    psd : {None or pycbc.FrequencySeries, None}
        The PSD that should be used to whiten the data. If set to None
        the pycbc.psd.aLIGOZeroDetHighPower PSD will be used. If a PSD
        is provided which does not fit the delta_f of the data, it will
        be interpolated to fit.

    Returns
    -------
    list of pycbc.TimeSeries or TimeSeries
        Depending on the input type it will return a list of TimeSeries
        or a single TimeSeries. The data contained in this time series
        is the whitened input data, where the initial and final seconds
        as specified by max_filter_duration are removed.
    """
    org_type = type(strain_list)
    if not org_type == list:
        strain_list = [strain_list]

    ret = []
    for strain in strain_list:
        df = strain.delta_f
        f_len = int(len(strain) / 2) + 1
        if psd is None:
            psd = aLIGOZeroDetHighPower(length=f_len, delta_f=df,
                                        low_freq_cutoff=low_freq_cutoff - 2.)
        else:
            if not len(psd) == f_len:
                msg = 'Length of PSD does not match data.'
                raise ValueError(msg)
            elif not psd.delta_f == df:
                psd = interpolate(psd, df)
        max_filter_len = int(max_filter_duration * strain.sample_rate)

        # Cut out the beginning and end
        psd = inverse_spectrum_truncation(psd,
                                          max_filter_len=max_filter_len,
                                          low_frequency_cutoff=low_freq_cutoff,
                                          trunc_method='hann')

        f_strain = strain.to_frequencyseries()
        kmin = int(low_freq_cutoff / df)
        f_strain.data[:kmin] = 0
        f_strain.data[-1] = 0
        f_strain.data[kmin:] /= psd[kmin:]**0.5

        strain = f_strain.to_timeseries()
        ret.append(strain[max_filter_len:len(strain)-max_filter_len])

    if not org_type == list:
        return ret[0]
    else:
        return ret
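# Usage sketch for whiten_data (illustrative): whiten 16 s of simulated aLIGO
# noise; roughly max_filter_duration seconds are cropped from each end.
# Assumes the names used in the function above (aLIGOZeroDetHighPower,
# interpolate, inverse_spectrum_truncation) are already imported.
from pycbc.noise import noise_from_psd
from pycbc.psd import aLIGOZeroDetHighPower

_delta_t = 1.0 / 4096
_psd = aLIGOZeroDetHighPower(16 * 4096 // 2 + 1, 1.0 / 16, 18.0)
raw = noise_from_psd(16 * 4096, _delta_t, _psd, seed=0)
white = whiten_data(raw, low_freq_cutoff=20., max_filter_duration=4.)
print('input: %d samples, whitened: %d samples' % (len(raw), len(white)))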
                        spin2x=s2x, spin2y=s2y,
                        delta_t=delta_t, distance=distance,
                        f_lower=f_lower_he, inclination=inclination)

### multiply by stupid pycbc factor
hep = hep * pycbc_factor
hec = hec * pycbc_factor

# Generate the aLIGO ZDHP PSD
hep.resize(300000)
delta_f = 1.0 / hep.duration
t_len = len(hep)
f_len = t_len / 2 + 1

if psd_type == 'aLIGOZeroDetHighPower':
    psd = aLIGOZeroDetHighPower(f_len, delta_f, f_low)
elif psd_type != 'aLIGOZeroDetHighPower':
    psd = pycbc.psd.read.from_txt(psd_path, f_len, delta_f, f_low,
                                  is_asd_file=True)
else:
    sys.exit('ERROR: enter valid psd type')

sigma_sqr_he = matchedfilter.sigmasq(hep, psd=psd,
                                     low_frequency_cutoff=f_low,
                                     high_frequency_cutoff=f_high)
print sigma_sqr_he
he_l2_norm = np.linalg.norm(np.array(hep))

func_compare_EOS1 = partial(compare, psd, psd_path, hep, hec, distance,
# Now make a catalogue at 350 solar masses and then compute the overlap
#
catalogue350 = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
                                       catalogue_len=catlen, mtotal_ref=350,
                                       Dist=1., theta=theta)

oriwave350 = np.copy(catalogue350.aligned_catalogue[0, :])

# Finally, compute the match between the reconstructed 350 Msun system and the
# system we generated at that mass in the first place

recwave350_pycbc = pycbc.types.TimeSeries(np.real(recwave350),
                                          delta_t=1./2048)

oriwave250_pycbc = pycbc.types.TimeSeries(np.real(oriwave250),
                                          delta_t=1./2048)

oriwave350_pycbc = pycbc.types.TimeSeries(np.real(oriwave350),
                                          delta_t=1./2048)

psd = aLIGOZeroDetHighPower(len(recwave350_pycbc.to_frequencyseries()),
                            recwave350_pycbc.to_frequencyseries().delta_f,
                            low_freq_cutoff=10.0)

match_cat = pycbc.filter.match(oriwave250_pycbc.to_frequencyseries(),
                               oriwave350_pycbc.to_frequencyseries(),
                               psd=psd, low_frequency_cutoff=10)[0]

match_rec = pycbc.filter.match(recwave350_pycbc.to_frequencyseries(),
                               oriwave350_pycbc.to_frequencyseries(),
                               psd=psd, low_frequency_cutoff=10)[0]

print 'Match between 250 and 350 Msun catalogue waves: ', match_cat
print 'Match between 350 reconstruction and 350 catalogue wave: ', match_rec

#
def Noise(flow, delta_f, delta_t):
    flen = int(2048/delta_f) + 1
    p_s_d = psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
    tlen = int(2048/delta_t)
    return noise.noise_from_psd(tlen, delta_t, p_s_d, seed=127)
f_low = 30
sample_rate = 4096

# Generate the two waveforms to compare
hp, hc = get_td_waveform(approximant="EOBNRv2",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=1.0/sample_rate)

sp, sc = get_td_waveform(approximant="TaylorT4",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=1.0/sample_rate)

# Resize the waveforms to the same length
tlen = max(len(sp), len(hp))
sp.resize(tlen)
hp.resize(tlen)

# Generate the aLIGO ZDHP PSD
delta_f = 1.0 / sp.duration
flen = tlen/2 + 1
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)

# Note: This takes a while the first time as an FFT plan is generated
# subsequent calls are much faster.
m, i = match(hp, sp, psd=psd, low_frequency_cutoff=f_low)

print 'The match is: %1.3f' % m
def reconstructed_SineGaussianF(posterior, waveform, flow=1000, fupp=4096,
                                nrec=100):
    """
    Return the reconstructed F-domain sine-Gaussians from the posterior
    samples in posterior, as well as the max-posterior reconstruction and the
    matches with the target waveform
    """
    wlen = 16384

    # Get a zero-padded version of the target waveform
    htarget = np.zeros(wlen)
    htarget[0:len(waveform.hplus)] = waveform.hplus.data
    htarget = pycbc.types.TimeSeries(htarget, delta_t=waveform.hplus.delta_t)

    # Normalise so that the target has hrss=1
    #hrssTarget = pycbc.filter.sigma(htarget, low_frequency_cutoff=flow,
    #        high_frequency_cutoff=fupp)
    #htarget.data /= hrssTarget

    # Get frequency series of target waveform
    H_target = htarget.to_frequencyseries()

    # Make psd for matches
    flen = len(H_target)
    delta_f = np.diff(H_target.sample_frequencies)[0]
    psd = aLIGOZeroDetHighPower(flen, H_target.delta_f, low_freq_cutoff=flow)

    # -----------
    # MAP waveform

    # XXX: Time-domain is a little easier, since we don't have to figure out
    # which frequencies to populate in a pycbc object
    hp, _ = lalsim.SimBurstSineGaussian(posterior.maxP[1]['quality'],
                                        posterior.maxP[1]['frequency'],
                                        posterior.maxP[1]['hrss'], 0.0, 0.0,
                                        waveform.hplus.delta_t)
    #hp, _ = lalsim.SimBurstSineGaussian(posterior.maxP[1]['quality'],
    #        posterior.maxP[1]['frequency'], 1.0, 0.0, 0.0,
    #        waveform.hplus.delta_t)

    # zero-pad
    h_MAP = np.zeros(wlen)

    # populate
    h_MAP[:hp.data.length] = hp.data.data

    # pycbc objects
    h_MAP_ts = pycbc.types.TimeSeries(h_MAP, waveform.hplus.delta_t)
    H_MAP = h_MAP_ts.to_frequencyseries()

    MAP_match = pycbc.filter.match(H_target, H_MAP,
                                   low_frequency_cutoff=flow,
                                   high_frequency_cutoff=fupp)[0]

    # -------------------------
    # Waveforms for all samples

    # Pre-allocate:
    #nrec=500
    if len(posterior['frequency'].samples) > nrec:
        decidx = np.random.randint(0, len(posterior['frequency'].samples),
                                   nrec)
    else:
        decidx = xrange(nrec)

    reconstructions = np.zeros(shape=(nrec, len(H_target)), dtype=complex)
    matches = np.zeros(nrec)

    # All samples!
    for idx in xrange(nrec):

        # let's just use hplus for now...
        hp, _ = lalsim.SimBurstSineGaussian(
            np.squeeze(posterior['quality'].samples)[idx],
            np.squeeze(posterior['frequency'].samples)[idx],
            np.squeeze(posterior['hrss'].samples)[idx],
            0.0, 0.0, waveform.hplus.delta_t)
        #hp, _ = lalsim.SimBurstSineGaussian(
        #        np.squeeze(posterior['quality'].samples)[idx],
        #        np.squeeze(posterior['frequency'].samples)[idx],
        #        1.0, 0.0, 0.0,
        #        waveform.hplus.delta_t)

        # zero pad
        hcurrent = np.zeros(wlen)

        # populate
        hcurrent[:hp.data.length] = hp.data.data

        # pycbc object
        hcurrent_ts = pycbc.types.TimeSeries(hcurrent, delta_t=hp.deltaT)

        # populate array of all reconstructions
        reconstructions[idx, :] = hcurrent_ts.to_frequencyseries().data

        # compute match for this sample
        matches[idx] = pycbc.filter.match(H_target,
                                          hcurrent_ts.to_frequencyseries(),
                                          low_frequency_cutoff=flow,
                                          high_frequency_cutoff=fupp)[0]

    # -----------
    reconstruction = {}
    reconstruction['FrequencyAxis'] = H_MAP.sample_frequencies
    reconstruction['TargetSpectrum'] = H_target
    reconstruction['MAPSpectrum'] = H_MAP
    reconstruction['MAPMatch'] = MAP_match
    reconstruction['SampledReconstructions'] = reconstructions
    reconstruction['SampledMatches'] = matches

    return reconstruction