def from_string(psd_name, length, delta_f, low_freq_cutoff):
    """Generate a frequency series containing a LALSimulation PSD specified
    by name.

    Parameters
    ----------
    psd_name : string
        PSD name as found in LALSimulation, minus the SimNoisePSD prefix.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.
    """
    if psd_name not in _psd_list:
        raise ValueError(psd_name +
                         ' not found among LALSimulation PSD functions.')
    kmin = int(low_freq_cutoff / delta_f)
    lalseries = lal.CreateREAL8FrequencySeries(
        '', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length)
    try:
        func = lalsimulation.__dict__[_name_prefix + psd_name + _name_suffix]
    except KeyError:
        func = lalsimulation.__dict__[_name_prefix + psd_name]
        func(lalseries, low_freq_cutoff)
    else:
        lalsimulation.SimNoisePSD(lalseries, 0, func)
    psd = FrequencySeries(lalseries.data.data, delta_f=delta_f)
    psd.data[:kmin] = 0
    return psd
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
                               buffer_length=100):
    """ Convert a time domain waveform into a frequency domain waveform by
        FFT. As a waveform is assumed to "wrap" in the time domain one must be
        careful to ensure the waveform goes to 0 at both "boundaries". To
        ensure this is done correctly the waveform must have the epoch set
        such that the merger time is at t=0 and the length of the waveform
        should be shorter than the desired length of the FrequencySeries
        (times 2 - 1) so that zeroes can be suitably pre- and post-pended
        before FFTing.
        If given, out is a memory array to be used as the output of the FFT.
        If not given memory is allocated internally.
        If present the length of the returned FrequencySeries is determined
        from the length of out. If out is not given the length can be provided
        explicitly, or it will be chosen as the nearest power of 2. If
        choosing length explicitly the waveform length + buffer_length is used
        when choosing the nearest binary number so that some zero padding is
        always added.
    """
    # Figure out lengths and set out if needed
    if out is None:
        if length is None:
            N = pnutils.nearest_larger_binary_number(len(waveform) +
                                                     buffer_length)
            n = int(N//2) + 1
        else:
            n = length
            N = (n-1)*2
        out = zeros(n, dtype=complex_same_precision_as(waveform))
    else:
        n = len(out)
        N = (n-1)*2
    delta_f = 1. / (N * waveform.delta_t)

    # total duration of the waveform
    tmplt_length = len(waveform) * waveform.delta_t
    if len(waveform) > N:
        err_msg = "The time domain template is longer than the intended "
        err_msg += "duration in the frequency domain. This situation is "
        err_msg += "not supported in this function. Please shorten the "
        err_msg += "waveform appropriately before calling this function or "
        err_msg += "increase the allowed waveform length. "
        err_msg += "Waveform length (in samples): {}".format(len(waveform))
        err_msg += ". Intended length: {}.".format(N)
        raise ValueError(err_msg)

    # for IMR templates the zero of time is at max amplitude (merger)
    # thus the start time is minus the duration of the template from
    # lower frequency cutoff to merger, i.e. minus the 'chirp time'
    tChirp = - float(waveform.start_time)  # conversion from LIGOTimeGPS
    waveform.resize(N)
    k_zero = int(waveform.start_time / waveform.delta_t)
    waveform.roll(k_zero)
    htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
    fft(waveform.astype(real_same_precision_as(htilde)), htilde)
    htilde.length_in_time = tmplt_length
    htilde.chirp_length = tChirp
    return htilde
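# Illustrative usage sketch (an assumption, not taken from the source):
# generate a time-domain approximant through PyCBC's public get_td_waveform
# interface and convert it to a frequency-domain filter with the function
# above. The approximant name, masses, and the hypothetical helper name
# _example_td_to_fd are placeholders.
from pycbc.waveform import get_td_waveform

def _example_td_to_fd():
    # IMR approximants place the merger at t=0, as required by
    # td_waveform_to_fd_waveform
    hp, _ = get_td_waveform(approximant='SEOBNRv4', mass1=30., mass2=30.,
                            delta_t=1.0/4096, f_lower=30.)
    return td_waveform_to_fd_waveform(hp)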
def _shift_and_ifft(self, fdsinx, tshift, fseries=None):
    """Calls apply_fd_time_shift, and iFFTs to the time domain.
    """
    start_time = self.time_series.start_time
    tdshift = apply_fd_time_shift(fdsinx, start_time+tshift, fseries=fseries)
    if not isinstance(tdshift, FrequencySeries):
        # cast to FrequencySeries so time series will work
        tdshift = FrequencySeries(tdshift, delta_f=fdsinx.delta_f,
                                  epoch=fdsinx.epoch)
    return tdshift.to_timeseries()
def get_waveform_filter(out, template=None, **kwargs):
    """Return a frequency domain waveform filter for the specified approximant
    """
    n = len(out)

    input_params = props(template, **kwargs)

    if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
        wav_gen = filter_wav[type(_scheme.mgr.state)]
        htilde = wav_gen[input_params['approximant']](out=out, **input_params)
        htilde.resize(n)
        htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
        htilde.length_in_time = htilde.chirp_length
        return htilde

    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        hp.resize(n)
        hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
        hp.length_in_time = hp.chirp_length
        return hp

    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        # N: number of time samples required
        N = (n-1)*2
        delta_f = 1.0 / (N * input_params['delta_t'])
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)

        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)

        # total duration of the waveform
        tmplt_length = len(hp) * hp.delta_t

        # for IMR templates the zero of time is at max amplitude (merger)
        # thus the start time is minus the duration of the template from
        # lower frequency cutoff to merger, i.e. minus the 'chirp time'
        tChirp = - float(hp.start_time)  # conversion from LIGOTimeGPS
        hp.resize(N)
        k_zero = int(hp.start_time / hp.delta_t)
        hp.roll(k_zero)
        htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
        fft(hp.astype(real_same_precision_as(htilde)), htilde)
        htilde.length_in_time = tmplt_length
        htilde.chirp_length = tChirp
        return htilde

    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
def from_string(psd_name, length, delta_f, low_freq_cutoff):
    """Generate a frequency series containing a LALSimulation PSD specified
    by name.

    Parameters
    ----------
    psd_name : string
        PSD name as found in LALSimulation, minus the SimNoisePSD prefix.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.
    """
    # check if valid PSD model
    if psd_name not in get_psd_model_list():
        raise ValueError(psd_name + ' not found among analytical '
                         'PSD functions.')

    # if PSD model is in LALSimulation
    if psd_name in get_lalsim_psd_list():
        lalseries = lal.CreateREAL8FrequencySeries(
            '', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length)
        try:
            func = lalsimulation.__dict__[
                _name_prefix + psd_name + _name_suffix]
        except KeyError:
            func = lalsimulation.__dict__[_name_prefix + psd_name]
            func(lalseries, low_freq_cutoff)
        else:
            lalsimulation.SimNoisePSD(lalseries, 0, func)
        psd = FrequencySeries(lalseries.data.data, delta_f=delta_f)

    # if PSD model is coded in PyCBC
    else:
        func = pycbc_analytical_psds[psd_name]
        psd = func(length, delta_f, low_freq_cutoff)

    # zero-out content below low-frequency cutoff
    kmin = int(low_freq_cutoff / delta_f)
    psd.data[:kmin] = 0

    return psd
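# Illustrative usage sketch (an assumption, not taken from the source): build
# a design-sensitivity aLIGO PSD from 20 Hz up to 1024 Hz at 1/16 Hz
# resolution using the from_string function above. 'aLIGOZeroDetHighPower' is
# a standard LALSimulation noise-curve name; the helper name is hypothetical.
def _example_analytical_psd():
    delta_f = 1.0 / 16
    flen = int(1024 / delta_f) + 1
    return from_string('aLIGOZeroDetHighPower', flen, delta_f, 20.0)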
def get_waveform_filter(out, template=None, **kwargs):
    """Return a frequency domain waveform filter for the specified approximant
    """
    n = len(out)

    input_params = props(template, **kwargs)

    if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
        wav_gen = filter_wav[type(_scheme.mgr.state)]
        htilde = wav_gen[input_params['approximant']](out=out, **input_params)
        htilde.resize(n)
        htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
        htilde.length_in_time = htilde.chirp_length
        return htilde

    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        hp.resize(n)
        out[0:len(hp)] = hp[:]
        hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False)
        hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
        hp.length_in_time = hp.chirp_length
        return hp

    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        # N: number of time samples required
        N = (n-1)*2
        delta_f = 1.0 / (N * input_params['delta_t'])
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)

        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)
        return td_waveform_to_fd_waveform(hp, out=out)

    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
def flat_unity(length, delta_f, low_freq_cutoff):
    """ Returns a FrequencySeries of ones above the low_frequency_cutoff.

    Parameters
    ----------
    length : int
        Length of output FrequencySeries.
    delta_f : float
        Frequency step for output FrequencySeries.
    low_freq_cutoff : float
        Low-frequency cutoff for output FrequencySeries.

    Returns
    -------
    FrequencySeries
        Returns a FrequencySeries containing the unity PSD model.
    """
    fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f)
    kmin = int(low_freq_cutoff / fseries.delta_f)
    fseries.data[:kmin] = 0
    return fseries
def _imrphenombfreq(**p):
    import lalinspiral
    params = lalinspiral.InspiralTemplate()
    m1 = p['mass1']
    m2 = p['mass2']

    mc, et = pnutils.mass1_mass2_to_mchirp_eta(m1, m2)
    params.approximant = lalsimulation.IMRPhenomB
    params.fLower = p['f_lower']
    params.eta = et
    params.distance = p['distance'] * lal.PC_SI * 1e6
    params.mass1 = m1
    params.mass2 = m2
    params.spin1[2] = p['spin1z']
    params.spin2[2] = p['spin2z']
    params.startPhase = p['coa_phase'] * 2 - lal.PI
    params.startTime = 0

    params.tSampling = 8192
    N = int(params.tSampling / p['delta_f'])
    n = N // 2

    # Create temporary memory to hold the results and call the generator
    hpt = zeros(N, dtype=float32)
    hct = zeros(N, dtype=float32)
    hpt = hpt.lal()
    hct = hct.lal()
    lalinspiral.BBHPhenWaveBFreqDomTemplates(hpt, hct, params)

    # Copy the results to a complex frequencyseries format
    hctc = FrequencySeries(zeros(n, dtype=complex64), delta_f=p['delta_f'])
    hptc = FrequencySeries(zeros(n, dtype=complex64), delta_f=p['delta_f'])
    hptc.data += hpt.data[0:n]
    hptc.data[1:n] += hpt.data[N:N-n:-1] * 1j
    hctc.data += hct.data[0:n]
    hctc.data[1:n] += hct.data[N:N-n:-1] * 1j

    return hptc.astype(complex128), hctc.astype(complex128)
def from_lalsimulation(func, length, delta_f, low_freq_cutoff):
    """Generate a frequency series containing the specified LALSimulation PSD.

    Parameters
    ----------
    func : function
        LALSimulation PSD function.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.
    """
    psd = FrequencySeries(zeros(length), delta_f=delta_f)
    kmin = int(low_freq_cutoff / delta_f)
    # evaluate the PSD model at every frequency at or above the cutoff;
    # list() is needed so the slice assignment accepts the mapped values
    psd.data[kmin:] = list(map(func, numpy.arange(length)[kmin:] * delta_f))
    return psd
def spintaylorf2(**kwds):
    """ Return a SpinTaylorF2 waveform using CUDA to generate the phase and
        amplitude
    """
    #####Pull out the input arguments#####
    f_lower = double(kwds['f_lower'])
    delta_f = double(kwds['delta_f'])
    distance = double(kwds['distance'])
    mass1 = double(kwds['mass1'])
    mass2 = double(kwds['mass2'])
    spin1x = double(kwds['spin1x'])
    spin1y = double(kwds['spin1y'])
    spin1z = double(kwds['spin1z'])
    phi0 = double(kwds['coa_phase'])  # Orbital Phase at coalescence
    phase_order = int(kwds['phase_order'])
    amplitude_order = int(kwds['amplitude_order'])
    inclination = double(kwds['inclination'])
    lnhatx = sin(inclination)
    lnhaty = 0.
    lnhatz = cos(inclination)
    psi = 0.

    tC = -1.0 / delta_f
    M = mass1 + mass2
    eta = mass1 * mass2 / (M * M)
    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec
    vISCO = 1. / sqrt(6.)
    fISCO = vISCO * vISCO * vISCO / piM
    f_max = ceilpow2(fISCO)
    n = int(f_max / delta_f + 1)
    kmax = int(fISCO / delta_f)
    kmin = int(numpy.ceil(f_lower / delta_f))
    kmax = kmax if (kmax < n) else n

    #####Calculate the Orientation#####
    v0 = pow(piM * kmin * delta_f, 1./3)
    chi = sqrt(spin1x**2 + spin1y**2 + spin1z**2)
    kappa = (lnhatx*spin1x + lnhaty*spin1y + lnhatz*spin1z)/chi \
        if (chi > 0.) else 1.
    Jx0 = mass1*mass2*lnhatx/v0 + mass1*mass1*spin1x
    Jy0 = mass1*mass2*lnhaty/v0 + mass1*mass1*spin1y
    Jz0 = mass1*mass2*lnhatz/v0 + mass1*mass1*spin1z
    thetaJ = acos(Jz0 / sqrt(Jx0**2 + Jy0**2 + Jz0**2))
    psiJ = atan2(Jy0, -Jx0)  # FIXME: check that Jy0 and Jx0 are not both 0
    # Rotate Lnhat back to frame where J is along z, to figure out initial
    # alpha
    rotLx = lnhatx*cos(thetaJ)*cos(psiJ) - lnhaty*cos(thetaJ)*sin(psiJ) \
        + lnhatz*sin(thetaJ)
    rotLy = lnhatx*sin(psiJ) + lnhaty*cos(psiJ)
    alpha0 = atan2(rotLy, rotLx)  # FIXME: check that rotLy and rotLx are not both 0
    psiJ_P = psiJ + psi
    psiJ_C = psiJ + psi + lal.PI/4.

    #####Calculate the Coefficients#####
    #quadparam = 1.
    gamma0 = mass1*chi/mass2
    # Calculate the spin corrections
    # FIXME should use pycbc's function, but sigma has different expression
    # in Andy's code, double check
    # pn_beta, pn_sigma, pn_gamma = \
    #     pycbc.pnutils.mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(
    #         mass1, mass2, chi*kappa, 0)
    # FIXME: spin2 is taken to be 0
    pn_beta = (113.*mass1/(12.*M) - 19.*eta/6.)*chi*kappa
    pn_sigma = ((5.*(3.*kappa*kappa-1.)/2.) + (7. - kappa*kappa)/96.) \
        * (mass1*mass1*chi*chi/M/M)
    pn_gamma = (5.*(146597. + 7056.*eta)*mass1/(2268.*M)
                - 10.*eta*(1276. + 153.*eta)/81.)*chi*kappa
    prec_fac0 = 5.*(4. + 3.*mass2/mass1)/64.
    dtdv2 = 743./336. + 11.*eta/4.
    dtdv3 = -4.*lal.PI + pn_beta
    dtdv4 = 3058673./1016064. + 5429.*eta/1008. + 617.*eta*eta/144. - pn_sigma
    dtdv5 = (-7729./672. + 13.*eta/8.)*lal.PI + 9.*pn_gamma/40.

    #####Calculate the Initial Euler Angles alpha_ref, beta_ref=0 and zeta_ref#####
    gam = gamma0*v0
    sqrtfac = sqrt(1. + 2.*kappa*gam + gam*gam)
    logv0 = log(v0)
    logfac1 = log(1. + kappa*gam + sqrtfac)
    logfac2 = log(kappa + gam + sqrtfac)
    v02 = v0 * v0
    v03 = v0 * v02
    kappa2 = kappa * kappa
    kappa3 = kappa2 * kappa
    gamma02 = gamma0 * gamma0
    gamma03 = gamma02 * gamma0

    alpha_ref = prec_fac0*(
        logfac2*(dtdv2*gamma0 + dtdv3*kappa - dtdv5*kappa/(2.*gamma02)
                 + dtdv4/(2.*gamma0) - dtdv4*kappa2/(2.*gamma0)
                 + (dtdv5*kappa3)/(2.*gamma02))
        + logfac1*(- dtdv2*gamma0*kappa - dtdv3 + kappa*gamma03/2.
                   - gamma03*kappa3/2.)
        + logv0*(dtdv2*gamma0*kappa + dtdv3 - kappa*gamma03/2.
                 + gamma03*kappa3/2.)
        + sqrtfac*(dtdv3 + dtdv4*v0/2. + dtdv5/gamma02/3.
                   + dtdv4*kappa/(2.*gamma0) + dtdv5*kappa*v0/(6.*gamma0)
                   - dtdv5*kappa2/(2.*gamma02) - 1/(3.*v03)
                   - gamma0*kappa/(6.*v02) - dtdv2/v0 - gamma02/(3.*v0)
                   + gamma02*kappa2/(2.*v0) + dtdv5*v02/3.)) - alpha0

    zeta_ref = prec_fac0*(
        dtdv3*gamma0*kappa*v0 + dtdv4*v0
        + logfac2*(-dtdv2*gamma0 - dtdv3*kappa + dtdv5*kappa/(2.*gamma02)
                   - dtdv4/(2.*gamma0) + dtdv4*kappa2/(2.*gamma0)
                   - dtdv5*kappa3/(2.*gamma02))
        + logv0*(kappa*gamma03/2. - gamma03*kappa3/2.)
        + logfac1*(dtdv2*gamma0*kappa + dtdv3 - kappa*gamma03/2.
                   + gamma03*kappa3/2.)
        - 1/(3.*v03) - gamma0*kappa/(2.*v02) - dtdv2/v0
        + dtdv4*gamma0*kappa*v02/2. + dtdv5*v02/2.
        + sqrtfac*(-dtdv3 - dtdv4*v0/2. - dtdv5/(3.*gamma02)
                   - dtdv4*kappa/(2.*gamma0) - dtdv5*kappa*v0/(6.*gamma0)
                   + dtdv5*kappa2/(2.*gamma02) + 1/(3.*v03)
                   + gamma0*kappa/(6.*v02) + dtdv2/v0 + gamma02/(3.*v0)
                   - gamma02*kappa2/(2.*v0) - dtdv5*v02/3.)
        + dtdv5*gamma0*kappa*v03/3.)

    #####Calculate the Complex sideband factors, mm=2 is first entry#####
    RE_SBfac0 = (1. + cos(thetaJ)**2)/2.
    RE_SBfac1 = sin(2.*thetaJ)
    RE_SBfac2 = 3.*sin(thetaJ)**2
    RE_SBfac3 = -sin(2.*thetaJ)
    RE_SBfac4 = (1. + cos(thetaJ)**2)/2.
    IM_SBfac0 = -cos(thetaJ)
    IM_SBfac1 = -2.*sin(thetaJ)
    IM_SBfac2 = 0.
    IM_SBfac3 = -2.*sin(thetaJ)
    IM_SBfac4 = cos(thetaJ)

    #####Calculate the PN terms
    # FIXME replace with functions in lalsimulation
    #####
    theta = -11831./9240.
    lambdaa = -1987./3080.0
    pfaN = 3.0/(128.0 * eta)
    pfa2 = 5.0*(743.0/84 + 11.0 * eta)/9.0
    pfa3 = -16.0*lal.PI + 4.0*pn_beta
    pfa4 = 5.0*(3058.673/7.056 + 5429.0/7.0 * eta + 617.0 * eta*eta)/72.0 - \
        10.0*pn_sigma
    pfa5 = 5.0/9.0 * (7729.0/84.0 - 13.0 * eta) * lal.PI - pn_gamma
    pfl5 = 5.0/3.0 * (7729.0/84.0 - 13.0 * eta) * lal.PI - pn_gamma * 3
    pfa6 = (11583.231236531/4.694215680 - 640.0/3.0 * lal.PI * lal.PI -
            6848.0/21.0*lal.GAMMA) + \
        eta * (-15335.597827/3.048192 + 2255./12. * lal.PI *
               lal.PI - 1760./3.*theta + 12320./9.*lambdaa) + \
        eta*eta * 76055.0/1728.0 - \
        eta*eta*eta * 127825.0/1296.0
    pfl6 = -6848.0/21.0
    pfa7 = lal.PI * 5.0/756.0 * (15419335.0/336.0 + 75703.0/2.0 * eta -
                                 14809.0 * eta*eta)

    FTaN = 32.0 * eta*eta / 5.0
    FTa2 = -(12.47/3.36 + 3.5/1.2 * eta)
    FTa3 = 4.0 * lal.PI
    FTa4 = -(44.711/9.072 - 92.71/5.04 * eta - 6.5/1.8 * eta*eta)
    FTa5 = -(81.91/6.72 + 58.3/2.4 * eta) * lal.PI
    FTa6 = (664.3739519/6.9854400 + 16.0/3.0 * lal.PI*lal.PI -
            17.12/1.05 * lal.GAMMA +
            (4.1/4.8 * lal.PI*lal.PI - 134.543/7.776) * eta -
            94.403/3.024 * eta*eta - 7.75/3.24 * eta*eta*eta)
    FTl6 = -8.56/1.05
    FTa7 = -(162.85/5.04 - 214.745/1.728 * eta - 193.385/3.024 * eta*eta) \
        * lal.PI

    dETaN = 2 * -eta/2.0
    dETa1 = 2 * -(3.0/4.0 + 1.0/12.0 * eta)
    dETa2 = 3 * -(27.0/8.0 - 19.0/8.0 * eta + 1./24.0 * eta*eta)
    dETa3 = 4 * -(67.5/6.4 - (344.45/5.76 - 20.5/9.6 * lal.PI*lal.PI) * eta +
                  15.5/9.6 * eta*eta + 3.5/518.4 * eta*eta*eta)

    amp0 = -4. * mass1 * mass2 / (1.0e+06 * distance * lal.PC_SI) * \
        lal.MRSUN_SI * lal.MTSUN_SI * sqrt(lal.PI/12.0)

    htildeP = FrequencySeries(zeros(n, dtype=complex128), delta_f=delta_f,
                              copy=False)
    htildeC = FrequencySeries(zeros(n, dtype=complex128), delta_f=delta_f,
                              copy=False)
    spintaylorf2_kernel(htildeP.data[kmin:kmax], htildeC.data[kmin:kmax],
                        kmin, phase_order, amplitude_order, delta_f, piM,
                        pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7,
                        FTaN, FTa2, FTa3, FTa4, FTa5, FTa6, FTl6, FTa7,
                        dETaN, dETa1, dETa2, dETa3,
                        amp0, tC, phi0,
                        kappa, prec_fac0, alpha_ref, zeta_ref,
                        dtdv2, dtdv3, dtdv4, dtdv5,
                        RE_SBfac0, RE_SBfac1, RE_SBfac2, RE_SBfac3, RE_SBfac4,
                        IM_SBfac0, IM_SBfac1, IM_SBfac2, IM_SBfac3, IM_SBfac4,
                        psiJ_P, psiJ_C, gamma0)

    return htildeP, htildeC
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None,
                  f_lower=None, interpolation='inline_linear'):
    """Decompresses an FD waveform using the given amplitude, phase, and the
    frequencies at which they are sampled.

    Parameters
    ----------
    amp : array
        The amplitude of the waveform at the sample frequencies.
    phase : array
        The phase of the waveform at the sample frequencies.
    sample_frequencies : array
        The frequency (in Hz) of the waveform at the sample frequencies.
    out : {None, FrequencySeries}
        The output array to save the decompressed waveform to. If this
        contains slots for frequencies > the maximum frequency in
        sample_frequencies, the rest of the values are zeroed. If not
        provided, must provide a df.
    df : {None, float}
        The frequency step to use for the decompressed waveform. Must be
        provided if out is None.
    f_lower : {None, float}
        The frequency to start the decompression at. If None, will use
        whatever the lowest frequency is in sample_frequencies. All values at
        frequencies less than this will be 0 in the decompressed waveform.
    interpolation : {'inline_linear', str}
        The interpolation to use for the amplitude and phase. Default is
        'inline_linear'. If 'inline_linear' a custom interpolater is used.
        Otherwise, ``scipy.interpolate.interp1d`` is used; for other options,
        see possible values for that function's ``kind`` argument.

    Returns
    -------
    out : FrequencySeries
        If out was provided, writes to that array. Otherwise, a new
        FrequencySeries with the decompressed waveform.
    """
    precision = _precision_map[sample_frequencies.dtype.name]
    if _precision_map[amp.dtype.name] != precision or \
            _precision_map[phase.dtype.name] != precision:
        raise ValueError("amp, phase, and sample_points must all have the "
                         "same precision")

    if out is None:
        if df is None:
            raise ValueError("Either provide output memory or a df")
        hlen = int(numpy.ceil(sample_frequencies.max() / df + 1))
        out = FrequencySeries(numpy.zeros(hlen,
                              dtype=_complex_dtypes[precision]), copy=False,
                              delta_f=df)
    else:
        # check for precision compatibility
        if out.precision == 'double' and precision == 'single':
            raise ValueError("cannot cast single precision to double")
        df = out.delta_f
        hlen = len(out)

    if f_lower is None:
        imin = 0  # pylint:disable=unused-variable
        f_lower = sample_frequencies[0]
        start_index = 0
    else:
        if f_lower >= sample_frequencies.max():
            raise ValueError("f_lower is > than the maximum sample frequency")
        if f_lower < sample_frequencies.min():
            raise ValueError("f_lower is < than the minimum sample frequency")
        imin = int(numpy.searchsorted(sample_frequencies, f_lower,
                   side='right')) - 1  # pylint:disable=unused-variable
        start_index = int(numpy.ceil(f_lower / df))
    if start_index >= hlen:
        raise ValueError('requested f_lower >= largest frequency in out')

    # interpolate the amplitude and the phase
    if interpolation == "inline_linear":
        # Call the scheme-dependent function
        inline_linear_interp(amp, phase, sample_frequencies, out,
                             df, f_lower, imin, start_index)
    else:
        # use scipy for fancier interpolation
        sample_frequencies = numpy.array(sample_frequencies)
        amp = numpy.array(amp)
        phase = numpy.array(phase)
        outfreq = out.sample_frequencies.numpy()
        amp_interp = interpolate.interp1d(sample_frequencies, amp,
                                          kind=interpolation,
                                          bounds_error=False,
                                          fill_value=0.,
                                          assume_sorted=True)
        phase_interp = interpolate.interp1d(sample_frequencies, phase,
                                            kind=interpolation,
                                            bounds_error=False,
                                            fill_value=0.,
                                            assume_sorted=True)
        A = amp_interp(outfreq)
        phi = phase_interp(outfreq)
        out.data[:] = A * numpy.cos(phi) + (1j) * A * numpy.sin(phi)
    return out
def _get_waveform_from_inspiral(**p):
    import lalmetaio

    # approximant names are prefixed with 'Inspiral-'; strip the prefix
    name = p['approximant'][9:]

    if name.startswith('EOB'):
        p['phase_order'] = -8

    params = lalmetaio.SimInspiralTable()
    params.waveform = name + string_from_order[p['phase_order']]
    params.mass1 = p['mass1']
    params.mass2 = p['mass2']
    params.f_lower = p['f_lower']
    params.spin1x = p['spin1x']
    params.spin1y = p['spin1y']
    params.spin1z = p['spin1z']
    params.spin2x = p['spin2x']
    params.spin2y = p['spin2y']
    params.spin2z = p['spin2z']
    params.inclination = p['inclination']
    params.distance = p['distance']
    params.coa_phase = p['coa_phase']

    import lalinspiral
    guess_length = lalinspiral.FindChirpChirpTime(params.mass1, params.mass2,
                                                  params.f_lower, 7)
    guess_length = max(guess_length, 3)
    params.geocent_end_time = guess_length * 1.5
    params.taper = 'TAPER_NONE'  # FIXME - either explain or don't hardcode this

    bufferl = guess_length * 2
    dt = p['delta_t']
    df = 1.0 / bufferl
    sample_rate = int(1.0 / dt)
    epoch = lal.LIGOTimeGPS(0, 0)
    N = int(bufferl * sample_rate)
    n = N // 2 + 1

    resp = FrequencySeries(zeros(n), delta_f=df, epoch=epoch,
                           dtype=complex64) + 1
    out = TimeSeries(zeros(N), delta_t=dt, epoch=epoch, dtype=float32)
    outl = out.lal()
    outl.sampleUnits = lal.ADCCountUnit
    out2 = TimeSeries(zeros(N), delta_t=dt, epoch=epoch, dtype=float32)
    outl2 = out2.lal()
    outl2.sampleUnits = lal.ADCCountUnit

    respl = resp.lal()
    respl.sampleUnits = lal.DimensionlessUnit

    lalinspiral.FindChirpInjectSignals(outl, params, respl)

    params.coa_phase -= lal.PI / 4
    lalinspiral.FindChirpInjectSignals(outl2, params, respl)

    seriesp = TimeSeries(outl.data.data, delta_t=dt,
                         epoch=epoch - params.geocent_end_time)
    seriesc = TimeSeries(outl2.data.data, delta_t=dt,
                         epoch=epoch - params.geocent_end_time)

    return seriesp, seriesc
def matched_filter_core(template, data, psd=None, low_frequency_cutoff=None,
                        high_frequency_cutoff=None, h_norm=None, out=None,
                        corr_out=None):
    """ Return the complex snr and normalization.

    Return the complex snr, along with its associated normalization of the
    template, matched filtered against the data.

    Parameters
    ----------
    template : TimeSeries or FrequencySeries
        The template waveform
    data : TimeSeries or FrequencySeries
        The strain data to be filtered.
    psd : {FrequencySeries}, optional
        The noise weighting of the filter.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
    h_norm : {None, float}, optional
        The template normalization. If none, this value is calculated
        internally.
    out : {None, Array}, optional
        An array to use as memory for snr storage. If None, memory is
        allocated internally.
    corr_out : {None, Array}, optional
        An array to use as memory for correlation storage. If None, memory is
        allocated internally. If provided, management of the vector is handled
        externally by the caller. No zeroing is done internally.

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr.
    correlation: FrequencySeries
        A frequency series containing the correlation vector.
    norm : float
        The normalization of the complex snr.
    """
    if corr_out is not None:
        _qtilde = corr_out
    else:
        global _qtilde_t
        _qtilde = _qtilde_t

    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    N = (len(stilde) - 1) * 2
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff,
                                    stilde.delta_f, N)

    if out is None:
        _q = zeros(N, dtype=complex_same_precision_as(data))
    elif (len(out) == N) and type(out) is Array and out.kind == 'complex':
        _q = out
    else:
        raise TypeError('Invalid Output Vector: wrong length or dtype')

    if corr_out:
        pass
    elif (_qtilde is None) or (len(_qtilde) != N) \
            or _qtilde.dtype != data.dtype:
        _qtilde_t = _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.clear()

    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f:
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")

    ifft(_qtilde, _q)

    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff,
                         high_frequency_cutoff)

    norm = (4.0 * stilde.delta_f) / sqrt(h_norm)
    delta_t = 1.0 / (N * stilde.delta_f)

    return (TimeSeries(_q, epoch=stilde._epoch, delta_t=delta_t, copy=False),
            FrequencySeries(_qtilde, epoch=stilde._epoch,
                            delta_f=htilde.delta_f, copy=False),
            norm)
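# Illustrative usage sketch (an assumption, not taken from the source): filter
# a frequency-domain template against synthetic colored noise, using public
# PyCBC interfaces (pycbc.psd, pycbc.noise, get_fd_waveform). The approximant,
# masses, durations, and the helper name are placeholders.
import pycbc.noise
import pycbc.psd
from pycbc.waveform import get_fd_waveform

def _example_matched_filter():
    delta_f = 1.0 / 16
    flen = int(1024 / delta_f) + 1
    psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 20.0)
    # 16 s of noise sampled at 2048 Hz, so the data delta_f matches the PSD
    data = pycbc.noise.noise_from_psd(16 * 2048, 1.0 / 2048, psd, seed=0)
    hp, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=30., mass2=30.,
                            delta_f=data.delta_f, f_lower=20.)
    hp.resize(len(data) // 2 + 1)
    snr, corr, norm = matched_filter_core(hp, data, psd=psd,
                                          low_frequency_cutoff=20.0)
    return snr * norm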
def detect_loud_glitches(strain, psd_duration=16, psd_stride=8,
                         psd_avg_method='median', low_freq_cutoff=30.,
                         threshold=50., cluster_window=5., corrupted_time=4.,
                         high_freq_cutoff=None, output_intermediates=False):
    """Automatic identification of loud transients for gating purposes."""
    if output_intermediates:
        strain.save_to_wav('strain_conditioned.wav')

    # don't waste time trying to optimize a single FFT
    pycbc.fft.fftw.set_measure_level(0)

    logging.info('Autogating: estimating PSD')
    psd = pycbc.psd.welch(strain, seg_len=psd_duration*strain.sample_rate,
                          seg_stride=psd_stride*strain.sample_rate,
                          avg_method=psd_avg_method)

    logging.info('Autogating: time -> frequency')
    strain_tilde = FrequencySeries(numpy.zeros(len(strain) // 2 + 1),
                                   delta_f=1./strain.duration,
                                   dtype=complex_same_precision_as(strain))
    pycbc.fft.fft(strain, strain_tilde)

    logging.info('Autogating: interpolating PSD')
    psd = pycbc.psd.interpolate(psd, strain_tilde.delta_f)

    logging.info('Autogating: whitening')
    if high_freq_cutoff:
        kmax = int(high_freq_cutoff / strain_tilde.delta_f)
        strain_tilde[kmax:] = 0.
        norm = high_freq_cutoff - low_freq_cutoff
    else:
        norm = strain.sample_rate/2. - low_freq_cutoff
    strain_tilde /= (psd * norm) ** 0.5
    kmin = int(low_freq_cutoff / strain_tilde.delta_f)
    strain_tilde[0:kmin] = 0.

    # FIXME at this point the strain can probably be downsampled

    logging.info('Autogating: frequency -> time')
    pycbc.fft.ifft(strain_tilde, strain)
    pycbc.fft.fftw.set_measure_level(pycbc.fft.fftw._default_measurelvl)

    logging.info('Autogating: stdev of whitened strain is %.4f',
                 numpy.std(strain))
    if output_intermediates:
        strain.save_to_wav('strain_whitened.wav')

    mag = abs(strain)
    if output_intermediates:
        mag.save('strain_whitened_mag.npy')
    mag = numpy.array(mag, dtype=numpy.float32)

    # remove corrupted strain at the ends
    corrupted_idx = int(corrupted_time * strain.sample_rate)
    mag[0:corrupted_idx] = 0
    mag[-1:-corrupted_idx-1:-1] = 0

    logging.info('Autogating: finding loud peaks')
    indices = numpy.where(mag > threshold)[0]
    cluster_idx = pycbc.events.findchirp_cluster_over_window(
        indices, mag[indices], int(cluster_window*strain.sample_rate))
    times = [idx * strain.delta_t + strain.start_time
             for idx in indices[cluster_idx]]

    return times
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None,
                  f_lower=None, interpolation='linear'):
    """Decompresses an FD waveform using the given amplitude, phase, and the
    frequencies at which they are sampled.

    Parameters
    ----------
    amp : array
        The amplitude of the waveform at the sample frequencies.
    phase : array
        The phase of the waveform at the sample frequencies.
    sample_frequencies : array
        The frequency (in Hz) of the waveform at the sample frequencies.
    out : {None, FrequencySeries}
        The output array to save the decompressed waveform to. If this
        contains slots for frequencies > the maximum frequency in
        sample_frequencies, the rest of the values are zeroed. If not
        provided, must provide a df.
    df : {None, float}
        The frequency step to use for the decompressed waveform. Must be
        provided if out is None.
    f_lower : {None, float}
        The frequency to start the decompression at. If None, will use
        whatever the lowest frequency is in sample_frequencies. All values at
        frequencies less than this will be 0 in the decompressed waveform.
    interpolation : {'linear', str}
        The interpolation to use for the amplitude and phase. Default is
        'linear'. If 'linear' a custom interpolater is used. Otherwise,
        ``scipy.interpolate.interp1d`` is used; for other options, see
        possible values for that function's ``kind`` argument.

    Returns
    -------
    out : FrequencySeries
        If out was provided, writes to that array. Otherwise, a new
        FrequencySeries with the decompressed waveform.
    """
    if out is None:
        if df is None:
            raise ValueError("Either provide output memory or a df")
        flen = int(numpy.ceil(sample_frequencies.max()/df+1))
        out = FrequencySeries(numpy.zeros(flen, dtype=numpy.complex128),
                              copy=False, delta_f=df)
    else:
        df = out.delta_f
        flen = len(out)
    if f_lower is None:
        jmin = 0
        f_lower = sample_frequencies[0]
    else:
        if f_lower >= sample_frequencies.max():
            raise ValueError("f_lower is > than the maximum sample frequency")
        jmin = int(numpy.searchsorted(sample_frequencies, f_lower))
    imin = int(numpy.floor(f_lower/df))
    # interpolate the amplitude and the phase
    if interpolation == "linear":
        # use custom interpolation
        sflen = len(sample_frequencies)
        h = numpy.array(out.data, copy=False)
        # make sure df is a float
        df = float(df)
        code = r"""
        # include <math.h>
        # include <stdio.h>
        int j = jmin-1;
        double sf = 0.;
        double A = 0.;
        double nextA = 0.;
        double phi = 0.;
        double nextPhi = 0.;
        double next_sf = sample_frequencies[jmin];
        double f = 0.;
        double invsdf = 0.;
        double mAmp = 0.;
        double bAmp = 0.;
        double mPhi = 0.;
        double bPhi = 0.;
        double interpAmp = 0.;
        double interpPhi = 0.;
        // zero-out beginning of array
        std::fill(h, h+imin, std::complex<double>(0., 0.));
        // cycle over desired samples
        for (int i=imin; i<flen; i++){
            f = i*df;
            if (f >= next_sf){
                // update linear interpolations
                j += 1;
                // if we have gone beyond the sampled frequencies, just break
                if ((j+1) == sflen) {
                    // zero-out the rest of the array & exit
                    std::fill(h+i, h+flen, std::complex<double>(0., 0.));
                    break;
                }
                sf = (double) sample_frequencies[j];
                next_sf = (double) sample_frequencies[j+1];
                A = (double) amp[j];
                nextA = (double) amp[j+1];
                phi = (double) phase[j];
                nextPhi = (double) phase[j+1];
                invsdf = 1./(next_sf - sf);
                mAmp = (nextA - A)*invsdf;
                bAmp = A - mAmp*sf;
                mPhi = (nextPhi - phi)*invsdf;
                bPhi = phi - mPhi*sf;
            }
            interpAmp = mAmp * f + bAmp;
            interpPhi = mPhi * f + bPhi;
            h[i] = std::complex<double> (interpAmp*cos(interpPhi),
                                         interpAmp*sin(interpPhi));
        }
        """
        inline(code, ['flen', 'sflen', 'df', 'sample_frequencies',
                      'amp', 'phase', 'h', 'imin', 'jmin'],
               extra_compile_args=[WEAVE_FLAGS + '-march=native -O3 -w'] +
                                  omp_flags,
               libraries=omp_libs)
    else:
        # use scipy for fancier interpolation
        outfreq = out.sample_frequencies.numpy()
        amp_interp = interpolate.interp1d(sample_frequencies, amp,
                                          kind=interpolation,
                                          bounds_error=False,
                                          fill_value=0.,
                                          assume_sorted=True)
        phase_interp = interpolate.interp1d(sample_frequencies, phase,
                                            kind=interpolation,
                                            bounds_error=False,
                                            fill_value=0.,
                                            assume_sorted=True)
        A = amp_interp(outfreq)
        phi = phase_interp(outfreq)
        out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi)
    return out
def get_fd_lm(template=None, **kwargs):
    """Return frequency domain lm mode with a given number of overtones.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    freqs : dict {lmn:f_lmn}
        Dictionary of the central frequencies for each overtone, as many as
        number of modes.
    taus : dict {lmn:tau_lmn}
        Dictionary of the damping times for each overtone, as many as number
        of modes.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amplmn : float
        Amplitude of the lmn overtone, as many as the number of nmodes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown. If None, it will be
        set to the inverse of the time at which the amplitude is 1/1000 of the
        peak amplitude (the minimum of all modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series. If None, it
        will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series. If None, it will
        be set to the frequency at which the amplitude is 1/1000 of the peak
        amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a lm mode with n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a lm mode with n overtones in frequency domain.
    """
    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    f_0 = input_params.pop('freqs')
    tau = input_params.pop('taus')
    l, m = input_params.pop('l'), input_params.pop('m')
    inc = input_params.pop('inclination', 0.)
    nmodes = input_params.pop('nmodes')
    if int(nmodes) == 0:
        raise ValueError('Number of overtones (nmodes) must be greater '
                         'than zero.')
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = lm_deltaf(tau, ['%d%d%d' % (l, m, nmodes)])
    if f_final is None:
        f_final = lm_ffinal(f_0, tau, ['%d%d%d' % (l, m, nmodes)])
    kmax = int(f_final / delta_f) + 1

    outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
    outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)

    for n in range(nmodes):
        hplus, hcross = get_fd_qnm(template=None,
                                   f_0=f_0['%d%d%d' % (l, m, n)],
                                   tau=tau['%d%d%d' % (l, m, n)],
                                   amp=amps['%d%d%d' % (l, m, n)],
                                   phi=phis['%d%d%d' % (l, m, n)],
                                   inclination=inc, l=l, m=m,
                                   delta_f=delta_f, f_lower=f_lower,
                                   f_final=f_final)
        outplus.data += hplus.data
        outcross.data += hcross.data

    return outplus, outcross
            dec_inj ]

snr_list = []
fig = pyplot.figure()
ii = 0
colors = {'H1': 'r', 'L1': 'g'}
for ifo in ['H1', 'L1']:
    ii += 1
    ax = fig.add_subplot(int('21{}'.format(ii)))
    print(ifo)

    # get the psd
    print("loading psd")
    psd = FrequencySeries(fp['{}/psds/0'.format(ifo)][:],
                          delta_f=fp['{}/psds/0'.format(ifo)].attrs['delta_f']) \
        / DYN_RANGE_FAC**2
    asd = FrequencySeries(numpy.sqrt(psd.numpy()), delta_f=psd.delta_f)

    # get the strain
    print("loading strain")
    stilde = FrequencySeries(
        fp['{}/stilde'.format(ifo)][:],
        delta_f=fp['{}/stilde'.format(ifo)].attrs['delta_f'],
        epoch=fp['{}/stilde'.format(ifo)].attrs['epoch'])

    print("whitening")
    wh_stilde = FrequencySeries(stilde / asd, delta_f=stilde.delta_f,
                                epoch=stilde.epoch)
    wh_strain = wh_stilde.to_timeseries()
def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann',
          avg_method='median', num_segments=None,
          require_exact_data_fit=False):
    """PSD estimator based on Welch's method.

    Parameters
    ----------
    timeseries : TimeSeries
        Time series for which the PSD is to be estimated.
    seg_len : int
        Segment length in samples.
    seg_stride : int
        Separation between consecutive segments, in samples.
    window : {'hann', numpy.ndarray}
        Function used to window segments before Fourier transforming, or
        a `numpy.ndarray` that specifies the window.
    avg_method : {'median', 'mean', 'median-mean'}
        Method used for averaging individual segment PSDs.

    Returns
    -------
    psd : FrequencySeries
        Frequency series containing the estimated PSD.

    Raises
    ------
    ValueError
        For invalid choices of `seg_len`, `seg_stride`, `window` and
        `avg_method` and for inconsistent combinations of len(`timeseries`),
        `seg_len` and `seg_stride`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    window_map = {'hann': numpy.hanning}

    # sanity checks
    if isinstance(window, numpy.ndarray) and window.size != seg_len:
        raise ValueError('Invalid window: incorrect window length')
    if not isinstance(window, numpy.ndarray) and window not in window_map:
        raise ValueError('Invalid window: unknown window {!r}'.format(window))
    if avg_method not in ('mean', 'median', 'median-mean'):
        raise ValueError('Invalid averaging method')
    if type(seg_len) is not int or type(seg_stride) is not int \
            or seg_len <= 0 or seg_stride <= 0:
        raise ValueError('Segment length and stride must be positive integers')

    if timeseries.precision == 'single':
        fs_dtype = numpy.complex64
    elif timeseries.precision == 'double':
        fs_dtype = numpy.complex128

    num_samples = len(timeseries)
    if num_segments is None:
        num_segments = int(num_samples // seg_stride)
        # NOTE: Is this not always true?
        if (num_segments - 1) * seg_stride + seg_len > num_samples:
            num_segments -= 1

    if not require_exact_data_fit:
        data_len = (num_segments - 1) * seg_stride + seg_len

        # Get the correct amount of data
        if data_len < num_samples:
            diff = num_samples - data_len
            start = diff // 2
            end = num_samples - diff // 2
            # Want this to be integers so if diff is odd, catch it here.
            if diff % 2:
                start = start + 1
            timeseries = timeseries[start:end]
            num_samples = len(timeseries)
        if data_len > num_samples:
            err_msg = "I was asked to estimate a PSD on %d " % (data_len)
            err_msg += "data samples. However the data provided only contains "
            err_msg += "%d data samples." % (num_samples)
            raise ValueError(err_msg)

    if num_samples != (num_segments - 1) * seg_stride + seg_len:
        raise ValueError('Incorrect choice of segmentation parameters')

    if not isinstance(window, numpy.ndarray):
        window = window_map[window](seg_len)
    w = Array(window.astype(timeseries.dtype))

    # calculate psd of each segment
    delta_f = 1. / timeseries.delta_t / seg_len
    segment_tilde = FrequencySeries(numpy.zeros(seg_len // 2 + 1),
                                    delta_f=delta_f, dtype=fs_dtype)

    segment_psds = []
    for i in range(num_segments):
        segment_start = i * seg_stride
        segment_end = segment_start + seg_len
        segment = timeseries[segment_start:segment_end]
        assert len(segment) == seg_len
        fft(segment * w, segment_tilde)
        seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy()

        # halve the DC and Nyquist components to be consistent with TO10095
        seg_psd[0] /= 2
        seg_psd[-1] /= 2

        segment_psds.append(seg_psd)

    segment_psds = numpy.array(segment_psds)

    if avg_method == 'mean':
        psd = numpy.mean(segment_psds, axis=0)
    elif avg_method == 'median':
        psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments)
    elif avg_method == 'median-mean':
        odd_psds = segment_psds[::2]
        even_psds = segment_psds[1::2]
        odd_median = numpy.median(odd_psds, axis=0) / \
            median_bias(len(odd_psds))
        even_median = numpy.median(even_psds, axis=0) / \
            median_bias(len(even_psds))
        psd = (odd_median + even_median) / 2

    psd *= 2 * delta_f * seg_len / (w * w).sum()

    return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype,
                           epoch=timeseries.start_time)
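# Illustrative usage sketch (an assumption, not taken from the source):
# estimate a median PSD from 64 s of synthetic white noise sampled at
# 4096 Hz, using 4 s segments with 50% overlap. The helper name is
# hypothetical.
import numpy
from pycbc.types import TimeSeries

def _example_welch_psd():
    delta_t = 1.0 / 4096
    data = TimeSeries(numpy.random.normal(size=64 * 4096), delta_t=delta_t)
    return welch(data, seg_len=4 * 4096, seg_stride=2 * 4096,
                 avg_method='median')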
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None,
                                trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, int}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and
        `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0
             or low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError(
            'low_frequency_cutoff must be within the bandwidth of the PSD')

    N = (len(psd) - 1) * 2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f,
                              dtype=complex_same_precision_as(psd))

    inv_asd[0] = 0
    inv_asd[N // 2] = 0
    q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f),
                   dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2
    if trunc_end < trunc_start:
        raise ValueError('Invalid value in inverse_spectrum_truncation')

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len // 2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len // 2]

    if trunc_start < trunc_end:
        q[trunc_start:trunc_end] = 0

    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f,
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
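# Illustrative usage sketch (an assumption, not taken from the source):
# smooth an estimated PSD so that its inverse filter is at most 4 s long for
# data sampled at 4096 Hz. The psd argument and helper name are hypothetical;
# the PSD could come from the welch() estimator above.
def _example_truncated_psd(psd):
    return inverse_spectrum_truncation(psd, 4 * 4096,
                                       low_frequency_cutoff=20.0,
                                       trunc_method='hann')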
def adjust_strain(self, strain, delta_fs=None, delta_qinv=None, delta_fc=None,
                  kappa_c=1.0, kappa_tst_re=1.0, kappa_tst_im=0.0,
                  kappa_pu_re=1.0, kappa_pu_im=0.0):
    """Adjust the FrequencySeries strain by changing the time-dependent
    calibration parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv.

    Parameters
    ----------
    strain : FrequencySeries
        The strain data to be adjusted.
    delta_fc : float
        Change in coupled-cavity (CC) pole at time t.
    kappa_c : float
        Scalar correction factor for sensing function c0 at time t.
    kappa_tst_re : float
        Real part of scalar correction factor for actuation function
        A_{tst0} at time t.
    kappa_tst_im : float
        Imaginary part of scalar correction factor for actuation function
        A_tst0 at time t.
    kappa_pu_re : float
        Real part of scalar correction factor for actuation function
        A_{pu0} at time t.
    kappa_pu_im : float
        Imaginary part of scalar correction factor for actuation function
        A_{pu0} at time t.
    delta_fs : float
        Change in spring frequency for signal recycling cavity.
    delta_qinv : float
        Change in inverse quality factor for signal recycling cavity.

    Returns
    -------
    strain_adjusted : FrequencySeries
        The adjusted strain.
    """
    fc = self.fc0 + delta_fc if delta_fc else self.fc0
    fs = self.fs0 + delta_fs if delta_fs else self.fs0
    qinv = self.qinv0 + delta_qinv if delta_qinv else self.qinv0

    # calculate adjusted response function
    r_adjusted = self.update_r(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c,
                               kappa_tst_re=kappa_tst_re,
                               kappa_tst_im=kappa_tst_im,
                               kappa_pu_re=kappa_pu_re,
                               kappa_pu_im=kappa_pu_im)

    # calculate error function
    k = r_adjusted / self.r0

    # decompose into amplitude and unwrapped phase
    k_amp = numpy.abs(k)
    k_phase = numpy.unwrap(numpy.angle(k))

    # convert to FrequencySeries by interpolating then resampling
    order = 1
    k_amp_off = UnivariateSpline(self.freq, k_amp, k=order, s=0)
    k_phase_off = UnivariateSpline(self.freq, k_phase, k=order, s=0)
    freq_even = strain.sample_frequencies.numpy()
    k_even_sample = k_amp_off(freq_even) * \
        numpy.exp(1.0j * k_phase_off(freq_even))
    strain_adjusted = FrequencySeries(strain.numpy() * k_even_sample,
                                      delta_f=strain.delta_f)

    return strain_adjusted
m1 = m2 = 1.4
mchirp = float(pycbc.conversions.mchirp_from_mass1_mass2(m1, m2))
eta = float(pycbc.conversions.eta_from_mass1_mass2(m1, m2))

hp = numpy.zeros(flen, dtype=numpy.complex128)
hc = hp.copy()

f = lib.generate
f.argtypes = [c_void_p, c_void_p, c_double, c_double, c_double, c_double,
              c_double, c_double, c_double, c_double, c_double]
_ = f(hp.ctypes.data, hc.ctypes.data, mchirp, eta, inc, ecc, lamc, lc,
      dist, fend, df)

import pylab

# it appears that the plus / cross data is time inverted
hp = FrequencySeries(hp.conj(), delta_f=df, epoch=-int(1.0 / df))
hc = FrequencySeries(hc.conj(), delta_f=df, epoch=-int(1.0 / df))

kmin = int(flow / df)
hp[:kmin].clear()
hc[:kmin].clear()

t = hp.to_timeseries()
pylab.plot(t.sample_times, t)
#pylab.xscale('log')
pylab.show()
def calculate_acf(data, delta_t=1.0, unbiased=False):
    r"""Calculates the one-sided autocorrelation function.

    Calculates the autocorrelation function (ACF) and returns the one-sided
    ACF. The ACF is defined as the autocovariance divided by the variance. The
    ACF can be estimated using

    .. math::

        \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} -
            \mu \right) \left( X_{t+k} - \mu \right)

    Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at
    time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}`
    is the variance of :math:`X_{t}`.

    Parameters
    -----------
    data : TimeSeries or numpy.array
        A TimeSeries or numpy.array of data.
    delta_t : float
        The time step of the data series if it is not a TimeSeries instance.
    unbiased : bool
        If True the normalization of the autocovariance function is n-k
        instead of n. This is called the unbiased estimation of the
        autocovariance. Note that this does not mean the ACF is unbiased.

    Returns
    -------
    acf : numpy.array
        If data is a TimeSeries then acf will be a TimeSeries of the
        one-sided ACF. Else acf is a numpy.array.
    """
    # if given a TimeSeries instance then get numpy.array
    if isinstance(data, TimeSeries):
        y = data.numpy()
        delta_t = data.delta_t
    else:
        y = data

    # Zero mean
    y = y - y.mean()
    ny_orig = len(y)

    npad = 1
    while npad < 2*ny_orig:
        npad = npad << 1
    ypad = numpy.zeros(npad)
    ypad[:ny_orig] = y

    # FFT data minus the mean
    fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries()

    # correlate
    # do not need to give the conjugate since correlate function does it
    cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype),
                            delta_f=fdata.delta_f, copy=False)
    correlate(fdata, fdata, cdata)

    # IFFT correlated data to get unnormalized autocovariance time series
    acf = cdata.to_timeseries()
    acf = acf[:ny_orig]

    # normalize the autocovariance
    # note that dividing by acf[0] is the same as ( y.var() * len(acf) )
    if unbiased:
        acf /= (y.var() * numpy.arange(len(acf), 0, -1))
    else:
        acf /= acf[0]

    # return input datatype
    if isinstance(data, TimeSeries):
        return TimeSeries(acf, delta_t=delta_t)
    else:
        return acf
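# Illustrative usage sketch (an assumption, not taken from the source): the
# ACF of white Gaussian noise should be 1 at zero lag and close to zero at
# other lags. The helper name is hypothetical.
import numpy
from pycbc.types import TimeSeries

def _example_acf():
    noise = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0 / 4096)
    acf = calculate_acf(noise)
    return acf[0]  # equal to 1 by construction of the normalization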
def template_segment_checker(self, bank, t_num, segment, start_time):
    """Test if injections in segment are worth filtering with template.

    Using the current template, current segment, and injections within that
    segment, test if the template is sufficiently "similar" to any of the
    injections to justify actually performing a matched-filter call. There
    are two parts to this test: First we check if the chirp time of the
    template is within a provided window of any of the injections. If not
    then stop here, it is not worth filtering this template, segment
    combination for this injection set.

    If this check passes we compute a match between a coarse representation
    of the template and a coarse representation of each of the injections. If
    that match is above a user-provided value for any of the injections then
    filtering can proceed. This is currently only available if using
    frequency-domain templates.

    Parameters
    -----------
    FIXME

    Returns
    --------
    FIXME
    """
    if not self.enabled:
        # If disabled, always filter (ie. return True)
        return True

    # Get times covered by segment analyze
    sample_rate = 2. * (len(segment) - 1) * segment.delta_f
    cum_ind = segment.cumulative_index
    diff = segment.analyze.stop - segment.analyze.start
    seg_start_time = cum_ind / sample_rate + start_time
    seg_end_time = (cum_ind + diff) / sample_rate + start_time
    # And add buffer
    seg_start_time = seg_start_time - self.seg_buffer
    seg_end_time = seg_end_time + self.seg_buffer

    # Chirp time test
    if self.chirp_time_window is not None:
        m1 = bank.table[t_num]['mass1']
        m2 = bank.table[t_num]['mass2']
        tau0_temp, _ = mass1_mass2_to_tau0_tau3(m1, m2, self.f_lower)
        for inj in self.injection_params.table:
            end_time = inj.geocent_end_time + \
                1E-9 * inj.geocent_end_time_ns
            if not(seg_start_time <= end_time <= seg_end_time):
                continue
            tau0_inj, _ = \
                mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2, self.f_lower)
            tau_diff = abs(tau0_temp - tau0_inj)
            if tau_diff <= self.chirp_time_window:
                break
        else:
            # Gets here if all injections are outside chirp-time window
            return False

    # Coarse match test
    if self.match_threshold:
        if self._short_template_mem is None:
            # Set the memory for the short templates
            wav_len = 1 + int(self.coarsematch_fmax / self.coarsematch_deltaf)
            self._short_template_mem = zeros(wav_len, dtype=np.complex64)

        # Set the current short PSD to red_psd
        try:
            red_psd = self._short_psd_storage[id(segment.psd)]
        except KeyError:
            # PSD doesn't exist yet, so make it!
            curr_psd = segment.psd.numpy()
            step_size = int(self.coarsematch_deltaf / segment.psd.delta_f)
            max_idx = int(self.coarsematch_fmax / segment.psd.delta_f) + 1
            red_psd_data = curr_psd[:max_idx:step_size]
            red_psd = FrequencySeries(red_psd_data,  # copy=False,
                                      delta_f=self.coarsematch_deltaf)
            self._short_psd_storage[id(segment.psd)] = red_psd

        # Set htilde to be the current short template
        if not t_num == self._short_template_id:
            # Set the memory for the short templates if unset
            if self._short_template_mem is None:
                wav_len = 1 + int(self.coarsematch_fmax /
                                  self.coarsematch_deltaf)
                self._short_template_mem = zeros(wav_len, dtype=np.complex64)
            # Generate short waveform
            htilde = bank.generate_with_delta_f_and_max_freq(
                t_num, self.coarsematch_fmax, self.coarsematch_deltaf,
                low_frequency_cutoff=bank.table[t_num].f_lower,
                cached_mem=self._short_template_mem)
            self._short_template_id = t_num
            self._short_template_wav = htilde
        else:
            htilde = self._short_template_wav

        for inj in self.injection_params.table:
            end_time = inj.geocent_end_time + \
                1E-9 * inj.geocent_end_time_ns
            if not(seg_start_time < end_time < seg_end_time):
                continue
            curr_inj = self.short_injections[inj.simulation_id]
            o, _ = match(htilde, curr_inj, psd=red_psd,
                         low_frequency_cutoff=self.f_lower)
            if o > self.match_threshold:
                break
        else:
            # Gets here if all injections are outside match threshold
            return False

    return True
def get_fd_qnm(template=None, **kwargs):
    """Return a frequency domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    amp : float
        The amplitude of the ringdown (constant for now).
    phi : float
        The initial phase of the ringdown. Should also include the information
        from the azimuthal angle (phi_0 + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    l : {2, int}, optional
        l mode for the spherical harmonics. Default is l=2.
    m : {2, int}, optional
        m mode for the spherical harmonics. Default is m=2.
    t_0 : {0, float}, optional
        The starting time of the ringdown.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown. If None, it will be
        set to the inverse of the time at which the amplitude is 1/1000 of the
        peak amplitude.
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series. If None, it
        will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series. If None, it will
        be set to the frequency at which the amplitude is 1/1000 of the peak
        amplitude.

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of the ringdown in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of the ringdown in frequency domain.
    """
    input_params = props(template, qnm_required_args, **kwargs)

    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following have defaults, and so will be populated
    t_0 = input_params.pop('t_0')
    # the following may not be in input_params
    inc = input_params.pop('inclination', 0.)
    l = input_params.pop('l', 2)
    m = input_params.pop('m', 2)
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = 1. / qnm_time_decay(tau, 1./1000)
    if f_lower is None:
        f_lower = delta_f
        kmin = 0
    else:
        kmin = int(f_lower / delta_f)
    if f_final is None:
        f_final = qnm_freq_decay(f_0, tau, 1./1000)
    if f_final > max_freq:
        f_final = max_freq
    kmax = int(f_final / delta_f) + 1

    freqs = numpy.arange(kmin, kmax) * delta_f

    # FIXME: we are using spin -2 weighted spherical harmonics for now,
    # when possible switch to spheroidal harmonics.
    sph_lm = lal.SpinWeightedSphericalHarmonic(inc, 0., -2, l, m).real
    sph_lminusm = lal.SpinWeightedSphericalHarmonic(inc, 0., -2, l, -m).real
    spherical_plus = sph_lm + (-1)**l * sph_lminusm
    spherical_cross = sph_lm - (-1)**l * sph_lminusm

    denominator = 1 + (4j * pi * freqs * tau) - \
        (4 * pi_sq * (freqs*freqs - f_0*f_0) * tau*tau)
    norm = amp * tau / denominator
    if t_0 != 0:
        time_shift = numpy.exp(-1j * two_pi * freqs * t_0)
        norm *= time_shift

    # Analytical expression for the Fourier transform of the ringdown
    # (damped sinusoid)
    hp_tilde = norm * spherical_plus * ((1 + 2j * pi * freqs * tau) *
                                        numpy.cos(phi) -
                                        two_pi * f_0 * tau * numpy.sin(phi))
    hc_tilde = norm * spherical_cross * ((1 + 2j * pi * freqs * tau) *
                                         numpy.sin(phi) +
                                         two_pi * f_0 * tau * numpy.cos(phi))

    hplustilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                 delta_f=delta_f)
    hcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                  delta_f=delta_f)
    hplustilde.data[kmin:kmax] = hp_tilde
    hcrosstilde.data[kmin:kmax] = hc_tilde

    return hplustilde, hcrosstilde
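# Illustrative usage sketch (an assumption, not taken from the source): a
# single 250 Hz damped sinusoid with a 4 ms damping time, roughly the scale
# of a stellar-mass black-hole ringdown. All numbers and the helper name are
# placeholders.
def _example_fd_qnm():
    hp_tilde, hc_tilde = get_fd_qnm(f_0=250., tau=0.004, amp=1e-21, phi=0.,
                                    t_0=0., delta_f=0.1, f_lower=10.,
                                    f_final=1000.)
    return hp_tilde, hc_tilde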
def spa_tmplt(**kwds):
    """ Generate a minimal TaylorF2 approximant with optimizations for the sin/cos
    """
    # Pull out the input arguments
    f_lower = kwds['f_lower']
    delta_f = kwds['delta_f']
    distance = kwds['distance']
    mass1 = kwds['mass1']
    mass2 = kwds['mass2']
    s1z = kwds['spin1z']
    s2z = kwds['spin2z']
    phase_order = int(kwds['phase_order'])
    #amplitude_order = int(kwds['amplitude_order'])
    spin_order = int(kwds['spin_order'])

    if 'out' in kwds:
        out = kwds['out']
    else:
        out = None

    amp_factor = spa_amplitude_factor(mass1=mass1, mass2=mass2) / distance

    lal_pars = lal.CreateDict()
    if phase_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(
            lal_pars, phase_order)

    if spin_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(
            lal_pars, spin_order)

    # Calculate the PN terms
    phasing = lalsimulation.SimInspiralTaylorF2AlignedPhasing(
        float(mass1), float(mass2), float(s1z), float(s2z), lal_pars)

    pfaN = phasing.v[0]
    pfa2 = phasing.v[2] / pfaN
    pfa3 = phasing.v[3] / pfaN
    pfa4 = phasing.v[4] / pfaN
    pfa5 = phasing.v[5] / pfaN
    pfa6 = (phasing.v[6] - phasing.vlogv[6] * log(4)) / pfaN
    pfa7 = phasing.v[7] / pfaN

    pfl5 = phasing.vlogv[5] / pfaN
    pfl6 = phasing.vlogv[6] / pfaN

    piM = lal.PI * (mass1 + mass2) * lal.MTSUN_SI

    kmin = int(f_lower / float(delta_f))

    vISCO = 1. / sqrt(6.)
    fISCO = vISCO * vISCO * vISCO / piM
    kmax = int(fISCO / delta_f)
    f_max = ceilpow2(fISCO)
    n = int(f_max / delta_f) + 1

    if not out:
        htilde = FrequencySeries(zeros(n, dtype=numpy.complex64),
                                 delta_f=delta_f, copy=False)
    else:
        if type(out) is not Array:
            raise TypeError("Output must be an instance of Array")
        if len(out) < kmax:
            kmax = len(out)
        if out.dtype != complex64:
            raise TypeError("Output array is the wrong dtype")
        htilde = FrequencySeries(out, delta_f=delta_f, copy=False)

    spa_tmplt_engine(htilde[kmin:kmax], kmin, phase_order,
                     delta_f, piM, pfaN,
                     pfa2, pfa3, pfa4, pfa5, pfl5,
                     pfa6, pfl6, pfa7, amp_factor)
    return htilde
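# Editor's usage sketch: a non-spinning binary-neutron-star template at single
# precision. The values (and the Mpc distance convention) are illustrative
# assumptions; phase_order/spin_order of -1 leave the PN orders at their
# LALSimulation defaults.
def _example_spa_tmplt():
    return spa_tmplt(mass1=1.4, mass2=1.4, spin1z=0., spin2z=0.,
                     f_lower=30., delta_f=1. / 256, distance=100.,
                     phase_order=-1, spin_order=-1)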
def get_fd_from_freqtau(template=None, **kwargs): """Return frequency domain ringdown with all the modes specified. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. lmns : list Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55). The n specifies the number of overtones desired for the corresponding lm pair (maximum n=8). Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330 f_lmn: float Central frequency of the lmn overtone, as many as number of modes. tau_lmn: float Damping time of the lmn overtone, as many as number of modes. amp220 : float Amplitude of the fundamental 220 mode. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {0., float}, optional Inclination of the system in radians. Default is 0 (face on). delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplustilde: FrequencySeries The plus phase of a ringdown with the lm modes specified and n overtones in frequency domain. hcrosstilde: FrequencySeries The cross phase of a ringdown with the lm modes specified and n overtones in frequency domain. """ input_params = props(template, freqtau_required_args, **kwargs) # Get required args f_0, tau = lm_freqs_taus(**input_params) lmns = input_params['lmns'] for lmn in lmns: if int(lmn[2]) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # The following may not be in input_params inc = input_params.pop('inclination', 0.) delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if delta_f is None: delta_f = lm_deltaf(tau, lmns) if f_final is None: f_final = lm_ffinal(f_0, tau, lmns) if f_lower is None: f_lower = delta_f kmax = int(f_final / delta_f) + 1 outplustilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) for lmn in lmns: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) hplustilde, hcrosstilde = get_fd_lm(freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes, inclination=inc, delta_f=delta_f, f_lower=f_lower, f_final=f_final, **input_params) outplustilde.data += hplustilde.data outcrosstilde.data += hcrosstilde.data return outplustilde, outcrosstilde
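# Editor's usage sketch: a fundamental-mode-only (220) ringdown. The exact
# per-mode keyword names used here (f_220, tau_220, amp220, phi220) are
# assumptions inferred from the f_lmn/tau_lmn/amplmn/philmn naming scheme in
# the docstring, not taken from the original source.
def _example_fd_from_freqtau():
    return get_fd_from_freqtau(lmns=['221'], f_220=250., tau_220=4e-3,
                               amp220=1e-21, phi220=0., inclination=0.)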
def get_waveform_filter(out, template=None, **kwargs): """Return a frequency domain waveform filter for the specified approximant """ n = len(out) input_params = props(template, **kwargs) if input_params['approximant'] in filter_approximants(_scheme.mgr.state): wav_gen = filter_wav[type(_scheme.mgr.state)] htilde = wav_gen[input_params['approximant']](out=out, **input_params) htilde.resize(n) htilde.chirp_length = get_waveform_filter_length_in_time( **input_params) htilde.length_in_time = htilde.chirp_length return htilde if input_params['approximant'] in fd_approximants(_scheme.mgr.state): wav_gen = fd_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) hp.resize(n) out[0:len(hp)] = hp[:] hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False) hp.chirp_length = get_waveform_filter_length_in_time(**input_params) hp.length_in_time = hp.chirp_length return hp elif input_params['approximant'] in td_approximants(_scheme.mgr.state): # N: number of time samples required N = (n - 1) * 2 delta_f = 1.0 / (N * input_params['delta_t']) wav_gen = td_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) # taper the time series hp if required if ('taper' in input_params.keys() and \ input_params['taper'] is not None): hp = wfutils.taper_timeseries(hp, input_params['taper'], return_lal=False) # total duration of the waveform tmplt_length = len(hp) * hp.delta_t # for IMR templates the zero of time is at max amplitude (merger) # thus the start time is minus the duration of the template from # lower frequency cutoff to merger, i.e. minus the 'chirp time' tChirp = -float(hp.start_time) # conversion from LIGOTimeGPS hp.resize(N) k_zero = int(hp.start_time / hp.delta_t) hp.roll(k_zero) htilde = FrequencySeries(out, delta_f=delta_f, copy=False) fft(hp.astype(real_same_precision_as(htilde)), htilde) htilde.length_in_time = tmplt_length htilde.chirp_length = tChirp return htilde else: raise ValueError("Approximant %s not available" % (input_params['approximant']))
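# Editor's usage sketch: the length of 'out' fixes the frequency-domain filter
# length, so for an analysis segment of tlen time samples one would typically
# pass a complex buffer of tlen // 2 + 1 points. The approximant name and the
# parameter values are illustrative assumptions; numpy and zeros are assumed
# to be importable in this context.
def _example_waveform_filter(tlen=256 * 4096, delta_t=1. / 4096):
    out = zeros(tlen // 2 + 1, dtype=numpy.complex64)
    return get_waveform_filter(out, approximant='SPAtmplt',
                               mass1=1.4, mass2=1.4, spin1z=0., spin2z=0.,
                               f_lower=30., delta_f=1. / (tlen * delta_t),
                               delta_t=delta_t, distance=100.)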
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None, f_lower=None, interpolation='linear'): """Decompresses an FD waveform using the given amplitude, phase, and the frequencies at which they are sampled at. Parameters ---------- amp : array The amplitude of the waveform at the sample frequencies. phase : array The phase of the waveform at the sample frequencies. sample_frequencies : array The frequency (in Hz) of the waveform at the sample frequencies. out : {None, FrequencySeries} The output array to save the decompressed waveform to. If this contains slots for frequencies > the maximum frequency in sample_frequencies, the rest of the values are zeroed. If not provided, must provide a df. df : {None, float} The frequency step to use for the decompressed waveform. Must be provided if out is None. f_lower : {None, float} The frequency to start the decompression at. If None, will use whatever the lowest frequency is in sample_frequencies. All values at frequencies less than this will be 0 in the decompressed waveform. interpolation : {'linear', str} The interpolation to use for the amplitude and phase. Default is 'linear'. If 'linear' a custom interpolater is used. Otherwise, ``scipy.interpolate.interp1d`` is used; for other options, see possible values for that function's ``kind`` argument. Returns ------- out : FrqeuencySeries If out was provided, writes to that array. Otherwise, a new FrequencySeries with the decompressed waveform. """ precision = _precision_map[sample_frequencies.dtype.name] if _precision_map[amp.dtype.name] != precision or \ _precision_map[phase.dtype.name] != precision: raise ValueError("amp, phase, and sample_points must all have the " "same precision") if out is None: if df is None: raise ValueError("Either provide output memory or a df") hlen = int(numpy.ceil(sample_frequencies.max()/df+1)) out = FrequencySeries(numpy.zeros(hlen, dtype=_complex_dtypes[precision]), copy=False, delta_f=df) else: # check for precision compatibility if out.precision == 'double' and precision == 'single': raise ValueError("cannot cast single precision to double") df = out.delta_f hlen = len(out) if f_lower is None: imin = 0 f_lower = sample_frequencies[0] else: if f_lower >= sample_frequencies.max(): raise ValueError("f_lower is > than the maximum sample frequency") imin = int(numpy.searchsorted(sample_frequencies, f_lower)) start_index = int(numpy.floor(f_lower/df)) # interpolate the amplitude and the phase if interpolation == "linear": if precision == 'single': code = _linear_decompress_code32 else: code = _linear_decompress_code # use custom interpolation sflen = len(sample_frequencies) h = numpy.array(out.data, copy=False) delta_f = float(df) inline(code, ['h', 'hlen', 'sflen', 'delta_f', 'sample_frequencies', 'amp', 'phase', 'start_index', 'imin'], extra_compile_args=[WEAVE_FLAGS + '-march=native -O3 -w'] +\ omp_flags, libraries=omp_libs) else: # use scipy for fancier interpolation outfreq = out.sample_frequencies.numpy() amp_interp = interpolate.interp1d(sample_frequencies.numpy(), amp.numpy(), kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) phase_interp = interpolate.interp1d(sample_frequencies.numpy(), phase.numpy(), kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) A = amp_interp(outfreq) phi = phase_interp(outfreq) out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi) return out
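# Editor's sketch of what the inline 'linear' kernel above computes, written
# in plain numpy for clarity: interpolate amplitude and phase onto the uniform
# grid k*df and recombine as A*exp(i*phi). Illustrative only; it ignores the
# scheme-specific optimisations of the compiled code.
def _linear_decompress_numpy(amp, phase, sample_frequencies, df, hlen, f_lower):
    import numpy
    freqs = numpy.arange(hlen) * df
    A = numpy.interp(freqs, sample_frequencies, amp, left=0., right=0.)
    phi = numpy.interp(freqs, sample_frequencies, phase, left=0., right=0.)
    h = A * numpy.exp(1j * phi)
    # frequencies below the requested start are zeroed, as in fd_decompress
    h[freqs < f_lower] = 0.
    return h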
def __init__(self, low_frequency_cutoff, high_frequency_cutoff,
             snr_threshold, tlen, delta_f, dtype, segment_list,
             template_output, use_cluster, downsample_factor=1,
             upsample_threshold=1, upsample_method='pruned_fft',
             gpu_callback_method='none', cluster_function='symmetric'):
    """ Create a matched filter engine.

    Parameters
    ----------
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to
        the Nyquist frequency.
    snr_threshold : float
        The minimum snr to return when filtering
    segment_list : list
        List of FrequencySeries that are the Fourier-transformed data segments
    template_output : complex64
        Array of memory given as the 'out' parameter to waveform.FilterBank
    use_cluster : boolean
        If true, cluster triggers above threshold using a window; otherwise,
        only apply a threshold.
    downsample_factor : {1, int}, optional
        The factor by which to reduce the sample rate when doing a
        hierarchical matched filter
    upsample_threshold : {1, float}, optional
        The fraction of the snr_threshold to trigger on the subsampled filter.
    upsample_method : {pruned_fft, str}
        The method to upsample or interpolate the reduced rate filter.
    cluster_function : {symmetric, str}, optional
        Which method is used to cluster triggers over time. If 'findchirp', a
        sliding forward window; if 'symmetric', each window's peak is compared
        to the windows before and after it, and only kept as a trigger if
        larger than both.
    """
    # Assuming analysis time is constant across templates and segments, also
    # delta_f is constant across segments.
    self.tlen = tlen
    self.flen = self.tlen // 2 + 1
    self.delta_f = delta_f
    self.dtype = dtype
    self.snr_threshold = snr_threshold
    self.flow = low_frequency_cutoff
    self.fhigh = high_frequency_cutoff
    self.gpu_callback_method = gpu_callback_method
    if cluster_function not in ['symmetric', 'findchirp']:
        raise ValueError(
            "MatchedFilter: 'cluster_function' must be either 'symmetric' or 'findchirp'"
        )
    self.cluster_function = cluster_function
    self.segments = segment_list
    self.htilde = template_output

    if downsample_factor == 1:
        self.snr_mem = zeros(self.tlen, dtype=self.dtype)
        self.corr_mem = zeros(self.tlen, dtype=self.dtype)

        if use_cluster and (cluster_function == 'symmetric'):
            self.matched_filter_and_cluster = \
                self.full_matched_filter_and_cluster_symm
            # setup the thresholding/clustering operations for each segment
            self.threshold_and_clusterers = []
            for seg in self.segments:
                thresh = events.ThresholdCluster(self.snr_mem[seg.analyze])
                self.threshold_and_clusterers.append(thresh)
        elif use_cluster and (cluster_function == 'findchirp'):
            self.matched_filter_and_cluster = \
                self.full_matched_filter_and_cluster_fc
        else:
            self.matched_filter_and_cluster = \
                self.full_matched_filter_thresh_only

        # Assuming analysis time is constant across templates and segments,
        # also delta_f is constant across segments.
self.kmin, self.kmax = get_cutoff_indices(self.flow, self.fhigh, self.delta_f, self.tlen) # Set up the correlation operations for each analysis segment corr_slice = slice(self.kmin, self.kmax) self.correlators = [] for seg in self.segments: corr = Correlator(self.htilde[corr_slice], seg[corr_slice], self.corr_mem[corr_slice]) self.correlators.append(corr) # setup up the ifft we will do self.ifft = IFFT(self.corr_mem, self.snr_mem) elif downsample_factor >= 1: self.matched_filter_and_cluster = self.heirarchical_matched_filter_and_cluster self.downsample_factor = downsample_factor self.upsample_method = upsample_method self.upsample_threshold = upsample_threshold N_full = self.tlen N_red = N_full / downsample_factor self.kmin_full, self.kmax_full = get_cutoff_indices( self.flow, self.fhigh, self.delta_f, N_full) self.kmin_red, _ = get_cutoff_indices(self.flow, self.fhigh, self.delta_f, N_red) if self.kmax_full < N_red: self.kmax_red = self.kmax_full else: self.kmax_red = N_red - 1 self.snr_mem = zeros(N_red, dtype=self.dtype) self.corr_mem_full = FrequencySeries(zeros(N_full, dtype=self.dtype), delta_f=self.delta_f) self.corr_mem = Array(self.corr_mem_full[0:N_red], copy=False) self.inter_vec = zeros(N_full, dtype=self.dtype) else: raise ValueError("Invalid downsample factor")
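# Editor's sketch (assumption, not the library implementation): the
# get_cutoff_indices helper used above is taken to map the frequency band
# [flow, fhigh] onto frequency-bin indices of an N-point transform, roughly
# as below; the real helper may clamp or validate its inputs differently.
def _cutoff_indices_sketch(flow, fhigh, delta_f, N):
    kmin = int(flow / delta_f) if flow else 1
    kmax = int(fhigh / delta_f) if fhigh else N // 2 + 1
    return kmin, kmax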
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs): """Return a frequency domain waveform filter for the specified approximant. Unlike get_waveform_filter this function returns both h_plus and h_cross components of the waveform, which are needed for searches where h_plus and h_cross are not related by a simple phase shift. """ n = len(outplus) # If we don't have an inclination column alpha3 might be used if not hasattr(template, 'inclination') and 'inclination' not in kwargs: if hasattr(template, 'alpha3'): kwargs['inclination'] = template.alpha3 input_params = props(template, **kwargs) if input_params['approximant'] in fd_approximants(_scheme.mgr.state): wav_gen = fd_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) hp.resize(n) hc.resize(n) outplus[0:len(hp)] = hp[:] hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False) outcross[0:len(hc)] = hc[:] hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False) hp.chirp_length = get_waveform_filter_length_in_time(**input_params) hp.length_in_time = hp.chirp_length hc.chirp_length = hp.chirp_length hc.length_in_time = hp.length_in_time return hp, hc elif input_params['approximant'] in td_approximants(_scheme.mgr.state): # N: number of time samples required N = (n-1)*2 delta_f = 1.0 / (N * input_params['delta_t']) wav_gen = td_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) # taper the time series hp if required if 'taper' in input_params.keys() and \ input_params['taper'] is not None: hp = wfutils.taper_timeseries(hp, input_params['taper'], return_lal=False) hc = wfutils.taper_timeseries(hc, input_params['taper'], return_lal=False) # total duration of the waveform tmplt_length = len(hp) * hp.delta_t # for IMR templates the zero of time is at max amplitude (merger) # thus the start time is minus the duration of the template from # lower frequency cutoff to merger, i.e. minus the 'chirp time' tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS hp.resize(N) hc.resize(N) k_zero = int(hp.start_time / hp.delta_t) hp.roll(k_zero) hc.roll(k_zero) hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False) hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False) fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde) fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde) hp_tilde.length_in_time = tmplt_length hp_tilde.chirp_length = tChirp hc_tilde.length_in_time = tmplt_length hc_tilde.chirp_length = tChirp return hp_tilde, hc_tilde else: raise ValueError("Approximant %s not available" % (input_params['approximant']))
def imrphenomc_tmplt(**kwds): """ Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude Main Paper: arXiv:1005.3306 """ # Pull out the input arguments f_min = float128(kwds['f_lower']) f_max = float128(kwds['f_final']) delta_f = float128(kwds['delta_f']) distance = float128(kwds['distance']) mass1 = float128(kwds['mass1']) mass2 = float128(kwds['mass2']) spin1z = float128(kwds['spin1z']) spin2z = float128(kwds['spin2z']) if 'out' in kwds: out = kwds['out'] else: out = None # Calculate binary parameters M = mass1 + mass2 eta = mass1 * mass2 / (M * M) Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M) Xisum = 2.*Xi Xiprod = Xi*Xi Xi2 = Xi*Xi m_sec = M * lal.MTSUN_SI; piM = lal.PI * m_sec; ## The units of distance given as input is taken to pe Mpc. Converting to SI distance *= (1.0e6 * lal.PC_SI / (2. * sqrt(5. / (64.*lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI)) # Check if the value of f_max is correctly given, else replace with the fCut # used in the PhenomB code in lalsimulation. The various coefficients come # from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and # Table I of http://arxiv.org/pdf/0712.0343 if not f_max: f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM # Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of Main # paper. z101 = -2.417e-03 z102 = -1.093e-03 z111 = -1.917e-02 z110 = 7.267e-02 z120 = -2.504e-01 z201 = 5.962e-01 z202 = -5.600e-02 z211 = 1.520e-01 z210 = -2.970e+00 z220 = 1.312e+01 z301 = -3.283e+01 z302 = 8.859e+00 z311 = 2.931e+01 z310 = 7.954e+01 z320 = -4.349e+02 z401 = 1.619e+02 z402 = -4.702e+01 z411 = -1.751e+02 z410 = -3.225e+02 z420 = 1.587e+03 z501 = -6.320e+02 z502 = 2.463e+02 z511 = 1.048e+03 z510 = 3.355e+02 z520 = -5.115e+03 z601 = -4.809e+01 z602 = -3.643e+02 z611 = -5.215e+02 z610 = 1.870e+03 z620 = 7.354e+02 z701 = 4.149e+00 z702 = -4.070e+00 z711 = -8.752e+01 z710 = -4.897e+01 z720 = 6.665e+02 z801 = -5.472e-02 z802 = 2.094e-02 z811 = 3.554e-01 z810 = 1.151e-01 z820 = 9.640e-01 z901 = -1.235e+00 z902 = 3.423e-01 z911 = 6.062e+00 z910 = 5.949e+00 z920 = -1.069e+01 eta2 = eta*eta Xi2 = Xiprod # Calculate alphas, gamma, deltas from Table II and Eq 5.14 of Main paper a1 = z101 * Xi + z102 * Xi2 + z111 * eta * Xi + z110 * eta + z120 * eta2 a2 = z201 * Xi + z202 * Xi2 + z211 * eta * Xi + z210 * eta + z220 * eta2 a3 = z301 * Xi + z302 * Xi2 + z311 * eta * Xi + z310 * eta + z320 * eta2 a4 = z401 * Xi + z402 * Xi2 + z411 * eta * Xi + z410 * eta + z420 * eta2 a5 = z501 * Xi + z502 * Xi2 + z511 * eta * Xi + z510 * eta + z520 * eta2 a6 = z601 * Xi + z602 * Xi2 + z611 * eta * Xi + z610 * eta + z620 * eta2 g1 = z701 * Xi + z702 * Xi2 + z711 * eta * Xi + z710 * eta + z720 * eta2 del1 = z801 * Xi + z802 * Xi2 + z811 * eta * Xi + z810 * eta + z820 * eta2 del2 = z901 * Xi + z902 * Xi2 + z911 * eta * Xi + z910 * eta + z920 * eta2 # Get the spin of the final BH afin = FinalSpin( Xi, eta ) Q = Qa( abs(afin) ) # Get the fRD frd = fRD( abs(afin), M) Mfrd = frd * m_sec # Define the frequencies where SPA->PM->RD f1 = 0.1 * frd Mf1 = m_sec * f1 f2 = frd Mf2 = m_sec * f2 d1 = 0.005 d2 = 0.005 f0 = 0.98 * frd Mf0 = m_sec * f0 d0 = 0.015 # Now use this frequency for calculation of betas # calculate beta1 and beta2, that appear in Eq 5.7 in the main paper. 
b2 = ((-5./3.)* a1 * pow(Mfrd,(-8./3.)) - a2/(Mfrd*Mfrd) - \ (a3/3.)*pow(Mfrd,(-4./3.)) + (2./3.)* a5 * pow(Mfrd,(-1./3.)) + a6)/eta psiPMrd = (a1 * pow(Mfrd,(-5./3.)) + a2/Mfrd + a3 * pow(Mfrd,(-1./3.)) + \ a4 + a5 * pow(Mfrd,(2./3.)) + a6 * Mfrd)/eta b1 = psiPMrd - (b2 * Mfrd) ### Calculate the PN coefficients, Eq A3 - A5 of main paper ### pfaN = 3.0/(128.0 * eta) pfa2 = (3715./756.) + (55.*eta/9.0) pfa3 = -16.0*lal.PI + (113./3.)*Xi - 38.*eta*Xisum/3. pfa4 = (152.93365/5.08032) - 50.*Xi2 + eta*(271.45/5.04 + 1.25*Xiprod) + \ 3085.*eta2/72. pfa5 = lal.PI*(386.45/7.56 - 65.*eta/9.) - \ Xi*(735.505/2.268 + 130.*eta/9.) + Xisum*(1285.0*eta/8.1 + 170.*eta2/9.) - \ 10.*Xi2*Xi/3. + 10.*eta*Xi*Xiprod pfa6 = 11583.231236531/4.694215680 - 640.0*lal.PI*lal.PI/3. - \ 6848.0*lal.GAMMA/21. - 684.8*log(64.)/6.3 + \ eta*(2255.*lal.PI*lal.PI/12. - 15737.765635/3.048192) + \ 76.055*eta2/1.728 - (127.825*eta2*eta/1.296) + \ 2920.*lal.PI*Xi/3. - (175. - 1490.*eta)*Xi2/3. - \ (1120.*lal.PI/3. - 1085.*Xi/3.)*eta*Xisum + \ (269.45*eta/3.36 - 2365.*eta2/6.)*Xiprod pfa6log = -6848./63. pfa7 = lal.PI*(770.96675/2.54016 + 378.515*eta/1.512 - 740.45*eta2/7.56) - \ Xi*(20373.952415/3.048192 + 1509.35*eta/2.24 - 5786.95*eta2/4.32) + \ Xisum*(4862.041225*eta/1.524096 + 1189.775*eta2/1.008 - 717.05*eta2*eta/2.16 - 830.*eta*Xi2/3. + 35.*eta2*Xiprod/3.) - \ 560.*lal.PI*Xi2 + 20.*lal.PI*eta*Xiprod + \ Xi2*Xi*(945.55/1.68 - 85.*eta) + Xi*Xiprod*(396.65*eta/1.68 + 255.*eta2) xdotaN = 64.*eta/5. xdota2 = -7.43/3.36 - 11.*eta/4. xdota3 = 4.*lal.PI - 11.3*Xi/1.2 + 19.*eta*Xisum/6. xdota4 = 3.4103/1.8144 + 5*Xi2 + eta*(13.661/2.016 - Xiprod/8.) + 5.9*eta2/1.8 xdota5 = -lal.PI*(41.59/6.72 + 189.*eta/8.) - Xi*(31.571/1.008 - 116.5*eta/2.4) + \ Xisum*(21.863*eta/1.008 - 79.*eta2/6.) - 3*Xi*Xi2/4. + \ 9.*eta*Xi*Xiprod/4. xdota6 = 164.47322263/1.39708800 - 17.12*lal.GAMMA/1.05 + \ 16.*lal.PI*lal.PI/3 - 8.56*log(16.)/1.05 + \ eta*(45.1*lal.PI*lal.PI/4.8 - 561.98689/2.17728) + \ 5.41*eta2/8.96 - 5.605*eta*eta2/2.592 - 80.*lal.PI*Xi/3. + \ eta*Xisum*(20.*lal.PI/3. - 113.5*Xi/3.6) + \ Xi2*(64.153/1.008 - 45.7*eta/3.6) - \ Xiprod*(7.87*eta/1.44 - 30.37*eta2/1.44) xdota6log = -856./105. xdota7 = -lal.PI*(4.415/4.032 - 358.675*eta/6.048 - 91.495*eta2/1.512) - \ Xi*(252.9407/2.7216 - 845.827*eta/6.048 + 415.51*eta2/8.64) + \ Xisum*(158.0239*eta/5.4432 - 451.597*eta2/6.048 + 20.45*eta2*eta/4.32 + 107.*eta*Xi2/6. - 5.*eta2*Xiprod/24.) + \ 12.*lal.PI*Xi2 - Xi2*Xi*(150.5/2.4 + eta/8.) + \ Xi*Xiprod*(10.1*eta/2.4 + 3.*eta2/8.) AN = 8.*eta*sqrt(lal.PI/5.) A2 = (-107. + 55.*eta)/42. A3 = 2.*lal.PI - 4.*Xi/3. + 2.*eta*Xisum/3. A4 = -2.173/1.512 - eta*(10.69/2.16 - 2.*Xiprod) + 2.047*eta2/1.512 A5 = -10.7*lal.PI/2.1 + eta*(3.4*lal.PI/2.1) A5imag = -24.*eta A6 = 270.27409/6.46800 - 8.56*lal.GAMMA/1.05 + \ 2.*lal.PI*lal.PI/3. + \ eta*(4.1*lal.PI*lal.PI/9.6 - 27.8185/3.3264) - \ 20.261*eta2/2.772 + 11.4635*eta*eta2/9.9792 - \ 4.28*log(16.)/1.05 A6log = -428./105. 
A6imag = 4.28*lal.PI/1.05 ### Define other parameters needed by waveform generation ### kmin = int(f_min / delta_f) kmax = int(f_max / delta_f) n = kmax + 1; if not out: htilde = FrequencySeries(zeros(n,dtype=numpy.complex128), delta_f=delta_f, copy=False) else: if type(out) is not Array: raise TypeError("Output must be an instance of Array") if len(out) < kmax: raise TypeError("Output array is too small") if out.dtype != complex64: raise TypeError("Output array is the wrong dtype") htilde = FrequencySeries(out, delta_f=delta_f, copy=False) phenomC_kernel(htilde.data[kmin:kmax], kmin, delta_f, eta, Xi, distance, m_sec, piM, Mfrd, pfaN, pfa2, pfa3, pfa4, pfa5, pfa6, pfa6log, pfa7, a1, a2, a3, a4, a5, a6, b1, b2, Mf1, Mf2, Mf0, d1, d2, d0, xdota2, xdota3, xdota4, xdota5, xdota6, xdota6log, xdota7, xdotaN, AN, A2, A3, A4, A5, A5imag, A6, A6log, A6imag, g1, del1, del2, Q ) hp = htilde hc = htilde * 1j return hp, hc
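# Editor's usage sketch: generate an IMRPhenomC filter template; passing
# f_final=0 lets the function substitute the PhenomB cutoff frequency computed
# above. All numerical values (masses in solar masses, distance in Mpc) are
# illustrative assumptions.
def _example_imrphenomc():
    return imrphenomc_tmplt(mass1=30., mass2=25., spin1z=0.3, spin2z=0.1,
                            f_lower=20., f_final=0., delta_f=1. / 64,
                            distance=400.)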
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, float}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD')

    N = (len(psd)-1)*2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f, \
        dtype=complex_same_precision_as(psd))

    inv_asd[0] = 0
    inv_asd[N//2] = 0
    q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f), \
        dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len//2]

    q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \
        dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
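# Editor's usage sketch: smooth an estimated PSD so that the corresponding
# whitening filter is at most 4 s long at an assumed 4096 Hz sample rate. The
# cutoff frequency is illustrative.
def _example_truncate_inverse_spectrum(psd):
    sample_rate = 4096
    return inverse_spectrum_truncation(psd, 4 * sample_rate,
                                       low_frequency_cutoff=15.,
                                       trunc_method='hann')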
filter_delta_f = 1.0 / float(options.filter_signal_length) print("Number of Signal Waveforms: ",len(signal_table)) print("Number of Templates : ",len(template_table)) print("Reading and Interpolating PSD") if options.asd_file: psd = pycbc.psd.read.from_txt(options.asd_file, filter_n, filter_delta_f, options.filter_low_frequency_cutoff) elif options.psd: psd = pycbc.psd.analytic.from_string(options.psd, filter_n, filter_delta_f, options.filter_low_frequency_cutoff) psd *= DYN_RANGE_FAC **2 psd = FrequencySeries(psd,delta_f=psd.delta_f,dtype=float32) with ctx: print("Pregenerating Signals") signals = [] index = 0 for signal_params in signal_table: index += 1 update_progress(index/len(signal_table)) stilde = get_waveform(options.signal_approximant, options.signal_phase_order, options.signal_amplitude_order, signal_params, options.signal_start_frequency, options.filter_sample_rate, filter_N)
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None, f_lower=None, interpolation='inline_linear'): """Decompresses an FD waveform using the given amplitude, phase, and the frequencies at which they are sampled at. Parameters ---------- amp : array The amplitude of the waveform at the sample frequencies. phase : array The phase of the waveform at the sample frequencies. sample_frequencies : array The frequency (in Hz) of the waveform at the sample frequencies. out : {None, FrequencySeries} The output array to save the decompressed waveform to. If this contains slots for frequencies > the maximum frequency in sample_frequencies, the rest of the values are zeroed. If not provided, must provide a df. df : {None, float} The frequency step to use for the decompressed waveform. Must be provided if out is None. f_lower : {None, float} The frequency to start the decompression at. If None, will use whatever the lowest frequency is in sample_frequencies. All values at frequencies less than this will be 0 in the decompressed waveform. interpolation : {'inline_linear', str} The interpolation to use for the amplitude and phase. Default is 'inline_linear'. If 'inline_linear' a custom interpolater is used. Otherwise, ``scipy.interpolate.interp1d`` is used; for other options, see possible values for that function's ``kind`` argument. Returns ------- out : FrequencySeries If out was provided, writes to that array. Otherwise, a new FrequencySeries with the decompressed waveform. """ precision = _precision_map[sample_frequencies.dtype.name] if _precision_map[amp.dtype.name] != precision or \ _precision_map[phase.dtype.name] != precision: raise ValueError("amp, phase, and sample_points must all have the " "same precision") if out is None: if df is None: raise ValueError("Either provide output memory or a df") hlen = int(numpy.ceil(sample_frequencies.max()/df+1)) out = FrequencySeries(numpy.zeros(hlen, dtype=_complex_dtypes[precision]), copy=False, delta_f=df) else: # check for precision compatibility if out.precision == 'double' and precision == 'single': raise ValueError("cannot cast single precision to double") df = out.delta_f hlen = len(out) if f_lower is None: imin = 0 # pylint:disable=unused-variable f_lower = sample_frequencies[0] start_index = 0 else: if f_lower >= sample_frequencies.max(): raise ValueError("f_lower is > than the maximum sample frequency") if f_lower < sample_frequencies.min(): raise ValueError("f_lower is < than the minimum sample frequency") imin = int(numpy.searchsorted(sample_frequencies, f_lower, side='right')) - 1 # pylint:disable=unused-variable start_index = int(numpy.ceil(f_lower/df)) if start_index >= hlen: raise ValueError('requested f_lower >= largest frequency in out') # interpolate the amplitude and the phase if interpolation == "inline_linear": # Call the scheme-dependent function inline_linear_interp(amp, phase, sample_frequencies, out, df, f_lower, imin, start_index) else: # use scipy for fancier interpolation sample_frequencies = numpy.array(sample_frequencies) amp = numpy.array(amp) phase = numpy.array(phase) outfreq = out.sample_frequencies.numpy() amp_interp = interpolate.interp1d(sample_frequencies, amp, kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) phase_interp = interpolate.interp1d(sample_frequencies, phase, kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) A = amp_interp(outfreq) phi = phase_interp(outfreq) out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi) return out
def calculate_acf(data, delta_t=1.0, unbiased=False):
    """ Calculates the autocorrelation function (ACF) and returns the one-sided
    ACF. The ACF is defined as the autocovariance divided by the variance.

    The ACF can be estimated using

        \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k}
                     \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right)

    Where \hat{R}(k) is the ACF, X_{t} is the data series at time t, \mu is
    the mean of X_{t}, and \sigma^{2} is the variance of X_{t}.

    Parameters
    ----------
    data : {TimeSeries, numpy.array}
        A TimeSeries or numpy.array of data.
    delta_t : float
        The time step of the data series if it is not a TimeSeries instance.
    unbiased : bool
        If True the normalization of the autocovariance function is n-k
        instead of n. This is called the unbiased estimation of the
        autocovariance. Note that this does not mean the ACF is unbiased.

    Returns
    -------
    acf : numpy.array
        If data is a TimeSeries then acf will be a TimeSeries of the
        one-sided ACF. Else acf is a numpy.array.
    """
    # if given a TimeSeries instance then get numpy.array
    if isinstance(data, TimeSeries):
        y = data.numpy()
        delta_t = data.delta_t
    else:
        y = data

    # FFT data minus the mean
    fdata = TimeSeries(y-y.mean(), delta_t=delta_t).to_frequencyseries()

    # correlate
    # do not need to give the conjugate since correlate function does it
    cdata = FrequencySeries(zeros(len(fdata), dtype=numpy.complex64),
                            delta_f=fdata.delta_f, copy=False)
    correlate(fdata, fdata, cdata)

    # IFFT correlated data to get unnormalized autocovariance time series
    acf = cdata.to_timeseries()

    # normalize the autocovariance
    # note that dividing by acf[0] is the same as ( y.var() * len(acf) )
    if unbiased:
        acf /= ( y.var() * numpy.arange(len(acf), 0, -1) )
    else:
        acf /= acf[0]

    # return input datatype
    if isinstance(data, TimeSeries):
        return TimeSeries(acf, delta_t=delta_t)
    else:
        return acf
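# Editor's usage sketch: one-sided ACF of white Gaussian noise packaged as a
# TimeSeries; the zero-lag value is 1 by construction. numpy is assumed to be
# importable in this context.
def _example_acf():
    import numpy
    noise = TimeSeries(numpy.random.normal(size=4096), delta_t=1. / 4096)
    return calculate_acf(noise)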
def get_waveform(wav, approximant, f_min, dt, N): """This function will generate the waveform corresponding to the point taken as input""" #{{{ m1 = wav.mass1 m2 = wav.mass2 s1x = wav.spin1x s1y = wav.spin1y s1z = wav.spin1z s2x = wav.spin2x s2y = wav.spin2y s2z = wav.spin2z ecc = wav.alpha mean_per_ano = wav.alpha1 long_asc_nodes = wav.alpha2 coa_phase = wav.coa_phase inc = wav.inclination dist = wav.distance df = 1. / (dt * N) f_max = min(1. / (2. * dt), 0.15 / ((m1 + m2) * lal.MTSUN_SI)) if approximant in fd_approximants(): try: hptild, hctild = get_fd_waveform(approximant=approximant, mass1=m1, mass2=m2, spin1x=s1x, spin1y=s1y, spin1z=s1z, spin2x=s2x, spin2y=s2y, spin2z=s2z, eccentricity=ecc, mean_per_ano=mean_per_ano, long_asc_nodes=long_asc_nodes, coa_phase=coa_phase, inclination=inc, distance=dist, f_lower=f_min, f_final=f_max, delta_f=df) except RuntimeError as re: for c in dir(wav): if "__" not in c and "get" not in c and "set" not in c and hasattr( wav, c): print(c, getattr(wav, c)) raise RuntimeError(re) hptilde = FrequencySeries(hptild, delta_f=df, dtype=np.complex128, copy=True) hpref_padded = FrequencySeries(zeros(N / 2 + 1), delta_f=df, dtype=np.complex128, copy=True) hpref_padded[0:len(hptilde)] = hptilde hctilde = FrequencySeries(hctild, delta_f=df, dtype=np.complex128, copy=True) hcref_padded = FrequencySeries(zeros(N / 2 + 1), delta_f=df, dtype=np.complex128, copy=True) hcref_padded[0:len(hctilde)] = hctilde href_padded = generate_detector_strain(wav, hpref_padded, hcref_padded) elif approximant in td_approximants(): #raise IOError("Time domain approximants not supported at the moment..") try: hp, hc = get_td_waveform(approximant=approximant, mass1=m1, mass2=m2, spin1x=s1x, spin1y=s1y, spin1z=s1z, spin2x=s2x, spin2y=s2y, spin2z=s2z, eccentricity=ecc, mean_per_ano=mean_per_ano, long_asc_nodes=long_asc_nodes, coa_phase=coa_phase, inclination=inc, distance=dist, f_lower=f_min, delta_t=dt) except RuntimeError as re: for c in dir(wav): if "__" not in c and "get" not in c and "set" not in c and hasattr( wav, c): print(c, getattr(wav, c)) raise RuntimeError(re) hpref_padded = TimeSeries(zeros(N), delta_t=dt, dtype=hp.dtype, copy=True) hpref_padded[:len(hp)] = hp hcref_padded = TimeSeries(zeros(N), delta_t=dt, dtype=hc.dtype, copy=True) hcref_padded[:len(hc)] = hc href_padded_td = generate_detector_strain(wav, hpref_padded, hcref_padded) href_padded = make_frequency_series(href_padded_td) return href_padded
def get_fd_qnm(template=None, **kwargs): """Return a frequency domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. t_0 : {0, float}, optional The starting time of the ringdown. phi_0 : {0, float}, optional The initial phase of the ringdown. amp : {1, float}, optional The amplitude of the ringdown (constant for now). delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude. f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplustilde: FrequencySeries The plus phase of the ringdown in frequency domain. hcrosstilde: FrequencySeries The cross phase of the ringdown in frequency domain. """ input_params = props_ringdown(template,**kwargs) # get required args try: f_0 = input_params['f_0'] except KeyError: raise ValueError('f_0 is required') try: tau = input_params['tau'] except KeyError: raise ValueError('tau is required') # get optional args # the following have defaults, and so will be populated t_0 = input_params.pop('t_0') phi_0 = input_params.pop('phi_0') amp = input_params.pop('amp') # the following may not be in input_params delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if delta_f is None: delta_f = 1. / qnm_time_decay(tau, 1./1000) if f_lower is None: f_lower = delta_f kmin = 0 else: kmin = int(f_lower / delta_f) if f_final is None: f_final = qnm_freq_decay(f_0, tau, 1./1000) kmax = int(f_final / delta_f) + 1 pi = numpy.pi two_pi = 2 * numpy.pi pi_sq = numpy.pi * numpy.pi freqs = numpy.arange(kmin, kmax)*delta_f denominator = 1 + (4j * pi * freqs * tau) - (4 * pi_sq * ( freqs*freqs - f_0*f_0) * tau*tau) norm = amp * tau / denominator if t_0 != 0: time_shift = numpy.exp(-1j * two_pi * freqs * t_0) norm *= time_shift hp_tilde = norm * ( (1 + 2j * pi * freqs * tau) * numpy.cos(phi_0) - two_pi * f_0 * tau * numpy.sin(phi_0) ) hc_tilde = norm * ( (1 + 2j * pi * freqs * tau) * numpy.sin(phi_0) + two_pi * f_0 * tau * numpy.cos(phi_0) ) hplustilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) hcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) hplustilde.data[kmin:kmax] = hp_tilde hcrosstilde.data[kmin:kmax] = hc_tilde return hplustilde, hcrosstilde
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs): """Return a frequency domain waveform filter for the specified approximant. Unlike get_waveform_filter this function returns both h_plus and h_cross components of the waveform, which are needed for searches where h_plus and h_cross are not related by a simple phase shift. """ n = len(outplus) # If we don't have an inclination column alpha3 might be used if not hasattr(template, 'inclination')\ and not kwargs.has_key('inclination'): if hasattr(template, 'alpha3'): kwargs['inclination'] = template.alpha3 input_params = props(template, **kwargs) if input_params['approximant'] in fd_approximants(_scheme.mgr.state): wav_gen = fd_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) hp.resize(n) hc.resize(n) outplus[0:len(hp)] = hp[:] hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False) outcross[0:len(hc)] = hc[:] hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False) hp.chirp_length = get_waveform_filter_length_in_time(**input_params) hp.length_in_time = hp.chirp_length hc.chirp_length = hp.chirp_length hc.length_in_time = hp.length_in_time return hp, hc elif input_params['approximant'] in td_approximants(_scheme.mgr.state): # N: number of time samples required N = (n - 1) * 2 delta_f = 1.0 / (N * input_params['delta_t']) wav_gen = td_wav[type(_scheme.mgr.state)] hp, hc = wav_gen[input_params['approximant']](**input_params) # taper the time series hp if required if ('taper' in input_params.keys() and \ input_params['taper'] is not None): hp = wfutils.taper_timeseries(hp, input_params['taper'], return_lal=False) hc = wfutils.taper_timeseries(hc, input_params['taper'], return_lal=False) # total duration of the waveform tmplt_length = len(hp) * hp.delta_t # for IMR templates the zero of time is at max amplitude (merger) # thus the start time is minus the duration of the template from # lower frequency cutoff to merger, i.e. minus the 'chirp time' tChirp = -float(hp.start_time) # conversion from LIGOTimeGPS hp.resize(N) hc.resize(N) k_zero = int(hp.start_time / hp.delta_t) hp.roll(k_zero) hc.roll(k_zero) hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False) hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False) fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde) fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde) hp_tilde.length_in_time = tmplt_length hp_tilde.chirp_length = tChirp hc_tilde.length_in_time = tmplt_length hc_tilde.chirp_length = tChirp return hp_tilde, hc_tilde else: raise ValueError("Approximant %s not available" % (input_params['approximant']))
def cbrt_lookup(vmax, delta): vec = numpy.arange(0, vmax * 1.2, delta) return FrequencySeries(vec**(1.0 / 3.0), delta_f=delta).astype(float32)
def logv_lookup(vmax, delta): vec = numpy.arange(0, vmax * 1.2, delta) vec[1:len(vec)] = numpy.log(vec[1:len(vec)]) return FrequencySeries(vec, delta_f=delta).astype(float32)
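# Editor's note: these tables trade memory for speed by replacing per-sample
# cbrt/log evaluations with an indexed read. The index = value / delta access
# pattern below is an assumption about how the tables are consumed elsewhere,
# shown only to illustrate the idea.
def _lookup_sketch():
    table = cbrt_lookup(vmax=2048., delta=1.)
    v = 1000.
    return table[int(v / table.delta_f)]   # approximately v ** (1. / 3)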
def get_fd_qnm(template=None, **kwargs): """Return a frequency domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. amp : float The amplitude of the ringdown (constant for now). phi : float The initial phase of the ringdown. Should also include the information from the azimuthal angle (phi_0 + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. l : {2, int}, optional l mode for the spherical harmonics. Default is l=2. m : {2, int}, optional m mode for the spherical harmonics. Default is m=2. t_0 : {0, float}, optional The starting time of the ringdown. delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude. f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplustilde: FrequencySeries The plus phase of the ringdown in frequency domain. hcrosstilde: FrequencySeries The cross phase of the ringdown in frequency domain. """ input_params = props(template, qnm_required_args, **kwargs) f_0 = input_params.pop('f_0') tau = input_params.pop('tau') amp = input_params.pop('amp') phi = input_params.pop('phi') # the following have defaults, and so will be populated t_0 = input_params.pop('t_0') # the following may not be in input_params inc = input_params.pop('inclination', None) l = input_params.pop('l', 2) m = input_params.pop('m', 2) delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if not delta_f: delta_f = 1. / qnm_time_decay(tau, 1./1000) if not f_lower: f_lower = delta_f kmin = 0 else: kmin = int(f_lower / delta_f) if not f_final: f_final = qnm_freq_decay(f_0, tau, 1./1000) if f_final > max_freq: f_final = max_freq kmax = int(f_final / delta_f) + 1 freqs = numpy.arange(kmin, kmax)*delta_f if inc is not None: Y_plus, Y_cross = spher_harms(l, m, inc) else: Y_plus, Y_cross = 1, 1 denominator = 1 + (4j * pi * freqs * tau) - (4 * pi_sq * ( freqs*freqs - f_0*f_0) * tau*tau) norm = amp * tau / denominator if t_0 != 0: time_shift = numpy.exp(-1j * two_pi * freqs * t_0) norm *= time_shift # Analytical expression for the Fourier transform of the ringdown (damped sinusoid) hp_tilde = norm * Y_plus * ( (1 + 2j * pi * freqs * tau) * numpy.cos(phi) - two_pi * f_0 * tau * numpy.sin(phi) ) hc_tilde = norm * Y_cross * ( (1 + 2j * pi * freqs * tau) * numpy.sin(phi) + two_pi * f_0 * tau * numpy.cos(phi) ) outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outplus.data[kmin:kmax] = hp_tilde outcross.data[kmin:kmax] = hc_tilde return outplus, outcross
def tensorboard_writer( queue: mp.Queue, log_dir: str, parameters: List[str], labels: List[str], static_args_ini: str, num_basis: int, val_coefficients: Optional[torch.Tensor]=None, val_gts: Optional[torch.Tensor]=None, figure_titles: Optional[List[str]]=None, ): # suppress luminosity distance debug messages logger = logging.getLogger('bilby') logger.propagate = False logger.setLevel(logging.WARNING) if log_dir is None: tb = SummaryWriter() else: tb = SummaryWriter(log_dir) _, static_args = read_ini_config(static_args_ini) ifos = ('H1', 'L1') interferometers = {'H1': 'Hanford', 'L1': 'Livingston', 'V1': 'Virgo', 'K1': 'KAGRA'} basis_dir = Path('/mnt/datahole/daniel/gravflows/datasets/basis/') basis = SVDBasis(basis_dir, static_args_ini, ifos, preload=False) basis.load(time_translations=False, verbose=False) basis.truncate(num_basis) val_coefficients = val_coefficients.numpy() for j in range(val_coefficients.shape[0]): fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(16, 4)) for i, ifo in enumerate(ifos): reconstruction = val_coefficients[j, i] @ basis.Vh[i] reconstruction = FrequencySeries(reconstruction, delta_f=static_args['delta_f']) strain = reconstruction.to_timeseries(delta_t=static_args['delta_t']) ax.plot(strain.sample_times, strain, label=interferometers[ifo], alpha=0.6) ax.set_title(f'Reconstructed {figure_titles[j]} Strain') ax.set_xlabel('Time (s)') ax.set_ylabel('Strain') # units? ax.set_xlim((static_args['seconds_before_event']-1, static_args['seconds_before_event']+1)) ax.legend(loc='upper left') ax.grid('both') # ax.axvline(static_args['seconds_before_event'], color='r', linestyle='--') # merger time marker # ax.set_xticks([static_args['seconds_before_event']], minor=True) # add low frequency cutoff to ticks # ax.set_xticklabels(['$t_{c}$'], minor=True, color='r') tb.add_figure(f'reconstructions/{figure_titles[j]}', fig) del reconstruction del val_coefficients del basis # bilby setup - specify the output directory and the name of the bilby run result = bilby.result.read_in_result( outdir='bilby_runs/GW150914', label='GW150914' ) bilby_parameters = [ 'mass_1', 'mass_2', 'phase', 'geocent_time', 'luminosity_distance', 'a_1', 'a_2', 'tilt_1', 'tilt_2', 'phi_12', 'phi_jl', 'theta_jn', 'psi', 'ra', 'dec' ] bilby_samples = result.posterior[bilby_parameters].values # # Shift the time of coalescence by the trigger time bilby_samples[:,3] = bilby_samples[:,3] - Merger('GW150914').time bilby_df = pd.DataFrame(bilby_samples.astype(np.float32), columns=bilby_parameters) bilby_df = bilby_df.rename(columns={'luminosity_distance': 'distance', 'geocent_time': 'time'}) bilby_df = bilby_df.loc[:, parameters] domain = [ [10, 80], # mass 1 [10, 80], # mass 2 [0, 2*np.pi], # phase [0, 1], # a_1 [0, 1], # a 2 [0, np.pi], # tilt 1 [0, np.pi], # tilt 2 [0, 2*np.pi], # phi_12 [0, 2*np.pi], # phi_jl [0, np.pi], # theta_jn [0, np.pi], # psi [0, 2*np.pi], # ra [-np.pi/2, np.pi/2], # dec # [0.005,0.055], # tc [100,800], # distance ] cosmoprior = bilby.gw.prior.UniformSourceFrame( name='luminosity_distance', minimum=1e2, maximum=1e3, ) while True: try: epoch, scalars, samples = queue.get() if samples is not None: # requires (batch, samples, parameters) assert len(samples.shape) == 3, "samples must be of shape (batch, samples, parameters)" if figure_titles is not None: # to do - better handling of passing figure info through queue assert samples.shape[0] == len(figure_titles), ( "sample.shape[0] and figure_titles must have matching lengths" ) else: figure_titles = ['']*samples.shape[0] for key, value in 
scalars.items(): tb.add_scalar(key, value, epoch) if samples is not None: assert isinstance(samples, torch.Tensor) for i in range(samples.shape[0]): fig = plt.figure(figsize=(20,21)) if i == 0: # GW150914 ONLY - hardcoded to first position samples_df = pd.DataFrame(samples[i].numpy(), columns=parameters) weights = cosmoprior.prob(samples_df['distance']) weights = weights / np.mean(weights) corner.corner( bilby_df, fig=fig, labels=labels, levels=[0.5, 0.9], quantiles=[0.25, 0.75], color='tab:orange', scale_hist=True, plot_datapoints=False, ) corner.corner( samples_df, fig=fig, levels=[0.5, 0.9], quantiles=[0.25, 0.75], color='tab:blue', scale_hist=True, plot_datapoints=False, show_titles=True, weights=weights * len(bilby_samples) / len(samples_df), range=domain, ) fig.legend( handles=[ mpatches.Patch(color='tab:blue', label='Neural Spline Flow'), mpatches.Patch(color='tab:orange', label='Bilby (dynesty)')], loc='upper right', fontsize=16, ) else: samples_df = pd.DataFrame(samples[i].numpy(), columns=parameters) weights = cosmoprior.prob(samples_df['distance']) weights = weights / np.mean(weights) corner.corner( samples_df, fig=fig, labels=labels, levels=[0.5, 0.9], quantiles=[0.25, 0.75], color='tab:blue', truth_color='tab:orange', scale_hist=True, plot_datapoints=False, show_titles=True, truths=val_gts[i].numpy() if val_gts is not None else None, weights=weights * len(bilby_samples) / len(samples_df), range=domain, ) fig.legend( handles=[ mpatches.Patch(color='tab:blue', label='Neural Spline Flow'), mpatches.Patch(color='tab:orange', label='Ground Truth')], loc='upper right', fontsize=16, ) fig.suptitle(f'{figure_titles[i]} Parameter Estimation', fontsize=18) # fig.savefig(f'gwpe/figures/{figure_titles[i]}.png') tb.add_figure(f'posteriors/{figure_titles[i]}', fig, epoch) tb.flush() except Exception as e: # warning: assertions may not trigger exception to exit process traceback.print_exc() os.kill(os.getpid(), signal.SIGSTOP) # to do: check kill command
def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann', avg_method='median', num_segments=None, require_exact_data_fit=False): """PSD estimator based on Welch's method. Parameters ---------- timeseries : TimeSeries Time series for which the PSD is to be estimated. seg_len : int Segment length in samples. seg_stride : int Separation between consecutive segments, in samples. window : {'hann'} Function used to window segments before Fourier transforming. avg_method : {'median', 'mean', 'median-mean'} Method used for averaging individual segment PSDs. Returns ------- psd : FrequencySeries Frequency series containing the estimated PSD. Raises ------ ValueError For invalid choices of `seg_len`, `seg_stride` `window` and `avg_method` and for inconsistent combinations of len(`timeseries`), `seg_len` and `seg_stride`. Notes ----- See arXiv:gr-qc/0509116 for details. """ window_map = { 'hann': numpy.hanning } # sanity checks if not window in window_map: raise ValueError('Invalid window') if not avg_method in ('mean', 'median', 'median-mean'): raise ValueError('Invalid averaging method') if type(seg_len) is not int or type(seg_stride) is not int \ or seg_len <= 0 or seg_stride <= 0: raise ValueError('Segment length and stride must be positive integers') if timeseries.precision == 'single': fs_dtype = numpy.complex64 elif timeseries.precision == 'double': fs_dtype = numpy.complex128 num_samples = len(timeseries) if num_segments is None: num_segments = int(num_samples // seg_stride) # NOTE: Is this not always true? if (num_segments - 1) * seg_stride + seg_len > num_samples: num_segments -= 1 if not require_exact_data_fit: data_len = (num_segments - 1) * seg_stride + seg_len # Get the correct amount of data if data_len < num_samples: diff = num_samples - data_len start = diff // 2 end = num_samples - diff // 2 # Want this to be integers so if diff is odd, catch it here. if diff % 2: start = start + 1 timeseries = timeseries[start:end] num_samples = len(timeseries) if data_len > num_samples: err_msg = "I was asked to estimate a PSD on %d " %(data_len) err_msg += "data samples. However the data provided only contains " err_msg += "%d data samples." %(num_samples) if num_samples != (num_segments - 1) * seg_stride + seg_len: raise ValueError('Incorrect choice of segmentation parameters') w = Array(window_map[window](seg_len).astype(timeseries.dtype)) # calculate psd of each segment delta_f = 1. 
/ timeseries.delta_t / seg_len segment_tilde = FrequencySeries(numpy.zeros(seg_len / 2 + 1), \ delta_f=delta_f, dtype=fs_dtype) segment_psds = [] for i in xrange(num_segments): segment_start = i * seg_stride segment_end = segment_start + seg_len segment = timeseries[segment_start:segment_end] assert len(segment) == seg_len fft(segment * w, segment_tilde) seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy() #halve the DC and Nyquist components to be consistent with TO10095 seg_psd[0] /= 2 seg_psd[-1] /= 2 segment_psds.append(seg_psd) segment_psds = numpy.array(segment_psds) if avg_method == 'mean': psd = numpy.mean(segment_psds, axis=0) elif avg_method == 'median': psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments) elif avg_method == 'median-mean': odd_psds = segment_psds[::2] even_psds = segment_psds[1::2] odd_median = numpy.median(odd_psds, axis=0) / \ median_bias(len(odd_psds)) even_median = numpy.median(even_psds, axis=0) / \ median_bias(len(even_psds)) psd = (odd_median + even_median) / 2 psd *= 2 * delta_f * seg_len / (w*w).sum() return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype, epoch=timeseries.start_time)
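# Editor's usage sketch: median-averaged Welch PSD of 256 s of white noise
# sampled at 4096 Hz with 50% overlapping 4 s segments. The values are
# illustrative; numpy is assumed to be importable in this context.
def _example_welch():
    import numpy
    strain = TimeSeries(numpy.random.normal(size=256 * 4096),
                        delta_t=1. / 4096)
    return welch(strain, seg_len=4 * 4096, seg_stride=2 * 4096,
                 window='hann', avg_method='median')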
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None, f_lower=None, interpolation='linear'): """Decompresses an FD waveform using the given amplitude, phase, and the frequencies at which they are sampled at. Parameters ---------- amp : array The amplitude of the waveform at the sample frequencies. phase : array The phase of the waveform at the sample frequencies. sample_frequencies : array The frequency (in Hz) of the waveform at the sample frequencies. out : {None, FrequencySeries} The output array to save the decompressed waveform to. If this contains slots for frequencies > the maximum frequency in sample_frequencies, the rest of the values are zeroed. If not provided, must provide a df. df : {None, float} The frequency step to use for the decompressed waveform. Must be provided if out is None. f_lower : {None, float} The frequency to start the decompression at. If None, will use whatever the lowest frequency is in sample_frequencies. All values at frequencies less than this will be 0 in the decompressed waveform. interpolation : {'linear', str} The interpolation to use for the amplitude and phase. Default is 'linear'. If 'linear' a custom interpolater is used. Otherwise, ``scipy.interpolate.interp1d`` is used; for other options, see possible values for that function's ``kind`` argument. Returns ------- out : FrqeuencySeries If out was provided, writes to that array. Otherwise, a new FrequencySeries with the decompressed waveform. """ precision = _precision_map[sample_frequencies.dtype.name] if _precision_map[amp.dtype.name] != precision or \ _precision_map[phase.dtype.name] != precision: raise ValueError("amp, phase, and sample_points must all have the " "same precision") if out is None: if df is None: raise ValueError("Either provide output memory or a df") hlen = int(numpy.ceil(sample_frequencies.max() / df + 1)) out = FrequencySeries(numpy.zeros(hlen, dtype=_complex_dtypes[precision]), copy=False, delta_f=df) else: # check for precision compatibility if out.precision == 'double' and precision == 'single': raise ValueError("cannot cast single precision to double") df = out.delta_f hlen = len(out) if f_lower is None: imin = 0 f_lower = sample_frequencies[0] else: if f_lower >= sample_frequencies.max(): raise ValueError("f_lower is > than the maximum sample frequency") imin = int(numpy.searchsorted(sample_frequencies, f_lower)) start_index = int(numpy.floor(f_lower / df)) # interpolate the amplitude and the phase if interpolation == "linear": if precision == 'single': code = _linear_decompress_code32 else: code = _linear_decompress_code # use custom interpolation sflen = len(sample_frequencies) h = numpy.array(out.data, copy=False) delta_f = float(df) inline(code, ['h', 'hlen', 'sflen', 'delta_f', 'sample_frequencies', 'amp', 'phase', 'start_index', 'imin'], extra_compile_args=[WEAVE_FLAGS + '-march=native -O3 -w'] +\ omp_flags, libraries=omp_libs) else: # use scipy for fancier interpolation outfreq = out.sample_frequencies.numpy() amp_interp = interpolate.interp1d(sample_frequencies.numpy(), amp.numpy(), kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) phase_interp = interpolate.interp1d(sample_frequencies.numpy(), phase.numpy(), kind=interpolation, bounds_error=False, fill_value=0., assume_sorted=True) A = amp_interp(outfreq) phi = phase_interp(outfreq) out.data[:] = A * numpy.cos(phi) + (1j) * A * numpy.sin(phi) return out