Example no. 1
    def __init__(self, low_frequency_cutoff, high_frequency_cutoff,
                snr_threshold, tlen, delta_f, dtype):
        """
        Create a matched filter engine.

        Parameters
        ----------
        low_frequency_cutoff : {None, float}, optional
            The frequency to begin the filter calculation. If None, begin
            at the first frequency after DC.
        high_frequency_cutoff : {None, float}, optional
            The frequency to stop the filter calculation. If None, continue
            to the Nyquist frequency.
        snr_threshold : float
            The minimum SNR to return when filtering.
        tlen : int
            The length of the filter in samples; used to allocate the SNR and
            correlation memory.
        delta_f : float
            The frequency resolution of the filter.
        dtype : numpy.dtype
            The data type of the allocated memory.
        """
        self.tlen = tlen
        self.delta_f = delta_f
        self.dtype = dtype
        self.snr_threshold = snr_threshold
        self.flow = low_frequency_cutoff
        self.fhigh = high_frequency_cutoff

        self.matched_filter_and_cluster = \
                                    self.full_matched_filter_and_cluster
        self.snr_plus_mem = zeros(self.tlen, dtype=self.dtype)
        self.corr_plus_mem = zeros(self.tlen, dtype=self.dtype)
        self.snr_cross_mem = zeros(self.tlen, dtype=self.dtype)
        self.corr_cross_mem = zeros(self.tlen, dtype=self.dtype)
        self.snr_mem = zeros(self.tlen, dtype=self.dtype)
        self.cached_hplus_hcross_correlation = None
        self.cached_hplus_hcross_hplus = None
        self.cached_hplus_hcross_hcross = None
        self.cached_hplus_hcross_psd = None
Example no. 2
def inline_linear_interp(amps, phases, freqs, output, df, flow, imin, start_index):
    # Note that imin and start_index are ignored in the GPU code; they are only
    # needed for CPU.
    if output.precision == 'double':
        raise NotImplementedError("Double precision linear interpolation not currently supported on CUDA scheme")
    flow = numpy.float32(flow)
    texlen = numpy.int32(len(freqs))
    fmax = numpy.float32(freqs[texlen-1])
    hlen = numpy.int32(len(output))
    (fn1, fn2, ftex, atex, ptex, nt, nb) = get_dckernel(hlen)
    freqs_gpu = gpuarray.to_gpu(freqs)
    freqs_gpu.bind_to_texref_ext(ftex, allow_offset=False)
    amps_gpu = gpuarray.to_gpu(amps)
    amps_gpu.bind_to_texref_ext(atex, allow_offset=False)
    phases_gpu = gpuarray.to_gpu(phases)
    phases_gpu.bind_to_texref_ext(ptex, allow_offset=False)
    fn1 = fn1.prepared_call
    fn2 = fn2.prepared_call
    df = numpy.float32(df)
    g_out = output.data.gpudata
    lower = zeros(nb, dtype=numpy.int32).data.gpudata
    upper = zeros(nb, dtype=numpy.int32).data.gpudata
    fn1((1, 1), (nb, 1, 1), lower, upper, texlen, df, flow, fmax)
    fn2((nb, 1), (nt, 1, 1), g_out, df, hlen, flow, fmax, texlen, lower, upper)
    pycbc.scheme.mgr.state.context.synchronize()
    return output
Example no. 3
def make_padded_frequency_series(vec, filter_N=None):
    """Pad a TimeSeries with zeros so that its total length is the next power
    of two at least twice its original length, and return it as a
    FrequencySeries. This prevents the effects of wraparound.
    """
    if filter_N is None:
        power = int(ceil(log(len(vec), 2))) + 1
        N = 2 ** power
    else:
        N = filter_N
    n = N // 2 + 1
    
   
    if isinstance(vec, FrequencySeries):
        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
                                   delta_f=1.0, copy=False)
        if len(vectilde) < len(vec):
            cplen = len(vectilde)
        else:
            cplen = len(vec)
        vectilde[0:cplen] = vec[0:cplen]
        delta_f = vec.delta_f
    
        
    if isinstance(vec,TimeSeries):  
        vec_pad = TimeSeries(zeros(N),delta_t=vec.delta_t,
                         dtype=real_same_precision_as(vec))
        vec_pad[0:len(vec)] = vec   
        delta_f = 1.0/(vec.delta_t*N)
        vectilde = FrequencySeries(zeros(n),delta_f=1.0, 
                               dtype=complex_same_precision_as(vec))
        fft(vec_pad,vectilde)
        
    vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC,delta_f=delta_f,dtype=complex64)
    return vectilde
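A minimal usage sketch (not part of the original source) of the function above, assuming make_padded_frequency_series and PyCBC's TimeSeries are importable in the current module:

import numpy
from pycbc.types import TimeSeries

delta_t = 1.0 / 4096
t = numpy.arange(3000) * delta_t                     # 3000 samples: not a power of two
vec = TimeSeries(numpy.sin(2 * numpy.pi * 100 * t), delta_t=delta_t)

vectilde = make_padded_frequency_series(vec)         # padded to 8192 samples -> 4097 bins
print(len(vectilde), vectilde.delta_f)               # 4097, 0.5 Hz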
Example no. 4
def bandlimited_interpolate(series, delta_f):
    """Return a new PSD that has been interpolated to the desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output

    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    series = FrequencySeries(series, dtype=complex_same_precision_as(series), delta_f=series.delta_f)

    N = (len(series) - 1) * 2
    delta_t = 1.0 / series.delta_f / N

    new_N = int(1.0 / (delta_t * delta_f))
    new_n = new_N // 2 + 1

    series_in_time = TimeSeries(zeros(N), dtype=real_same_precision_as(series), delta_t=delta_t)
    ifft(series, series_in_time)

    padded_series_in_time = TimeSeries(zeros(new_N), dtype=series_in_time.dtype, delta_t=delta_t)
    padded_series_in_time[0:N//2] = series_in_time[0:N//2]
    padded_series_in_time[new_N-N//2:new_N] = series_in_time[N//2:N]

    interpolated_series = FrequencySeries(zeros(new_n), dtype=series.dtype, delta_f=delta_f)
    fft(padded_series_in_time, interpolated_series)

    return interpolated_series
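A short usage sketch (illustrative, not from the original code) that interpolates an analytic PSD from 1 Hz to 0.25 Hz resolution with the function above:

from pycbc.psd import aLIGOZeroDetHighPower

coarse = aLIGOZeroDetHighPower(1025, 1.0, 10.0)      # 1025 bins at delta_f = 1 Hz
fine = bandlimited_interpolate(coarse, 0.25)         # resample to delta_f = 0.25 Hz
print(len(coarse), len(fine))                        # 1025, 4097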
Example no. 5
def noise_from_psd(length, delta_t, psd, seed=None):
    """ Create noise with a given psd.

    Return noise with a given psd. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}, optional
        The seed to generate the noise. If None, a random seed is used.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing Gaussian noise colored by the given psd.
    """
    noise_ts = TimeSeries(zeros(length), delta_t=delta_t)

    if seed is None:
        seed = numpy.random.randint(2**32)

    randomness = lal.gsl_rng("ranlux", seed)

    N = int(1.0 / delta_t / psd.delta_f)
    n = N // 2 + 1
    stride = N // 2

    if n > len(psd):
        raise ValueError("PSD not compatible with requested delta_t")

    psd = (psd[0:n]).lal()
    psd.data.data[n-1] = 0

    segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
    length_generated = 0

    SimNoise(segment, 0, psd, randomness)
    while (length_generated < length):
        if (length_generated + stride) < length:
            noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
        else:
            noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]

        length_generated += stride
        SimNoise(segment, stride, psd, randomness)

    return noise_ts
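A minimal sketch of how this might be called (assuming the function above is in scope; in PyCBC it is exposed as pycbc.noise.noise_from_psd):

from pycbc.psd import aLIGOZeroDetHighPower

delta_t = 1.0 / 4096
delta_f = 1.0 / 16
flen = int(1.0 / delta_t / delta_f) // 2 + 1         # PSD length compatible with delta_t
psd = aLIGOZeroDetHighPower(flen, delta_f, 10.0)

noise = noise_from_psd(16 * 4096, delta_t, psd, seed=127)
print(noise.duration, noise.sample_rate)             # 16 s of colored noise at 4096 Hz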
Example no. 6
 def __init__(self, output):
     self.output = output.data.gpudata
     self.df = numpy.float32(output.delta_f)
     self.hlen = numpy.int32(len(output))
     lookups = get_dckernel(self.hlen)
     self.fn1 = lookups[0]
     self.fn2 = lookups[1]
     self.freq_tex = lookups[2]
     self.amp_tex = lookups[3]
     self.phase_tex = lookups[4]
     self.nt = lookups[5]
     self.nb = lookups[6]
     self.lower = zeros(self.nb, dtype=numpy.int32).data.gpudata
     self.upper = zeros(self.nb, dtype=numpy.int32).data.gpudata
Example no. 7
    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out is None:
            tempout = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempout = self.out

        approximant = self.approximant(index)
        f_end = self.end_frequency(index)
        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length-1) * self.delta_f

        # Find the start frequency, if variable
        if self.max_template_length is not None:
            f_low = find_variable_start_frequency(approximant,
                                                  self.table[index],
                                                  self.f_lower,
                                                  self.max_template_length)
        else:
            f_low = self.f_lower

        logging.info('%s: generating %s from %s Hz' % (index, approximant, f_low))

        # Clear the storage memory
        poke  = tempout.data
        tempout.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            tempout[0:self.filter_length], self.table[index],
            approximant=approximant, f_lower=f_low, f_final=f_end,
            delta_f=self.delta_f, delta_t=self.delta_t, distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be 
        # erased by the type conversion below.
        ttotal = template_duration = None
        if hasattr(htilde, 'length_in_time'):
            ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            template_duration = htilde.chirp_length

        self.table[index].template_duration = template_duration        

        htilde = htilde.astype(numpy.complex64)
        htilde.f_lower = self.f_lower
        htilde.end_frequency = f_end
        htilde.end_idx = int(htilde.end_frequency / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.approximant = approximant
        htilde.chirp_length = template_duration
        htilde.length_in_time = ttotal
        
        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        return htilde
Example no. 8
 def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
                                        low_frequency_cutoff=None,
                                        cached_mem=None):
     """Generate the template with index t_num using custom length."""
     approximant = self.approximant(t_num)
     # Don't want to use INTERP waveforms in here
     if approximant.endswith('_INTERP'):
         approximant = approximant.replace('_INTERP', '')
     # Using SPAtmplt here is bad as the stored cbrt and logv get
     # recalculated as we change delta_f values. Fall back to TaylorF2
     # in lalsimulation.
     if approximant == 'SPAtmplt':
         approximant = 'TaylorF2'
     if cached_mem is None:
         wav_len = int(max_freq / delta_f) + 1
         cached_mem = zeros(wav_len, dtype=np.complex64)
     if self.has_compressed_waveforms and self.enable_compressed_waveforms:
         htilde = self.get_decompressed_waveform(cached_mem, t_num,
                                                 f_lower=low_frequency_cutoff,
                                                 approximant=approximant,
                                                 df=delta_f)
     else:
         htilde = pycbc.waveform.get_waveform_filter(
             cached_mem, self.table[t_num], approximant=approximant,
             f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
             distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq))
     return htilde
Example no. 9
def make_frequency_series(vec):
    """Return a frequency series of the input vector.

    If the input is a frequency series it is returned unchanged; if the input
    vector is a real time series it is Fourier transformed and returned as a
    frequency series.

    Parameters
    ----------
    vec : TimeSeries or FrequencySeries
        The input vector to convert.

    Returns
    -------
    Frequency Series: FrequencySeries
        A frequency domain version of the input vector.
    """
    if isinstance(vec, FrequencySeries):
        return vec
    if isinstance(vec, TimeSeries):
        N = len(vec)
        n = N // 2 + 1
        delta_f = 1.0 / N / vec.delta_t
        vectilde =  FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)), 
                                    delta_f=delta_f, copy=False)
        fft(vec, vectilde)   
        return vectilde
    else:
        raise TypeError("Can only convert a TimeSeries to a FrequencySeries")
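A small usage sketch (illustrative; in PyCBC this helper is available through pycbc.filter):

import numpy
from pycbc.types import TimeSeries

delta_t = 1.0 / 256
t = numpy.arange(1024) * delta_t
ts = TimeSeries(numpy.sin(2 * numpy.pi * 32 * t), delta_t=delta_t)

fs = make_frequency_series(ts)              # real time series -> one-sided complex spectrum
print(len(fs), fs.delta_f)                  # 513 bins at 0.25 Hz resolution
assert make_frequency_series(fs) is fs      # a FrequencySeries input passes straight through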
Example no. 10
    def __init__(self, size):
        # We'll do some arithmetic with these, so sanity check first:
        if not check_pow_two(size):
            raise ValueError("Only power-of-two sizes supported")

        self.ncpus = _scheme.mgr.state.num_threads
        self.size = size
        self.stilde = zeros(self.size, dtype = complex64)
        self.htilde = zeros(self.size, dtype = complex64)
        self.qtilde = zeros(self.size, dtype = complex64)
        self.snr = zeros(self.size, dtype = complex64)
        self.iptr = self.qtilde.ptr
        self.optr = self.snr.ptr
        self.in1 = self.stilde.data
        self.in2 = self.htilde.data
        self.out = self.qtilde.data
Example no. 11
def line_model(freq, data, tref, amp=1, phi=0):
    """ Simple time-domain model for a frequency line.

    Parameters
    ----------
    freq: float
        Frequency of the line.
    data: pycbc.types.TimeSeries
        Reference data, to get delta_t, start_time, duration and sample_times.
    tref: float
        Reference time for the line model.
    amp: {1., float}, optional
        Amplitude of the frequency line.
    phi: {0., float}, optional
        Phase of the frequency line (radians).

    Returns
    -------
    freq_line: pycbc.types.TimeSeries
        A timeseries of the line model with frequency 'freq'. The returned
        data are complex to allow measuring the amplitude and phase of the
        corresponding frequency line in the strain data. For extraction, use
        only the real part of the data.
    """
    freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,
                           epoch=data.start_time)

    times = data.sample_times - float(tref)
    alpha = 2 * numpy.pi * freq * times + phi
    freq_line.data = amp * numpy.exp(1.j * alpha)

    return freq_line
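A brief usage sketch (illustrative; assumes line_model is in scope) that builds a 60 Hz line template aligned with some reference data:

import numpy
from pycbc.types import TimeSeries

data = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0/1024, epoch=0)
line = line_model(60.0, data, tref=float(data.start_time), amp=2.0, phi=0.5)
print(len(line), line.dtype)                # complex-valued model, same length as the data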
Example no. 12
    def test_spatmplt(self):
        fl = 25
        delta_f = 1.0 / 256

        for m1 in [1, 1.4, 20]:
            for m2 in [1.4, 20]:
                for s1 in  [-2, -1, -0.5, 0, 0.5, 1, 2]:
                    for s2 in [-2, -1, -0.5, 0, 0.5, 1, 2]:
                        # Generate TaylorF2 from lalsimulation, restricting to the capabilities of spatmplt
                        hpr,_ = get_fd_waveform( mass1=m1, mass2=m2, spin1z=s1, spin2z=s2, 
                                                 delta_f=delta_f, f_lower=fl,
                                                 approximant="TaylorF2", amplitude_order=0,
                                                 spin_order=-1, phase_order=-1)
                        hpr=hpr.astype(complex64)

                        with self.context:
                            # Generate the spatmplt waveform
                            out = zeros(len(hpr), dtype=complex64)
                            hp = get_waveform_filter(out, mass1=m1, mass2=m2, spin1z=s1, spin2z=s2,
                                                     delta_f=delta_f, f_lower=fl, approximant="SPAtmplt", 
                                                     amplitude_order=0, spin_order=-1, phase_order=-1)

                            # Check the diff is sane
                            mag = abs(hpr).sum()
                            diff = abs(hp - hpr).sum() / mag
                            self.assertTrue(diff < 0.01)

                            # Point to point overlap (no phase or time maximization)
                            o =  overlap(hp, hpr)
                            self.assertAlmostEqual(1.0, o, places=4)

                            print("checked m1: %s m2:: %s s1z: %s s2z: %s] overlap = %s, diff = %s" % (m1, m2, s1, s2, o, diff))
Example no. 13
    def __init__(self, frame_src, 
                       channel_name,
                       start_time,
                       max_buffer=2048, 
                       force_update_cache=True,
                       increment_update_cache=None):
        """ Create a rolling buffer of frame data

        Parameters
        ----------
        frame_src: str or list of strings
            Strings that indicate where to read frame files from. This can be
            a list of frame files, a glob, etc.
        channel_name: str
            Name of the channel to read from the frame files
        start_time:
            Time to start reading from.
        max_buffer: {2048, int}, optional
            Length of the buffer in seconds
        """
        self.frame_src = frame_src
        self.channel_name = channel_name
        self.read_pos = start_time
        self.force_update_cache = force_update_cache
        self.increment_update_cache = increment_update_cache

        self.update_cache()
        self.channel_type, self.raw_sample_rate = self._retrieve_metadata(self.stream, self.channel_name)

        raw_size = self.raw_sample_rate * max_buffer
        self.raw_buffer = TimeSeries(zeros(raw_size, dtype=numpy.float64),
                                     copy=False,
                                     epoch=start_time - max_buffer,
                                     delta_t=1.0/self.raw_sample_rate)
Example no. 14
    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out is None:
            tempout = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempout = self.out

        if self.approximant is not None:
            if 'params' in self.approximant:
                t = type('t', (object,), {'params' : self.table[index]})
                approximant = str(self.parse_option(t, self.approximant)) 
            else:
                approximant = self.approximant
        else:
            raise ValueError("Reading approximant from template bank not yet supported")

        # Get the end of the waveform if applicable (only for SPAtmplt atm)
        f_end = pycbc.waveform.get_waveform_end_frequency(self.table[index],
                              approximant=approximant, **self.extra_args)

        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length-1) * self.delta_f

        poke  = tempout.data
        # Clear the storage memory
        tempout.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            tempout[0:self.filter_length], self.table[index],
            approximant=approximant, f_lower=self.f_lower, f_final=f_end,
            delta_f=self.delta_f, delta_t=self.delta_t, distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be 
        # erased by the type conversion below.
        # NOTE: If these durations are not available the values in self.table
        #       will continue to take the values in the input file.
        if hasattr(htilde, 'length_in_time'):
            if htilde.length_in_time is not None:
                self.table[index].ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            if htilde.chirp_length is not None:
                self.table[index].template_duration = htilde.chirp_length

        htilde = htilde.astype(self.dtype)
        htilde.f_lower = self.f_lower
        htilde.end_frequency = f_end
        htilde.end_idx = int(htilde.end_frequency / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.approximant = approximant
        
        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        return htilde
Example no. 15
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self.getslice(index)

        approximant = self.approximant(index)
        f_end = self.end_frequency(index)

        # Determine the length of time of the filter, rounded up to
        # nearest power of two
        min_buffer = .5 + self.minimum_buffer
    
        from pycbc.waveform.waveform import props
        buff_size = pycbc.waveform.get_waveform_filter_length_in_time(approximant, f_lower=self.f_lower, 
                                                                      **props(self.table[index]))
        tlen = self.round_up((buff_size + min_buffer) * self.sample_rate)
        flen = tlen // 2 + 1

        delta_f = self.sample_rate / float(tlen)

        if f_end is None or f_end >= (flen * delta_f):
            f_end = (flen-1) * delta_f

        logging.info("Generating %s, %ss, %i" % (approximant, 1.0/delta_f, index))

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            zeros(flen, dtype=numpy.complex64), self.table[index],
            approximant=approximant, f_lower=self.f_lower, f_final=f_end,
            delta_f=delta_f, delta_t=1.0/self.sample_rate, distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be 
        # erased by the type conversion below.
        ttotal = template_duration = -1
        if hasattr(htilde, 'length_in_time'):
            ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            template_duration = htilde.chirp_length

        self.table[index].template_duration = template_duration        

        htilde = htilde.astype(numpy.complex64)
        htilde.f_lower = self.f_lower
        htilde.end_frequency = f_end
        htilde.end_idx = int(htilde.end_frequency / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.approximant = approximant
        htilde.chirp_length = template_duration
        htilde.length_in_time = ttotal
        
        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        htilde.id = self.id_from_hash(hash((htilde.params.mass1, htilde.params.mass2, 
                          htilde.params.spin1z, htilde.params.spin2z)))
        return htilde
Example no. 16
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series
    
    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)   
        ifft(cout, out)

        return out.numpy()  / len(out)
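A usage sketch (illustrative) comparing the FFT-based path above with scipy's direct implementation on a long input:

import numpy
import scipy.signal

coeffs = numpy.ones(32, dtype=numpy.float32) / 32    # simple moving-average FIR
x = numpy.random.normal(size=4096).astype(numpy.float32)

y_fft = lfilter(coeffs, x)                           # len(x) >= 2**7, so the FFT path runs
y_ref = scipy.signal.lfilter(coeffs, 1.0, x)
# Away from the wrap-around at the start the two should agree to numerical precision
print(numpy.max(numpy.abs(y_fft[len(coeffs):] - y_ref[len(coeffs):])))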
Example no. 17
 def __init__(self, size):
     self.arr = zeros(size, dtype=complex64)
     self.arr[-1] = 0.8+0.8j
     self.thresh = 1.0
     self.winsize = global_winsize
     self.segsize = global_segsize
     self.tcobj = ThreshClusterObject(self.arr, self.thresh, self.winsize, self.segsize)
     self.execute = self.tcobj.execute
Example no. 18
def fftw_plan(size, nthreads = 1):
    if not _fftw._fftw_threaded_set:
        _fftw.set_threads_backend()
    if nthreads != _fftw._fftw_current_nthreads:
        _fftw._fftw_plan_with_nthreads(nthreads)
    # Convert a measure-level to flags
    flags = _fftw.get_flag(_fftw.get_measure_level(), aligned=True)
    fplan = libfftw3f.fftwf_plan_dft_1d
    fplan.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p,
                      ctypes.c_int, ctypes.c_int]
    fplan.restype = ctypes.c_void_p
    inv = zeros(size, dtype = complex64)
    outv = zeros(size, dtype = complex64)
    res = fplan(size, inv.ptr, outv.ptr, _fftw.FFTW_BACKWARD, flags)
    del inv
    del outv
    return res
Example no. 19
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'):
    """Interpolate complex frequency series to desired delta_f.

    Return a new complex frequency series that has been interpolated to the
    desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output
    zeros_offset : optional, {0, int}
        Number of samples to delay the start of the zero padding
    side : optional, {'right', str}
        The side of the vector to zero pad
        
    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    new_n = int( (len(series)-1) * series.delta_f / delta_f + 1)
    samples = numpy.arange(0, new_n) * delta_f
    old_N = int( (len(series)-1) * 2 )
    new_N = int( (new_n - 1) * 2 )
    time_series = TimeSeries(zeros(old_N), delta_t =1.0/(series.delta_f*old_N),
                             dtype=real_same_precision_as(series))
                             
    ifft(series, time_series)

    time_series.roll(-zeros_offset)
    time_series.resize(new_N)
    
    if side == 'left':
        time_series.roll(zeros_offset + new_N - old_N)
    elif side == 'right':
        time_series.roll(zeros_offset)

    out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
                           delta_f=delta_f, dtype=series.dtype)
    fft(time_series, out_series)

    return out_series
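A short usage sketch (assumed, for illustration) that refines the frequency resolution of a frequency-domain waveform using the function above:

from pycbc.waveform import get_fd_waveform

hp, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
                        delta_f=0.5, f_lower=20.0)
hp_fine = interpolate_complex_frequency(hp, 0.25, zeros_offset=0, side='right')
print(hp.delta_f, hp_fine.delta_f, len(hp_fine))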
Example no. 20
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
                               buffer_length=100):
    """ Convert a time domain into a frequency domain waveform by FFT.
        As a waveform is assumed to "wrap" in the time domain one must be
        careful to ensure the waveform goes to 0 at both "boundaries". To
        ensure this is done correctly the waveform must have the epoch set such
        the merger time is at t=0 and the length of the waveform should be
        shorter than the desired length of the FrequencySeries (times 2 - 1)
        so that zeroes can be suitably pre- and post-pended before FFTing.
        If given, out is a memory array to be used as the output of the FFT.
        If not given memory is allocated internally.
        If present, the length of the returned FrequencySeries is determined
        from the length of out. If out is not given the length can be provided
        explicitly, or it will be chosen as the nearest power of 2. If choosing
        length explicitly the waveform length + buffer_length is used when
        choosing the nearest binary number so that some zero padding is always
        added.
    """
    # Figure out lengths and set out if needed
    if out is None:
        if length is None:
            N = pnutils.nearest_larger_binary_number(len(waveform) + \
                                                     buffer_length)
            n = int(N//2) + 1
        else:
            n = length
            N = (n-1)*2
        out = zeros(n, dtype=complex_same_precision_as(waveform))
    else:
        n = len(out)
        N = (n-1)*2
    delta_f =  1. / (N * waveform.delta_t)

    # total duration of the waveform
    tmplt_length = len(waveform) * waveform.delta_t
    if len(waveform) > N:
        err_msg = "The time domain template is longer than the intended "
        err_msg += "duration in the frequency domain. This situation is "
        err_msg += "not supported in this function. Please shorten the "
        err_msg += "waveform appropriately before calling this function or "
        err_msg += "increase the allowed waveform length. "
        err_msg += "Waveform length (in samples): {}".format(len(waveform))
        err_msg += ". Intended length: {}.".format(N)
        raise ValueError(err_msg)
    # for IMR templates the zero of time is at max amplitude (merger)
    # thus the start time is minus the duration of the template from
    # lower frequency cutoff to merger, i.e. minus the 'chirp time'
    tChirp = - float( waveform.start_time )  # conversion from LIGOTimeGPS
    waveform.resize(N)
    k_zero = int(waveform.start_time / waveform.delta_t)
    waveform.roll(k_zero)
    htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
    fft(waveform.astype(real_same_precision_as(htilde)), htilde)
    htilde.length_in_time = tmplt_length
    htilde.chirp_length = tChirp
    return htilde
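An illustrative sketch (an assumption, not part of the original module): a time-domain approximant from get_td_waveform already has its merger at t=0, as the docstring above requires.

from pycbc.waveform import get_td_waveform

hp, _ = get_td_waveform(approximant="SEOBNRv4", mass1=30, mass2=30,
                        delta_t=1.0/4096, f_lower=30.0)
htilde = td_waveform_to_fd_waveform(hp)
print(len(htilde), htilde.delta_f, htilde.length_in_time)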
Example no. 21
File: mkl.py Project: AbhayMK/pycbc
def create_descriptor(size, idtype, odtype, inplace):
    invec = zeros(1, dtype=idtype)
    outvec = zeros(1, dtype=odtype)
    
    desc = ctypes.c_void_p(1)
    f = lib.DftiCreateDescriptor
    f.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
    
    prec = mkl_prec[invec.precision]
    domain = mkl_domain[str(invec.kind)][str(outvec.kind)]
    
    status = f(ctypes.byref(desc), prec, domain, 1, size)
    if inplace:
        lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
    else:
        lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
    lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_CCS_FORMAT)
    lib.DftiCommitDescriptor(desc)
    check_status(status)
    return desc
Example no. 22
def _imrphenombfreq(**p):
    import lalinspiral
    params = lalinspiral.InspiralTemplate()
    m1 = p['mass1']
    m2 = p['mass2']

    mc, et = pnutils.mass1_mass2_to_mchirp_eta(m1, m2)
    params.approximant = lalsimulation.IMRPhenomB
    params.fLower = p['f_lower']
    params.eta = et
    params.distance = p['distance'] * lal.PC_SI * 1e6
    params.mass1 = m1
    params.mass2 = m2
    params.spin1[2] = p['spin1z']
    params.spin2[2] = p['spin2z']
    params.startPhase = p['coa_phase']*2 - lal.PI
    params.startTime = 0

    params.tSampling = 8192
    N = int(params.tSampling / p['delta_f'])
    n = N // 2

    # Create temporary memory to hold the results and call the generator
    hpt = zeros(N, dtype=float32)
    hct = zeros(N, dtype=float32)
    hpt=hpt.lal()
    hct=hct.lal()
    lalinspiral.BBHPhenWaveBFreqDomTemplates(hpt, hct, params)

    # Copy the results to a complex frequency series format
    hctc = FrequencySeries(zeros(n, dtype=complex64), delta_f=p['delta_f'])
    hptc = FrequencySeries(zeros(n, dtype=complex64), delta_f=p['delta_f'])

    hptc.data += hpt.data[0:n]
    hptc.data[1:n] += hpt.data[N:N-n:-1] * 1j

    hctc.data += hct.data[0:n]
    hctc.data[1:n] += hct.data[N:N-n:-1] * 1j

    return hptc.astype(complex128),  hctc.astype(complex128)
Example no. 23
    def __init__(self, size):
        # We'll do some arithmetic with these, so sanity check first:
        if not check_pow_two(size):
            raise ValueError("Only power-of-two sizes supported")

        self.ncpus = _scheme.mgr.state.num_threads
        self.size = size

        self.st = _np.zeros(self.ncpus, dtype = _np.uintp)
        self.ht = _np.zeros(self.ncpus, dtype = _np.uintp)
        self.qt = _np.zeros(self.ncpus, dtype = _np.uintp)
        self.snr = zeros(size, dtype = complex64)
        self.st_list = []
        self.ht_list = []
        self.qt_list = []
        for i in range(self.ncpus):
            st = zeros(size, dtype = complex64)
            self.st_list.append(st)
            self.st[i] = st.ptr
            ht = zeros(size, dtype = complex64)
            self.ht_list.append(ht)
            self.ht[i] = ht.ptr
            qt = zeros(size, dtype = complex64)
            self.qt_list.append(qt)
            self.qt[i] = qt.ptr
        self.plan = _np.zeros(1, dtype = _np.uintp)


        self.support = corr_contig_nostream_support + many_fft_support
        # We in fact only correlate the first half, but the correlation
        # code needs the length as a *real* array so the full length as
        # a *complex* array is the right thing to use here.
        self.support = self.support.replace('NLEN', str(self.size))
        self.code = many_fft_code
        self.code = self.code.replace('NCPUS', str(self.ncpus))
        self.corr_code = many_corr_code
        self.corr_code = self.corr_code.replace('NCPUS', str(self.ncpus))
        self.fft_code = just_fft_code
        self.fft_code = self.fft_code.replace('NCPUS', str(self.ncpus))
Example no. 24
 def __init__(self, inarray, verbose=0):
     self.inarr = _np.array(inarray.data, copy=False).view(dtype = float32)
     self.howmany = _np.zeros(1, dtype = _np.uint32)
     self.howmany[0] = len(self.inarr)
     self.nstart = _np.zeros(1, dtype = _np.uint32)
     self.nstart[0] = 0
     self.cmplx_mval = zeros(1, dtype = complex64)
     self.mval = _np.array(self.cmplx_mval.data, copy = False).view(dtype = float32)
     self.norm = _np.zeros(1, dtype = float32)
     self.mloc = _np.zeros(1, dtype = _np.uint32)
     self.code = max_only_code
     self.support = thresh_cluster_support
     self.verbose = verbose
Example no. 25
def qseries(fseries, Q, f0, return_complex=False):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency
    return_complex: {False, bool}
        Return the raw complex series instead of the normalized power.

    Returns
    -------
    energy: '~pycbc.types.TimeSeries'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    """
    # normalize and generate bi-square window
    qprime = Q / 11**(1/2.)
    norm = numpy.sqrt(315. * qprime / (128. * f0))
    window_size = 2 * int(f0 / qprime * fseries.duration) + 1
    xfrequencies = numpy.linspace(-1., 1., window_size)

    start = int((f0 - (f0 / qprime)) * fseries.duration)
    end = int(start + window_size)
    center = (start + end) // 2

    windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm

    tlen = (len(fseries)-1) * 2
    windowed.resize(tlen)
    windowed = numpy.roll(windowed, -center)

    # calculate the time series for this q-value
    windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
                            epoch=fseries.start_time)
    ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
                            delta_t=fseries.delta_t)
    ifft(windowed, ctseries)

    if return_complex:
        return ctseries
    else:
        energy = ctseries.squared_norm()
        medianenergy = numpy.median(energy.numpy())
        return  energy / float(medianenergy)
Example no. 26
def _fftw_setup(fftobj):
        n = _np.asarray([fftobj.size], dtype=_np.int32)
        inembed = _np.asarray([len(fftobj.invec)], dtype=_np.int32)
        onembed = _np.asarray([len(fftobj.outvec)], dtype=_np.int32)
        nthreads = _scheme.mgr.state.num_threads
        if not _fftw_threaded_set:
            set_threads_backend()
        if nthreads != _fftw_current_nthreads:
            _fftw_plan_with_nthreads(nthreads)  
        mlvl = get_measure_level()
        aligned = fftobj.invec.data.isaligned and fftobj.outvec.data.isaligned
        flags = get_flag(mlvl, aligned)
        plan_func = _plan_funcs_dict[ (str(fftobj.invec.dtype), str(fftobj.outvec.dtype)) ]
        tmpin = zeros(len(fftobj.invec), dtype = fftobj.invec.dtype)
        tmpout = zeros(len(fftobj.outvec), dtype = fftobj.outvec.dtype)
        # C2C, forward
        if fftobj.forward and (fftobj.outvec.dtype in [complex64, complex128]):
            plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
                             tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
                             tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
                             FFTW_FORWARD, flags)
        # C2C, backward
        elif not fftobj.forward and (fftobj.invec.dtype in [complex64, complex128]):
            plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
                             tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
                             tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
                             FFTW_BACKWARD, flags)
        # R2C or C2R (hence no direction argument for plan creation)
        else:
            plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
                             tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
                             tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
                             flags)
        del tmpin
        del tmpout
        return plan
Example no. 27
def match(vec1, vec2, psd=None, low_frequency_cutoff=None,
          high_frequency_cutoff=None, v1_norm=None, v2_norm=None):
    """ Return the match between the two TimeSeries or FrequencySeries.
    
    Return the match between two waveforms. This is equivalent to the overlap
    maximized over time and phase.

    Parameters
    ----------
    vec1 : TimeSeries or FrequencySeries 
        The input vector containing a waveform.
    vec2 : TimeSeries or FrequencySeries 
        The input vector containing a waveform.
    psd : Frequency Series
        A power spectral density to weight the overlap.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the match.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the match.
    v1_norm : {None, float}, optional
        The normalization of the first waveform. This is equivalent to its
        sigmasq value. If None, it is internally calculated. 
    v2_norm : {None, float}, optional
        The normalization of the second waveform. This is equivalent to its
        sigmasq value. If None, it is internally calculated.

    Returns
    -------
    match: float
    index: int
        The number of samples to shift to get the match.
    """

    htilde = make_frequency_series(vec1)
    stilde = make_frequency_series(vec2)

    N = (len(htilde)-1) * 2

    global _snr
    if _snr is None or _snr.dtype != htilde.dtype or len(_snr) != N:
        _snr = zeros(N,dtype=complex_same_precision_as(vec1))
    snr, corr, snr_norm = matched_filter_core(htilde,stilde,psd,low_frequency_cutoff,
                             high_frequency_cutoff, v1_norm, out=_snr)
    maxsnr, max_id = snr.abs_max_loc()
    if v2_norm is None:
        v2_norm = sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff)
    return maxsnr * snr_norm / sqrt(v2_norm), max_id
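A minimal end-to-end sketch (illustrative; in PyCBC match is exposed through pycbc.filter) comparing two frequency-domain waveforms that differ slightly in mass:

from pycbc.waveform import get_fd_waveform
from pycbc.psd import aLIGOZeroDetHighPower

delta_f, f_low = 0.25, 20.0
hp1, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
                         delta_f=delta_f, f_lower=f_low)
hp2, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=32, mass2=30,
                         delta_f=delta_f, f_lower=f_low)
flen = max(len(hp1), len(hp2))
hp1.resize(flen)
hp2.resize(flen)
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)
m, idx = match(hp1, hp2, psd=psd, low_frequency_cutoff=f_low)
print(m, idx)       # match value in [0, 1] and the time-shift index that achieves it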
Example no. 28
def sigmasq_series(htilde, psd=None, low_frequency_cutoff=None,
            high_frequency_cutoff=None):
    """Return a cumulative sigmasq frequency series. 

    Return a frequency series containing the accumulated power in the input 
    up to that frequency. 
    
    Parameters
    ----------
    htilde : TimeSeries or FrequencySeries 
        The input vector 
    psd : {None, FrequencySeries}, optional
        The psd used to weight the accumulated power.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin accumulating power. If None, start at the beginning
        of the vector.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop considering accumulated power. If None, continue 
        until the end of the input vector.

    Returns
    -------
    Frequency Series: FrequencySeries
        A frequency series containing the cumulative sigmasq.
    """
    htilde = make_frequency_series(htilde)
    N = (len(htilde)-1) * 2 
    norm = 4.0 * htilde.delta_f
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                   high_frequency_cutoff, htilde.delta_f, N)  
   
    sigma_vec = FrequencySeries(zeros(len(htilde), dtype=real_same_precision_as(htilde)), 
                                delta_f = htilde.delta_f, copy=False)
    
    mag = htilde.squared_norm()
    
    if psd is not None:
        mag /= psd

    sigma_vec[kmin:kmax] = mag[kmin:kmax].cumsum()
        
    return sigma_vec*norm
Example no. 29
def power_chisq(template, data, num_bins, psd,
                low_frequency_cutoff=None,
                high_frequency_cutoff=None,
                return_bins=False):
    """Calculate the chisq timeseries

    Parameters
    ----------
    template: FrequencySeries or TimeSeries
        A time or frequency series that contains the filter template.
    data: FrequencySeries or TimeSeries
        A time or frequency series that contains the data to filter. Its
        length and frequency resolution must match those of the template.
    num_bins: int
        The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: {None, float}, optional
        The low frequency cutoff for the filter
    high_frequency_cutoff: {None, float}, optional
        The high frequency cutoff for the filter
    return_bins: {boolean, False}, optional
        Return a list of the individual chisq bins

    Returns
    -------
    chisq: TimeSeries
        TimeSeries containing the chisq values for all times.
    """
    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
                            high_frequency_cutoff)
    corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype)
    total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
                           low_frequency_cutoff, high_frequency_cutoff,
                           corr_out=corra)

    return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins)
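An illustrative sketch (assumption; in PyCBC power_chisq lives in pycbc.vetoes) computing the chi-squared time series of a template against simulated noise:

from pycbc.waveform import get_fd_waveform
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.noise import noise_from_psd

delta_t, delta_f, f_low = 1.0/4096, 1.0/16, 20.0
flen = int(1.0 / delta_t / delta_f) // 2 + 1
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)
data = noise_from_psd(16 * 4096, delta_t, psd, seed=0)
template, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
                              delta_f=delta_f, f_lower=f_low)
template.resize(flen)
chisq = power_chisq(template, data, 16, psd, low_frequency_cutoff=f_low)
print(len(chisq))            # one chi-squared value per time sample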
Example no. 30
def from_lalsimulation(func, length, delta_f, low_freq_cutoff):
    """Generate a frequency series containing the specified LALSimulation PSD.

    Parameters
    ----------
    func : function
        LALSimulation PSD function.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.
    """
    psd = FrequencySeries(zeros(length), delta_f=delta_f)
    kmin = int(low_freq_cutoff / delta_f)
    psd.data[kmin:] = [func(f) for f in numpy.arange(length)[kmin:] * delta_f]
    return psd
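A small usage sketch (illustrative) with one of the analytic PSD functions provided by lalsimulation:

import lalsimulation

psd = from_lalsimulation(lalsimulation.SimNoisePSDaLIGOZeroDetHighPower,
                         length=4097, delta_f=0.25, low_freq_cutoff=10.0)
print(psd[0], psd[int(100.0 / psd.delta_f)])         # zero below the cutoff, finite above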
Example no. 31
def taylorf2(**kwds):
    """ Return a TaylorF2 waveform using CUDA to generate the phase and amplitude
    """
    # Pull out the input arguments
    delta_f = kwds['delta_f']
    distance = kwds['distance']
    mass1 = kwds['mass1']
    mass2 = kwds['mass2']
    phase_order = int(kwds['phase_order'])
    amplitude_order = int(kwds['amplitude_order'])
    phi0 = kwds['phi0']

    tC = -1.0 / delta_f

    #Calculate the spin corrections
    beta, sigma, gamma = pycbc.pnutils.mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(
        mass1, mass2, kwds['spin1z'], kwds['spin2z'])

    # Calculate the PN terms  # TODO: replace with functions in lalsimulation!
    M = float(mass1) + float(mass2)
    eta = mass1 * mass2 / (M * M)
    theta = -11831. / 9240.
    lambdaa = -1987. / 3080.0
    pfaN = 3.0 / (128.0 * eta)
    pfa2 = 5 * (743.0 / 84 + 11.0 * eta) / 9.0
    pfa3 = -16.0 * lal.PI + 4.0 * beta
    pfa4 = 5.0*(3058.673/7.056 + 5429.0/7.0 * eta + 617.0 * eta*eta)/72.0 - \
            10.0*sigma
    pfa5 = 5.0 / 9.0 * (7729.0 / 84.0 - 13.0 * eta) * lal.PI - gamma
    pfl5 = 5.0 / 3.0 * (7729.0 / 84.0 - 13.0 * eta) * lal.PI - gamma * 3
    pfa6 = (11583.231236531/4.694215680 - 640.0/3.0 * lal.PI * lal.PI- \
            6848.0/21.0*lal.GAMMA) + \
            eta * (-15335.597827/3.048192 + 2255./12. * lal.PI * \
            lal.PI - 1760./3.*theta +12320./9.*lambdaa) + \
            eta*eta * 76055.0/1728.0 - \
            eta*eta*eta*  127825.0/1296.0
    pfl6 = -6848.0 / 21.0
    pfa7 = lal.PI * 5.0/756.0 * ( 15419335.0/336.0 + 75703.0/2.0 * eta - \
            14809.0 * eta*eta)

    FTaN = 32.0 * eta * eta / 5.0
    FTa2 = -(12.47 / 3.36 + 3.5 / 1.2 * eta)
    FTa3 = 4.0 * lal.PI
    FTa4 = -(44.711 / 9.072 - 92.71 / 5.04 * eta - 6.5 / 1.8 * eta * eta)
    FTa5 = -(81.91 / 6.72 + 58.3 / 2.4 * eta) * lal.PI
    FTa6 = (664.3739519 / 6.9854400 + 16.0 / 3.0 * lal.PI * lal.PI -
            17.12 / 1.05 * lal.GAMMA +
            (4.1 / 4.8 * lal.PI * lal.PI - 134.543 / 7.776) * eta -
            94.403 / 3.024 * eta * eta - 7.75 / 3.24 * eta * eta * eta)
    FTl6 = -8.56 / 1.05
    FTa7 = -(162.85/5.04 - 214.745/1.728 * eta - 193.385/3.024 * eta*eta) \
            * lal.PI

    dETaN = 2 * -eta / 2.0
    dETa1 = 2 * -(3.0 / 4.0 + 1.0 / 12.0 * eta)
    dETa2 = 3 * -(27.0 / 8.0 - 19.0 / 8.0 * eta + 1. / 24.0 * eta * eta)
    dETa3 = 4 * -(67.5 / 6.4 -
                  (344.45 / 5.76 - 20.5 / 9.6 * lal.PI * lal.PI) * eta +
                  15.5 / 9.6 * eta * eta + 3.5 / 518.4 * eta * eta * eta)

    amp0 = -4. * mass1 * mass2 / (1.0e+06 * float(distance) * lal.PC_SI )* \
                    lal.MRSUN_SI * lal.MTSUN_SI * sqrt(lal.PI/12.0)

    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec

    kmin = int(kwds['f_lower'] / float(delta_f))

    vISCO = 1. / sqrt(6.)
    fISCO = vISCO * vISCO * vISCO / piM
    kmax = int(fISCO / delta_f)
    f_max = fISCO
    n = int(f_max / delta_f) + 1

    htilde = FrequencySeries(zeros(n, dtype=numpy.complex128),
                             delta_f=delta_f,
                             copy=False)
    taylorf2_kernel(htilde.data[kmin:kmax], kmin, phase_order, amplitude_order,
                    delta_f, piM, pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6,
                    pfl6, pfa7, FTaN, FTa2, FTa3, FTa4, FTa5, FTa6, FTl6, FTa7,
                    dETaN, dETa1, dETa2, dETa3, amp0, tC, phi0)

    hp = htilde
    hc = htilde * 1j
    return hp, hc
Example no. 32
def get_fd_lm(template=None, **kwargs):
    """Return frequency domain lm mode with a given number of overtones.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    freqs : dict
        {lmn:f_lmn} Dictionary of the central frequencies for each overtone,
        as many as number of modes.
    taus : dict
        {lmn:tau_lmn} Dictionary of the damping times for each overtone,
        as many as number of modes.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amplmn : float
        Amplitude of the lmn overtone, as many as the number of nmodes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a lm mode with n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a lm mode with n overtones in frequency domain.
    """

    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    f_0 = input_params.pop('freqs')
    tau = input_params.pop('taus')
    l, m = input_params.pop('l'), input_params.pop('m')
    inc = input_params.pop('inclination', 0.)
    nmodes = input_params.pop('nmodes')
    if int(nmodes) == 0:
        raise ValueError('Number of overtones (nmodes) must be greater '
                         'than zero.')
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = lm_deltaf(tau, ['%d%d%d' %(l,m,nmodes)])
    if f_final is None:
        f_final = lm_ffinal(f_0, tau, ['%d%d%d' %(l, m, nmodes)])
    kmax = int(f_final / delta_f) + 1

    outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
    outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)

    for n in range(nmodes):
        hplus, hcross = get_fd_qnm(template=None, f_0=f_0['%d%d%d' %(l,m,n)],
                            tau=tau['%d%d%d' %(l,m,n)], 
                            amp=amps['%d%d%d' %(l,m,n)],
                            phi=phis['%d%d%d' %(l,m,n)],
                            inclination=inc, l=l, m=m, delta_f=delta_f,
                            f_lower=f_lower, f_final=f_final)
        outplus.data += hplus.data
        outcross.data += hcross.data

    return outplus, outcross
Example no. 33
def get_td_qnm(template=None, taper=None, **kwargs):
    """Return a time domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration taper * tau.
        This option is recommended with timescales taper=1./2 or 1. for
        time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the waveform
        when doing the fourier transform to the frequency domain. Setting
        taper will add a rapid ringup with timescale tau/10.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    amp : float
        The amplitude of the ringdown (constant for now).
    phi : float
        The initial phase of the ringdown. Should also include the information
        from the azimuthal angle (phi_0 + m*Phi)
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    l : {2, int}, optional
        l mode for the spherical harmonics. Default is l=2.
    m : {2, int}, optional
        m mode for the spherical harmonics. Default is m=2.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude.
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude is 
        1/1000 of the peak amplitude.

    Returns
    -------
    hplus: TimeSeries
        The plus phase of the ringdown in time domain.
    hcross: TimeSeries
        The cross phase of the ringdown in time domain.
    """

    input_params = props(template, qnm_required_args, **kwargs)
    
    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following may not be in input_params
    inc = input_params.pop('inclination', 0.)
    l = input_params.pop('l', 2)
    m = input_params.pop('m', 2)
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if delta_t is None:
        delta_t = 1. / qnm_freq_decay(f_0, tau, 1./1000)
        if delta_t < min_dt:
            delta_t = min_dt
    if t_final is None:
        t_final = qnm_time_decay(tau, 1./1000)

    kmax = int(t_final / delta_t) + 1
    times = numpy.arange(kmax) * delta_t
    Y_plus, Y_cross = spher_harms(l, m, inc)

    hplus = amp * Y_plus * numpy.exp(-times/tau) * \
                                numpy.cos(two_pi*f_0*times + phi)
    hcross = amp * Y_cross * numpy.exp(-times/tau) * \
                                numpy.sin(two_pi*f_0*times + phi)

    if taper is not None and delta_t < taper*tau:
        taper_window = int(taper*tau/delta_t)
        kmax += taper_window

    outplus = TimeSeries(zeros(kmax), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax), delta_t=delta_t)

    # If size of tapering window is less than delta_t, do not apply taper.
    if taper is None or delta_t > taper*tau:
        outplus.data[:kmax] = hplus
        outcross.data[:kmax] = hcross

        return outplus, outcross

    else:
        taper_hp, taper_hc = apply_taper(delta_t, taper, f_0, tau, amp, phi,
                                                                    l, m, inc)
        start = - taper * tau
        outplus.data[:taper_window] = taper_hp
        outplus.data[taper_window:] = hplus
        outcross.data[:taper_window] = taper_hc
        outcross.data[taper_window:] = hcross
        outplus._epoch, outcross._epoch = start, start

        return outplus, outcross
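A brief usage sketch (illustrative; assumes get_td_qnm is in scope): a single damped sinusoid at 250 Hz with a 4 ms damping time and a half-tau taper.

hp, hc = get_td_qnm(f_0=250.0, tau=0.004, amp=1e-21, phi=0.0,
                    taper=0.5, delta_t=1.0/4096)
print(len(hp), hp.delta_t, float(hp.start_time))     # the epoch starts at -taper*tau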
Example no. 34
    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out_plus is None:
            tempoutplus = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempoutplus = self.out_plus
        if self.out_cross is None:
            tempoutcross = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempoutcross = self.out_cross

        approximant = self.approximant(index)

        # Get the end of the waveform if applicable (only for SPAtmplt atm)
        f_end = self.end_frequency(index)
        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length - 1) * self.delta_f

        # Find the start frequency, if variable
        if self.max_template_length is not None:
            f_low = find_variable_start_frequency(approximant,
                                                  self.table[index],
                                                  self.f_lower,
                                                  self.max_template_length)
        else:
            f_low = self.f_lower

        logging.info('%s: generating %s from %s Hz', index, approximant, f_low)

        # Touching .data ensures the template memory is realized under the
        # current processing scheme before it is cleared below
        poke1 = tempoutplus.data  # pylint:disable=unused-variable
        poke2 = tempoutcross.data  # pylint:disable=unused-variable

        # Clear the storage memory
        tempoutplus.clear()
        tempoutcross.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        hplus, hcross = pycbc.waveform.get_two_pol_waveform_filter(
            tempoutplus[0:self.filter_length],
            tempoutcross[0:self.filter_length],
            self.table[index],
            approximant=approximant,
            f_lower=f_low,
            f_final=f_end,
            delta_f=self.delta_f,
            delta_t=self.delta_t,
            distance=distance,
            **self.extra_args)

        if hasattr(hplus, 'chirp_length') and hplus.chirp_length is not None:
            self.table[index].template_duration = hplus.chirp_length

        hplus = hplus.astype(self.dtype)
        hcross = hcross.astype(self.dtype)
        hplus.f_lower = f_low
        hcross.f_lower = f_low
        hplus.min_f_lower = self.min_f_lower
        hcross.min_f_lower = self.min_f_lower
        hplus.end_frequency = f_end
        hcross.end_frequency = f_end
        hplus.end_idx = int(hplus.end_frequency / hplus.delta_f)
        hcross.end_idx = int(hplus.end_frequency / hplus.delta_f)
        hplus.params = self.table[index]
        hcross.params = self.table[index]
        hplus.approximant = approximant
        hcross.approximant = approximant

        # Add sigmasq as a method of this instance
        hplus.sigmasq = types.MethodType(sigma_cached, hplus)
        hplus._sigmasq = {}
        hcross.sigmasq = types.MethodType(sigma_cached, hcross)
        hcross._sigmasq = {}

        return hplus, hcross
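# Hypothetical usage sketch (not part of the original source): assuming `bank`
# is an instance of the filter-bank class this __getitem__ belongs to, and
# `psd` is a FrequencySeries with the bank's delta_f, each item yields both
# polarizations ready for filtering:
#
#     hplus, hcross = bank[0]
#     hp_sigmasq = hplus.sigmasq(psd)   # sigma_cached attached above
#     hc_sigmasq = hcross.sigmasq(psd)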
Esempio n. 35
0
def get_td_from_freqtau(template=None, taper=None, **kwargs):
    """Return time domain ringdown with all the modes specified.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration taper * tau.
        This option is recommended with timescales taper=1./2 or 1. for
        time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the waveform
        when doing the fourier transform to the frequency domain. Setting
        taper will add a rapid ringup with timescale tau/10.
        Each mode and overtone will have a different taper depending on its tau,
        the final taper being the superposition of all the tapers.
    lmns : list
        Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
        The n specifies the number of overtones desired for the corresponding
        lm pair (maximum n=8).
        Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
    f_lmn: float
        Central frequency of the lmn overtone, as many as number of modes.
    tau_lmn: float
        Damping time of the lmn overtone, as many as number of modes.
    amp220 : float
        Amplitude of the fundamental 220 mode.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the 
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplus: TimeSeries
        The plus phase of a time-domain ringdown with the lm modes specified
        and n overtones.
    hcross: TimeSeries
        The cross phase of a time-domain ringdown with the lm modes specified
        and n overtones.
    """

    input_params = props(template, freqtau_required_args, **kwargs)

    # Get required args
    f_0, tau = lm_freqs_taus(**input_params)
    lmns = input_params['lmns']
    for lmn in lmns:
        if int(lmn[2]) == 0:
            raise ValueError('Number of overtones (nmodes) must be greater '
                             'than zero.')
    # following may not be in input_params
    inc = input_params.pop('inclination', 0.)
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if delta_t is None:
        delta_t = lm_deltat(f_0, tau, lmns)
    if t_final is None:
        t_final = lm_tfinal(tau, lmns)

    kmax = int(t_final / delta_t) + 1
    # Different overtones will have different tapering window-size
    # Find maximum window size to create long enough output vector
    if taper is not None:
        taper_window = int(taper*max(tau.values())/delta_t)
        kmax += taper_window

    outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    if taper is not None:
        start = - taper * max(tau.values())
        outplus._epoch, outcross._epoch = start, start

    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        hplus, hcross = get_td_lm(freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes,
                             taper=taper, inclination=inc, delta_t=delta_t,
                             t_final=t_final, **input_params)
        if taper is None:
            outplus.data += hplus.data
            outcross.data += hcross.data
        else:
            outplus = taper_shift(hplus, outplus)
            outcross = taper_shift(hcross, outcross)

    return outplus, outcross
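# Usage sketch (not from the original source; the keyword names follow the
# f_lmn / tau_lmn / amplmn / philmn pattern described in the docstring and
# the numerical values are purely illustrative):
#
#     hp, hc = get_td_from_freqtau(lmns=['221'], f_220=250.0, tau_220=0.004,
#                                  amp220=1e-21, phi220=0.0, inclination=0.0,
#                                  taper=1.0)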
Esempio n. 36
0
def get_fd_qnm(template=None, **kwargs):
    """Return a frequency domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    phi : float
        The initial phase of the ringdown.
    amp : float
        The amplitude of the ringdown (constant for now).
    t_0 :  {0, float}, optional
        The starting time of the ringdown.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude.
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude is 
        1/1000 of the peak amplitude.

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of the ringdown in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of the ringdown in frequency domain.
    """

    input_params = props(template, qnm_required_args, **kwargs)

    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following have defaults, and so will be populated
    t_0 = input_params.pop('t_0')
    # the following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = 1. / qnm_time_decay(tau, 1. / 1000)
    if f_lower is None:
        f_lower = delta_f
        kmin = 0
    else:
        kmin = int(f_lower / delta_f)
    if f_final is None:
        f_final = qnm_freq_decay(f_0, tau, 1. / 1000)
    if f_final > max_freq:
        f_final = max_freq
    kmax = int(f_final / delta_f) + 1

    freqs = numpy.arange(kmin, kmax) * delta_f

    denominator = 1 + (4j * pi * freqs *
                       tau) - (4 * pi_sq *
                               (freqs * freqs - f_0 * f_0) * tau * tau)
    norm = amp * tau / denominator
    if t_0 != 0:
        time_shift = numpy.exp(-1j * two_pi * freqs * t_0)
        norm *= time_shift

    # Analytical expression for the Fourier transform of the ringdown (damped sinusoid)
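    # Sketch of the underlying result: for a damped sinusoid switched on at
    # t = 0,
    #   h_+(t) = amp * exp(-t/tau) * cos(2*pi*f_0*t + phi)
    #   h_x(t) = amp * exp(-t/tau) * sin(2*pi*f_0*t + phi)
    # the one-sided Fourier transform (exp(-2*pi*i*f*t) convention) gives
    #   amp*tau*[(1 + 2*pi*i*f*tau)*cos(phi) - 2*pi*f_0*tau*sin(phi)] / D(f)
    # for the plus phase, the analogous sine/cosine combination for the cross
    # phase, and D(f) = 1 + 4*pi*i*f*tau - 4*pi^2*(f^2 - f_0^2)*tau^2, which
    # is the `denominator` defined above.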
    hp_tilde = norm * ((1 + 2j * pi * freqs * tau) * numpy.cos(phi) -
                       two_pi * f_0 * tau * numpy.sin(phi))
    hc_tilde = norm * ((1 + 2j * pi * freqs * tau) * numpy.sin(phi) +
                       two_pi * f_0 * tau * numpy.cos(phi))

    hplustilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                 delta_f=delta_f)
    hcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                  delta_f=delta_f)
    hplustilde.data[kmin:kmax] = hp_tilde
    hcrosstilde.data[kmin:kmax] = hc_tilde

    return hplustilde, hcrosstilde
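# Usage sketch (not from the original source; values are illustrative only).
# Only the physical parameters of the damped sinusoid need to be supplied,
# since t_0 carries a default:
#
#     hptilde, hctilde = get_fd_qnm(f_0=250.0, tau=0.004, amp=1e-21, phi=0.0,
#                                   delta_f=0.1, f_lower=20.0, f_final=2048.0)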
Esempio n. 37
0
    def values(self, corr_plus, corr_cross, snrv, psd, indices, template_plus,
               template_cross, u_vals, hplus_cross_corr, hpnorm, hcnorm):
        """ Calculate the chisq at points given by indices.

        Returns
        -------
        chisq: Array
            Chisq values, one for each sample index

        chisq_dof: Array
            Number of statistical degrees of freedom for the chisq test
            in the given template
        """
        if self.do:
            num_above = len(indices)
            if self.snr_threshold:
                above = abs(snrv) > self.snr_threshold
                num_above = above.sum()
                logging.info('%s above chisq activation threshold' % num_above)
                above_indices = indices[above]
                above_snrv = snrv[above]
                u_vals = u_vals[above]
                rchisq = numpy.zeros(len(indices), dtype=numpy.float32)
                dof = -100
            else:
                above_indices = indices
                above_snrv = snrv

            if num_above > 0:
                chisq = []
                curr_tmplt_mult_fac = 0.
                curr_corr_mult_fac = 0.
                if self.template_mem is None or \
                        (not len(self.template_mem) == len(template_plus)):
                    self.template_mem = zeros(
                        len(template_plus),
                        dtype=complex_same_precision_as(corr_plus))
                if self.corr_mem is None or \
                                (not len(self.corr_mem) == len(corr_plus)):
                    self.corr_mem = zeros(
                        len(corr_plus),
                        dtype=complex_same_precision_as(corr_plus))

                tmplt_data = template_cross.data
                corr_data = corr_cross.data
                numpy.copyto(self.template_mem.data, template_cross.data)
                numpy.copyto(self.corr_mem.data, corr_cross.data)
                template_cross._data = self.template_mem.data
                corr_cross._data = self.corr_mem.data

                for lidx, index in enumerate(above_indices):
                    above_local_indices = numpy.array([index])
                    above_local_snr = numpy.array([above_snrv[lidx]])
                    local_u_val = u_vals[lidx]
                    # Construct template from _plus and _cross
                    # Note that this modifies in place, so we store that and
                    # revert on the next pass.
                    template = template_cross.multiply_and_add(
                        template_plus, local_u_val - curr_tmplt_mult_fac)
                    curr_tmplt_mult_fac = local_u_val

                    template.f_lower = template_plus.f_lower
                    template.params = template_plus.params
                    # Construct the corr vector
                    norm_fac = local_u_val * local_u_val + 1
                    norm_fac += 2 * local_u_val * hplus_cross_corr
                    norm_fac = hcnorm / (norm_fac**0.5)
                    hp_fac = local_u_val * hpnorm / hcnorm
                    corr = corr_cross.multiply_and_add(
                        corr_plus, hp_fac - curr_corr_mult_fac)
                    curr_corr_mult_fac = hp_fac

                    bins = self.calculate_chisq_bins(template, psd)
                    dof = (len(bins) - 1) * 2 - 2
                    curr_chisq = power_chisq_at_points_from_precomputed(
                        corr, above_local_snr / norm_fac, norm_fac, bins,
                        above_local_indices)
                    chisq.append(curr_chisq[0])
                chisq = numpy.array(chisq)
                # Must reset corr and template to original values!
                template_cross._data = tmplt_data
                corr_cross._data = corr_data

            if self.snr_threshold:
                if num_above > 0:
                    rchisq[above] = chisq
            else:
                rchisq = chisq

            return rchisq, numpy.repeat(
                dof, len(indices))  # dof * numpy.ones_like(indices)
        else:
            return None, None
Esempio n. 38
0
def compute_max_snr_over_sky_loc_stat(hplus,
                                      hcross,
                                      hphccorr,
                                      hpnorm=None,
                                      hcnorm=None,
                                      out=None,
                                      thresh=0,
                                      analyse_slice=None):
    """
    Compute the maximized over sky location statistic.

    Parameters
    -----------
    hplus : TimeSeries
        This is the IFFTed complex SNR time series of (h+, data). If not
        normalized, supply the normalization factor so this can be done!
        It is recommended to normalize this before sending through this
        function
    hcross : TimeSeries
        This is the IFFTed complex SNR time series of (hx, data). If not
        normalized, supply the normalization factor so this can be done!
    hphccorr : float
        The real component of the overlap between the two polarizations
        Re[(h+, hx)]. Note that the imaginary component does not enter the
        detection statistic. This must be normalized and is sign-sensitive.
    thresh : float
        Used for optimization. If we do not care about the value of SNR
        values below thresh we can calculate a quick statistic that will
        always overestimate SNR and then only calculate the proper, more
        expensive, statistic at points where the quick SNR is above thresh.
    hpnorm : {None, float}, optional
        The normalization factor for hplus, related to (h+, h+).
        Default = None (=1, already normalized).
    hcnorm : {None, float}, optional
        The normalization factor for hcross, related to (hx, hx).
        Default = None (=1, already normalized).
    analyse_slice : {None, slice}, optional
        If given, only this slice of the SNR time series is searched when
        applying thresh.
    out : TimeSeries (optional, default=None)
        If given, use this array to store the output.

    Returns
    --------
    det_stat : TimeSeries
        The SNR maximized over sky location
    """
    # NOTE: Not much optimization has been done here! This may need to be
    # C-ified using scipy.weave.

    if out is None:
        out = zeros(len(hplus))
        # non_zero_locs holds indices into out, so use an integer dtype
        out.non_zero_locs = numpy.array([], dtype=numpy.int64)
    else:
        if not hasattr(out, 'non_zero_locs'):
            # Doing this every time is not a zero-cost operation
            out.data[:] = 0
            out.non_zero_locs = numpy.array([], dtype=numpy.int64)
        else:
            # Only set non zero locations to zero
            out.data[out.non_zero_locs] = 0

    # If threshold is given we can limit the points at which to compute the
    # full statistic
    if thresh:
        # This is the statistic that always overestimates the SNR...
        # It allows some unphysical freedom that the full statistic does not
        idx_p, _ = events.threshold(hplus[analyse_slice],
                                    thresh / (2**0.5 * hpnorm))
        idx_c, _ = events.threshold(hcross[analyse_slice],
                                    thresh / (2**0.5 * hcnorm))
        idx_p = idx_p + analyse_slice.start
        idx_c = idx_c + analyse_slice.start
        hp_red = hplus[idx_p] * hpnorm
        hc_red = hcross[idx_p] * hcnorm
        stat_p = hp_red.real**2 + hp_red.imag**2 + \
                     hc_red.real**2 + hc_red.imag**2
        locs_p = idx_p[stat_p > (thresh * thresh)]
        hp_red = hplus[idx_c] * hpnorm
        hc_red = hcross[idx_c] * hcnorm
        stat_c = hp_red.real**2 + hp_red.imag**2 + \
                     hc_red.real**2 + hc_red.imag**2
        locs_c = idx_c[stat_c > (thresh * thresh)]
        locs = numpy.unique(numpy.concatenate((locs_p, locs_c)))

        hplus = hplus[locs]
        hcross = hcross[locs]

    hplus = hplus * hpnorm
    hcross = hcross * hcnorm

    # Calculate and sanity check the denominator
    denom = 1 - hphccorr * hphccorr
    if denom < 0:
        if hphccorr > 1:
            err_msg = "Overlap between hp and hc is given as %f. " % (hphccorr)
            err_msg += "How can an overlap be bigger than 1?"
            raise ValueError(err_msg)
        else:
            err_msg = "There really is no way to raise this error!?! "
            err_msg += "If you're seeing this, it is bad."
            raise ValueError(err_msg)
    if denom == 0:
        # This case, of hphccorr==1, makes the statistic degenerate
        # This case should not physically be possible luckily.
        err_msg = "You have supplied a real overlap between hp and hc of 1. "
        err_msg += "Ian is reasonably certain this is physically impossible "
        err_msg += "so why are you seeing this?"
        raise ValueError(err_msg)

    assert (len(hplus) == len(hcross))

    # Now the stuff where comp. cost may be a problem
    hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \
                       numpy.imag(hplus) * numpy.imag(hplus)
    hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \
                       numpy.imag(hcross) * numpy.imag(hcross)
    rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + numpy.imag(
        hplus) * numpy.imag(hcross)

    sqroot = (hplus_magsq - hcross_magsq)**2
    sqroot += 4 * (hphccorr * hplus_magsq - rho_pluscross) * \
                  (hphccorr * hcross_magsq - rho_pluscross)
    # Sometimes this can be less than 0 due to numeric imprecision, catch this.
    if (sqroot < 0).any():
        indices = numpy.arange(len(sqroot))[sqroot < 0]
        # This should not be *much* smaller than 0 due to numeric imprecision
        if (sqroot[indices] < -0.0001).any():
            err_msg = "Square root has become negative. Something wrong here!"
            raise ValueError(err_msg)
        sqroot[indices] = 0
    sqroot = numpy.sqrt(sqroot)
    det_stat_sq = 0.5 * (hplus_magsq + hcross_magsq - \
                         2 * rho_pluscross*hphccorr + sqroot)

    det_stat = numpy.sqrt(det_stat_sq)

    if thresh:
        out.data[locs] = det_stat
        out.non_zero_locs = locs
        return out
    else:
        return Array(det_stat, copy=False)
Esempio n. 39
0
def imrphenomc_tmplt(**kwds):
    """ Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude
      Main Paper: arXiv:1005.3306
    """
    # Pull out the input arguments
    f_min = float128(kwds['f_lower'])
    f_max = float128(kwds['f_final'])
    delta_f = float128(kwds['delta_f'])
    distance = float128(kwds['distance'])
    mass1 = float128(kwds['mass1'])
    mass2 = float128(kwds['mass2'])
    spin1z = float128(kwds['spin1z'])
    spin2z = float128(kwds['spin2z'])

    if 'out' in kwds:
        out = kwds['out']
    else:
        out = None

    # Calculate binary parameters
    M = mass1 + mass2
    eta = mass1 * mass2 / (M * M)
    Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M)
    Xisum = 2. * Xi
    Xiprod = Xi * Xi
    Xi2 = Xi * Xi

    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec

    ## The units of distance given as input are taken to be Mpc. Converting to SI
    distance *= (
        1.0e6 * lal.PC_SI /
        (2. * sqrt(5. / (64. * lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI))

    # Check if the value of f_max is correctly given, else replace with the fCut
    # used in the PhenomB code in lalsimulation. The various coefficients come
    # from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and
    # Table I of http://arxiv.org/pdf/0712.0343
    if not f_max:
        f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM

    # Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of Main
    # paper.
    z101 = -2.417e-03
    z102 = -1.093e-03
    z111 = -1.917e-02
    z110 = 7.267e-02
    z120 = -2.504e-01

    z201 = 5.962e-01
    z202 = -5.600e-02
    z211 = 1.520e-01
    z210 = -2.970e+00
    z220 = 1.312e+01

    z301 = -3.283e+01
    z302 = 8.859e+00
    z311 = 2.931e+01
    z310 = 7.954e+01
    z320 = -4.349e+02

    z401 = 1.619e+02
    z402 = -4.702e+01
    z411 = -1.751e+02
    z410 = -3.225e+02
    z420 = 1.587e+03

    z501 = -6.320e+02
    z502 = 2.463e+02
    z511 = 1.048e+03
    z510 = 3.355e+02
    z520 = -5.115e+03

    z601 = -4.809e+01
    z602 = -3.643e+02
    z611 = -5.215e+02
    z610 = 1.870e+03
    z620 = 7.354e+02

    z701 = 4.149e+00
    z702 = -4.070e+00
    z711 = -8.752e+01
    z710 = -4.897e+01
    z720 = 6.665e+02

    z801 = -5.472e-02
    z802 = 2.094e-02
    z811 = 3.554e-01
    z810 = 1.151e-01
    z820 = 9.640e-01

    z901 = -1.235e+00
    z902 = 3.423e-01
    z911 = 6.062e+00
    z910 = 5.949e+00
    z920 = -1.069e+01

    eta2 = eta * eta
    Xi2 = Xiprod

    # Calculate alphas, gamma, deltas from Table II and Eq 5.14 of Main paper
    a1 = z101 * Xi + z102 * Xi2 + z111 * eta * Xi + z110 * eta + z120 * eta2
    a2 = z201 * Xi + z202 * Xi2 + z211 * eta * Xi + z210 * eta + z220 * eta2
    a3 = z301 * Xi + z302 * Xi2 + z311 * eta * Xi + z310 * eta + z320 * eta2
    a4 = z401 * Xi + z402 * Xi2 + z411 * eta * Xi + z410 * eta + z420 * eta2
    a5 = z501 * Xi + z502 * Xi2 + z511 * eta * Xi + z510 * eta + z520 * eta2
    a6 = z601 * Xi + z602 * Xi2 + z611 * eta * Xi + z610 * eta + z620 * eta2

    g1 = z701 * Xi + z702 * Xi2 + z711 * eta * Xi + z710 * eta + z720 * eta2

    del1 = z801 * Xi + z802 * Xi2 + z811 * eta * Xi + z810 * eta + z820 * eta2
    del2 = z901 * Xi + z902 * Xi2 + z911 * eta * Xi + z910 * eta + z920 * eta2

    # Get the spin of the final BH
    afin = FinalSpin(Xi, eta)
    Q = Qa(abs(afin))

    # Get the fRD
    frd = fRD(abs(afin), M)
    Mfrd = frd * m_sec

    # Define the frequencies where SPA->PM->RD
    f1 = 0.1 * frd
    Mf1 = m_sec * f1
    f2 = frd
    Mf2 = m_sec * f2
    d1 = 0.005
    d2 = 0.005
    f0 = 0.98 * frd
    Mf0 = m_sec * f0
    d0 = 0.015

    # Now use this frequency for calculation of betas
    # calculate beta1 and beta2, that appear in Eq 5.7 in the main paper.
    b2 = ((-5./3.)* a1 * pow(Mfrd,(-8./3.)) - a2/(Mfrd*Mfrd) - \
      (a3/3.)*pow(Mfrd,(-4./3.)) + (2./3.)* a5 * pow(Mfrd,(-1./3.)) + a6)/eta

    psiPMrd = (a1 * pow(Mfrd,(-5./3.)) + a2/Mfrd + a3 * pow(Mfrd,(-1./3.)) + \
      a4 + a5 * pow(Mfrd,(2./3.)) + a6 * Mfrd)/eta
    b1 = psiPMrd - (b2 * Mfrd)

    ### Calculate the PN coefficients, Eq A3 - A5 of main paper ###
    pfaN = 3.0 / (128.0 * eta)
    pfa2 = (3715. / 756.) + (55. * eta / 9.0)
    pfa3 = -16.0 * lal.PI + (113. / 3.) * Xi - 38. * eta * Xisum / 3.
    pfa4 = (152.93365/5.08032) - 50.*Xi2 + eta*(271.45/5.04 + 1.25*Xiprod) + \
        3085.*eta2/72.
    pfa5 = lal.PI*(386.45/7.56 - 65.*eta/9.) - \
        Xi*(735.505/2.268 + 130.*eta/9.) + Xisum*(1285.0*eta/8.1 + 170.*eta2/9.) - \
        10.*Xi2*Xi/3. + 10.*eta*Xi*Xiprod
    pfa6 = 11583.231236531/4.694215680 - 640.0*lal.PI*lal.PI/3. - \
        6848.0*lal.GAMMA/21. - 684.8*log(64.)/6.3 + \
        eta*(2255.*lal.PI*lal.PI/12. - 15737.765635/3.048192) + \
        76.055*eta2/1.728 - (127.825*eta2*eta/1.296) + \
        2920.*lal.PI*Xi/3. - (175. - 1490.*eta)*Xi2/3. - \
        (1120.*lal.PI/3. - 1085.*Xi/3.)*eta*Xisum + \
        (269.45*eta/3.36 - 2365.*eta2/6.)*Xiprod

    pfa6log = -6848. / 63.

    pfa7 = lal.PI*(770.96675/2.54016 + 378.515*eta/1.512 - 740.45*eta2/7.56) - \
        Xi*(20373.952415/3.048192 + 1509.35*eta/2.24 - 5786.95*eta2/4.32) + \
        Xisum*(4862.041225*eta/1.524096 + 1189.775*eta2/1.008 - 717.05*eta2*eta/2.16 - 830.*eta*Xi2/3. + 35.*eta2*Xiprod/3.) - \
        560.*lal.PI*Xi2 + 20.*lal.PI*eta*Xiprod + \
        Xi2*Xi*(945.55/1.68 - 85.*eta) + Xi*Xiprod*(396.65*eta/1.68 + 255.*eta2)

    xdotaN = 64. * eta / 5.
    xdota2 = -7.43 / 3.36 - 11. * eta / 4.
    xdota3 = 4. * lal.PI - 11.3 * Xi / 1.2 + 19. * eta * Xisum / 6.
    xdota4 = 3.4103 / 1.8144 + 5 * Xi2 + eta * (13.661 / 2.016 -
                                                Xiprod / 8.) + 5.9 * eta2 / 1.8
    xdota5 = -lal.PI*(41.59/6.72 + 189.*eta/8.) - Xi*(31.571/1.008 - 116.5*eta/2.4) + \
          Xisum*(21.863*eta/1.008 - 79.*eta2/6.) - 3*Xi*Xi2/4. + \
          9.*eta*Xi*Xiprod/4.
    xdota6 = 164.47322263/1.39708800 - 17.12*lal.GAMMA/1.05 + \
          16.*lal.PI*lal.PI/3 - 8.56*log(16.)/1.05 + \
          eta*(45.1*lal.PI*lal.PI/4.8 - 561.98689/2.17728) + \
          5.41*eta2/8.96 - 5.605*eta*eta2/2.592 - 80.*lal.PI*Xi/3. + \
          eta*Xisum*(20.*lal.PI/3. - 113.5*Xi/3.6) + \
          Xi2*(64.153/1.008 - 45.7*eta/3.6) - \
          Xiprod*(7.87*eta/1.44 - 30.37*eta2/1.44)

    xdota6log = -856. / 105.

    xdota7 = -lal.PI*(4.415/4.032 - 358.675*eta/6.048 - 91.495*eta2/1.512) - \
          Xi*(252.9407/2.7216 - 845.827*eta/6.048 + 415.51*eta2/8.64) + \
          Xisum*(158.0239*eta/5.4432 - 451.597*eta2/6.048 + 20.45*eta2*eta/4.32 + 107.*eta*Xi2/6. - 5.*eta2*Xiprod/24.) + \
          12.*lal.PI*Xi2 - Xi2*Xi*(150.5/2.4 + eta/8.) + \
          Xi*Xiprod*(10.1*eta/2.4 + 3.*eta2/8.)

    AN = 8. * eta * sqrt(lal.PI / 5.)
    A2 = (-107. + 55. * eta) / 42.
    A3 = 2. * lal.PI - 4. * Xi / 3. + 2. * eta * Xisum / 3.
    A4 = -2.173 / 1.512 - eta * (10.69 / 2.16 -
                                 2. * Xiprod) + 2.047 * eta2 / 1.512
    A5 = -10.7 * lal.PI / 2.1 + eta * (3.4 * lal.PI / 2.1)

    A5imag = -24. * eta

    A6 = 270.27409/6.46800 - 8.56*lal.GAMMA/1.05 + \
      2.*lal.PI*lal.PI/3. + \
      eta*(4.1*lal.PI*lal.PI/9.6 - 27.8185/3.3264) - \
      20.261*eta2/2.772 + 11.4635*eta*eta2/9.9792 - \
      4.28*log(16.)/1.05

    A6log = -428. / 105.

    A6imag = 4.28 * lal.PI / 1.05

    ### Define other parameters needed by waveform generation ###
    kmin = int(f_min / delta_f)
    kmax = int(f_max / delta_f)
    n = kmax + 1

    if not out:
        htilde = FrequencySeries(zeros(n, dtype=numpy.complex128),
                                 delta_f=delta_f,
                                 copy=False)
    else:
        if type(out) is not Array:
            raise TypeError("Output must be an instance of Array")
        if len(out) < kmax:
            raise TypeError("Output array is too small")
        if out.dtype != complex64:
            raise TypeError("Output array is the wrong dtype")
        htilde = FrequencySeries(out, delta_f=delta_f, copy=False)

    phenomC_kernel(htilde.data[kmin:kmax], kmin, delta_f, eta, Xi, distance,
                   m_sec, piM, Mfrd, pfaN, pfa2, pfa3, pfa4, pfa5, pfa6,
                   pfa6log, pfa7, a1, a2, a3, a4, a5, a6, b1, b2, Mf1, Mf2,
                   Mf0, d1, d2, d0, xdota2, xdota3, xdota4, xdota5, xdota6,
                   xdota6log, xdota7, xdotaN, AN, A2, A3, A4, A5, A5imag, A6,
                   A6log, A6imag, g1, del1, del2, Q)
    hp = htilde
    hc = htilde * 1j
    return hp, hc
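# Usage sketch (not from the original source; values are illustrative and a
# CUDA-capable pycbc scheme is assumed, since phenomC_kernel generates the
# phase and amplitude on the GPU). Passing f_final=0 lets the function fall
# back to the PhenomB fCut estimate above:
#
#     hp, hc = imrphenomc_tmplt(mass1=30.0, mass2=25.0, spin1z=0.0, spin2z=0.0,
#                               f_lower=20.0, f_final=0., delta_f=0.125,
#                               distance=400.0)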
Esempio n. 40
0
def get_fd_lm_allmodes(template=None, **kwargs):
    """Return frequency domain ringdown with all the modes specified.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    final_mass : float
        Mass of the final black hole.
    final_spin : float
        Spin of the final black hole.
    lmns : list
        Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
        The n specifies the number of overtones desired for the corresponding
        lm pair (maximum n=8).
        Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
    amplmn : float
        Amplitude of the lmn overtone, as many as the number of modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).
    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a ringdown with the lm modes specified and
        n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a ringdown with the lm modes specified and
        n overtones in frequency domain.
    """

    input_params = props(template, lm_allmodes_required_args, **kwargs)

    # Get required args
    final_mass = input_params['final_mass']
    final_spin = input_params['final_spin']
    lmns = input_params['lmns']
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = lm_deltaf(final_mass, final_spin, lmns)
    if f_final is None:
        f_final = lm_ffinal(final_mass, final_spin, lmns)
    if f_lower is None:
        f_lower = delta_f
    kmax = int(f_final / delta_f) + 1

    outplustilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                   delta_f=delta_f)
    outcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128),
                                    delta_f=delta_f)
    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        hplustilde, hcrosstilde = get_fd_lm(l=l,
                                            m=m,
                                            nmodes=nmodes,
                                            delta_f=delta_f,
                                            f_lower=f_lower,
                                            f_final=f_final,
                                            **input_params)
        outplustilde.data += hplustilde.data
        outcrosstilde.data += hcrosstilde.data

    return outplustilde, outcrosstilde
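# Usage sketch (not from the original source; the amplitude/phase keyword
# names follow the amplmn / philmn pattern in the docstring and the values
# are illustrative):
#
#     hptilde, hctilde = get_fd_lm_allmodes(final_mass=65.0, final_spin=0.68,
#                                           lmns=['221'], amp220=1e-21,
#                                           phi220=0.0)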
Esempio n. 41
0
def get_fd_lm(template=None, **kwargs):
    """Return frequency domain lm mode with a given number of overtones.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    final_mass : float
        Mass of the final black hole.
    final_spin : float
        Spin of the final black hole.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amplmn : float
        Amplitude of the lmn overtone, as many as the number of nmodes.
    philmn : float
        Phase of the lmn overtone, as many as the number of nmodes.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).
    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a lm mode with n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a lm mode with n overtones in frequency domain.
    """

    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    final_mass = input_params.pop('final_mass')
    final_spin = input_params.pop('final_spin')
    l, m = input_params.pop('l'), input_params.pop('m')
    nmodes = input_params.pop('nmodes')
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    if delta_f is None:
        delta_f = lm_deltaf(final_mass, final_spin,
                            ['%d%d%d' % (l, m, nmodes)])
    if f_final is None:
        f_final = lm_ffinal(final_mass, final_spin,
                            ['%d%d%d' % (l, m, nmodes)])
    kmax = int(f_final / delta_f) + 1

    outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
    outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)

    f_0, tau = get_lm_f0tau(final_mass, final_spin, l, m, nmodes)
    for n in range(nmodes):
        hplus, hcross = get_fd_qnm(template=None,
                                   f_0=f_0[n],
                                   tau=tau[n],
                                   phi=phis['%d%d%d' % (l, m, n)],
                                   amp=amps['%d%d%d' % (l, m, n)],
                                   delta_f=delta_f,
                                   f_lower=f_lower,
                                   f_final=f_final)
        outplus.data += hplus.data
        outcross.data += hcross.data

    return outplus, outcross
Esempio n. 42
0
def bank_chisq_from_filters(tmplt_snr,
                            tmplt_norm,
                            bank_snrs,
                            bank_norms,
                            tmplt_bank_matches,
                            indices=None):
    """ This function calculates and returns a TimeSeries object containing the
    bank veto calculated over a segment.

    Parameters
    ----------
    tmplt_snr: TimeSeries
        The SNR time series from filtering the segment against the current
        search template
    tmplt_norm: float
        The normalization factor for the search template
    bank_snrs: list of TimeSeries
        The precomputed list of SNR time series between each of the bank veto
        templates and the segment
    bank_norms: list of floats
        The normalization factors for the list of bank veto templates
        (usually this will be the same for all bank veto templates)
    tmplt_bank_matches: list of floats
        The complex overlap between the search template and each
        of the bank templates
    indices: {None, Array}, optional
        Array of indices into the snr time series. If given, the bank chisq
        will only be calculated at these values.

    Returns
    -------
    bank_chisq: TimeSeries of the bank vetos
    """
    if indices is not None:
        tmplt_snr = Array(tmplt_snr, copy=False)
        bank_snrs_tmp = []
        for bank_snr in bank_snrs:
            bank_snrs_tmp.append(bank_snr.take(indices))
        bank_snrs = bank_snrs_tmp

    # Initialise bank_chisq as 0s everywhere
    bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))

    # Loop over all the bank templates
    for i in range(len(bank_snrs)):
        bank_match = tmplt_bank_matches[i]
        if (abs(bank_match) > 0.99):
            # Not much point calculating bank_chisquared if the bank template
            # is very close to the filter template. Can also hit numerical
            # error due to approximations made in this calculation.
            # The value of 2 is the expected addition to the chisq for this
            # template
            bank_chisq += 2.
            continue
        bank_norm = sqrt((1 - bank_match * bank_match.conj()).real)

        bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
        tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)

        bank_SNR = Array(bank_SNR, copy=False)
        tmplt_SNR = Array(tmplt_SNR, copy=False)
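        # Sketch of the quantity accumulated below: with rho denoting the
        # normalised complex SNRs, each bank template adds
        #   |rho_bank_i - match_i.conj() * rho_tmplt|^2 / (1 - |match_i|^2)
        # which in Gaussian noise is expected to contribute ~2 to the chisq
        # (two degrees of freedom per bank template), consistent with the
        # shortcut value used above for nearly-degenerate bank templates.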

        bank_chisq += (bank_SNR - tmplt_SNR).squared_norm()

    if indices is not None:
        return bank_chisq
    else:
        return TimeSeries(bank_chisq,
                          delta_t=tmplt_snr.delta_t,
                          epoch=tmplt_snr.start_time,
                          copy=False)
Esempio n. 43
0
    def __getitem__(self, index):
        approximant = self.approximant(index)
        f_end = self.end_frequency(index)

        # Determine the length of time of the filter, rounded up to
        # nearest power of two
        min_buffer = 1.0 + self.minimum_buffer

        from pycbc.waveform.waveform import props
        buff_size = pycbc.waveform.get_waveform_filter_length_in_time(
            approximant, f_lower=self.f_lower, **props(self.table[index]))
        tlen = nearest_larger_binary_number(
            (buff_size + min_buffer) * self.sample_rate)
        flen = int(tlen) // 2 + 1

        delta_f = self.sample_rate / float(tlen)

        if f_end is None or f_end >= (flen * delta_f):
            f_end = (flen - 1) * delta_f

        logging.info("Generating %s, %ss, %i" %
                     (approximant, 1.0 / delta_f, index))

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            zeros(flen, dtype=numpy.complex64),
            self.table[index],
            approximant=approximant,
            f_lower=self.f_lower,
            f_final=f_end,
            delta_f=delta_f,
            delta_t=1.0 / self.sample_rate,
            distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be
        # erased by the type conversion below.
        # NOTE: If these durations are not available the values in self.table
        #       will continue to take the values in the input file.
        if hasattr(htilde, 'length_in_time'):
            if htilde.length_in_time is not None:
                self.table[index].ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            if htilde.chirp_length is not None:
                self.table[index].template_duration = htilde.chirp_length

        htilde = htilde.astype(numpy.complex64)
        htilde.f_lower = self.f_lower
        htilde.end_frequency = f_end
        htilde.end_idx = int(htilde.end_frequency / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.approximant = approximant
        htilde.chirp_length = htilde.params.template_duration
        htilde.length_in_time = htilde.params.ttotal

        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        return htilde
Esempio n. 44
0
def inverse_spectrum_truncation(psd,
                                max_filter_len,
                                low_frequency_cutoff=None,
                                trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, int}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError(
            'low_frequency_cutoff must be within the bandwidth of the PSD')

    N = (len(psd) - 1) * 2

    inv_asd = FrequencySeries(zeros(len(psd)), delta_f=psd.delta_f, \
        dtype=complex_same_precision_as(psd))

    kmin = 1
    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)

    inv_asd[kmin:N // 2] = (1.0 / psd[kmin:N // 2])**0.5
    q = TimeSeries(numpy.zeros(N), delta_t=1.0 / (N * psd.delta_f), \
        dtype=real_same_precision_as(psd))
    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2
    if trunc_end < trunc_start:
        raise ValueError('Invalid value in inverse_spectrum_truncation')

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[-trunc_start:]
        q[trunc_end:N] *= trunc_window[0:max_filter_len // 2]

    if trunc_start < trunc_end:
        q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
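# Usage sketch (not from the original source): truncating the inverse PSD of
# an analytic aLIGO spectrum so its inverse-square-root filter is 4 s long.
# pycbc.psd.aLIGOZeroDetHighPower and the numerical values are illustrative
# assumptions.
#
#     import pycbc.psd
#     sample_rate, seg_len, flow = 4096, 256, 20.0
#     delta_f = 1.0 / seg_len
#     flen = int(sample_rate / (2 * delta_f)) + 1
#     psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
#     psd = inverse_spectrum_truncation(psd, max_filter_len=4 * sample_rate,
#                                       low_frequency_cutoff=flow,
#                                       trunc_method='hann')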
Esempio n. 45
0
def matched_filter_core(template,
                        data,
                        psd=None,
                        low_frequency_cutoff=None,
                        high_frequency_cutoff=None,
                        h_norm=None,
                        out=None,
                        corr_out=None):
    """ Return the complex snr and normalization. 
    
    Return the complex snr, along with its associated normalization of the template,
    matched filtered against the data. 

    Parameters
    ----------
    template : TimeSeries or FrequencySeries 
        The template waveform
    data : TimeSeries or FrequencySeries 
        The strain data to be filtered.
    psd : {FrequencySeries}, optional
        The noise weighting of the filter.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
    h_norm : {None, float}, optional
        The template normalization. If none, this value is calculated internally.
    out : {None, Array}, optional
        An array to use as memory for snr storage. If None, memory is allocated 
        internally.
    corr_out : {None, Array}, optional
        An array to use as memory for correlation storage. If None, memory is
        allocated internally. If provided, management of the vector is handled
        externally by the caller and no zeroing is done internally.

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr. 
    correlation: FrequencySeries
        A frequency series containing the correlation vector. 
    norm : float
        The normalization of the complex snr.  
    """
    if corr_out is not None:
        _qtilde = corr_out
    else:
        global _qtilde_t
        _qtilde = _qtilde_t

    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    N = (len(stilde) - 1) * 2
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff, stilde.delta_f, N)

    if out is None:
        _q = zeros(N, dtype=complex_same_precision_as(data))
    elif (len(out) == N) and type(out) is Array and out.kind == 'complex':
        _q = out
    else:
        raise TypeError('Invalid Output Vector: wrong length or dtype')

    if corr_out is not None:
        pass
    elif (_qtilde is None) or (len(_qtilde) !=
                               N) or _qtilde.dtype != data.dtype:
        _qtilde_t = _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.clear()

    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f:
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")

    ifft(_qtilde, _q)

    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff,
                         high_frequency_cutoff)

    norm = (4.0 * stilde.delta_f) / sqrt(h_norm)
    delta_t = 1.0 / (N * stilde.delta_f)

    return (TimeSeries(_q, epoch=stilde._epoch, delta_t=delta_t, copy=False),
            FrequencySeries(_qtilde,
                            epoch=stilde._epoch,
                            delta_f=htilde.delta_f,
                            copy=False), norm)
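# Usage sketch (not from the original source): htilde, stilde and psd are
# assumed to be FrequencySeries of matching length and delta_f, e.g. built
# with make_frequency_series and a PSD estimate for the same segment.
#
#     snr, corr, norm = matched_filter_core(htilde, stilde, psd=psd,
#                                           low_frequency_cutoff=20.0)
#     normalised_snr = abs(snr) * norm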
Esempio n. 46
0
#=================================================================================
# Plot a time domain and fourier domain waveform together in the time domain.
# Note that without special cleanup the Fourier domain waveform will exhibit
# the Gibbs phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon)

# Code taken from <https://ligo-cbc.github.io/pycbc/latest/html/waveform.html>
#=================================================================================
from pycbc import types, fft, waveform

# Get a time domain waveform
hp, hc = waveform.get_td_waveform(approximant="EOBNRv2",
                             mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40)

# Get a frequency domain waveform
sptilde, sctilde = waveform.get_fd_waveform(approximant="TaylorF2",
                             mass1=6, mass2=6, delta_f=1.0/4, f_lower=40)

# FFT it to the time-domain
tlen = int(1.0 / hp.delta_t / sptilde.delta_f)
sptilde.resize(tlen//2 + 1)
sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t)
fft.ifft(sptilde, sp)

print(60*'=')
print("EOBNRv2", hc[2321])
print("TaylorF2 sptilde", sptilde[2321])
print("TaylorF2 sp", sp[2321])
print(60*'=')
Esempio n. 47
0
    def __init__(self,
                 low_frequency_cutoff,
                 high_frequency_cutoff,
                 snr_threshold,
                 tlen,
                 delta_f,
                 dtype,
                 segment_list,
                 template_output,
                 use_cluster,
                 downsample_factor=1,
                 upsample_threshold=1,
                 upsample_method='pruned_fft',
                 gpu_callback_method='none'):
        """ Create a matched filter engine.

        Parameters
        ----------
        low_frequency_cutoff : {None, float}, optional
            The frequency to begin the filter calculation. If None, begin at the
            first frequency after DC.
        high_frequency_cutoff : {None, float}, optional
            The frequency to stop the filter calculation. If None, continue to the
            the nyquist frequency.
        snr_threshold : float
            The minimum snr to return when filtering
        segment_list : list
            List of FrequencySeries that are the Fourier-transformed data segments
        template_output : complex64
            Array of memory given as the 'out' parameter to waveform.FilterBank
        use_cluster : boolean
            If true, cluster triggers above threshold using a window; otherwise,
            only apply a threshold.
        downsample_factor : {1, int}, optional
            The factor by which to reduce the sample rate when doing a hierarchical
            matched filter
        upsample_threshold : {1, float}, optional
            The fraction of the snr_threshold to trigger on the subsampled filter.
        upsample_method : {pruned_fft, str}
            The method to upsample or interpolate the reduced rate filter.
        """
        # Assuming analysis time is constant across templates and segments, also
        # delta_f is constant across segments.
        self.tlen = tlen
        self.flen = self.tlen // 2 + 1
        self.delta_f = delta_f
        self.dtype = dtype
        self.snr_threshold = snr_threshold
        self.flow = low_frequency_cutoff
        self.fhigh = high_frequency_cutoff
        self.gpu_callback_method = gpu_callback_method

        if downsample_factor == 1:
            self.snr_mem = zeros(self.tlen, dtype=self.dtype)
            self.corr_mem = zeros(self.tlen, dtype=self.dtype)
            self.segments = segment_list

            if use_cluster:
                self.matched_filter_and_cluster = self.full_matched_filter_and_cluster
                # set up the thresholding/clustering operations for each segment
                self.threshold_and_clusterers = []
                for seg in self.segments:
                    thresh = events.ThresholdCluster(self.snr_mem[seg.analyze])
                    self.threshold_and_clusterers.append(thresh)
            else:
                self.matched_filter_and_cluster = self.full_matched_filter_thresh_only

            # Assuming analysis time is constant across templates and segments, also
            # delta_f is constant across segments.
            self.htilde = template_output
            self.kmin, self.kmax = get_cutoff_indices(self.flow, self.fhigh,
                                                      self.delta_f, self.tlen)

            # Set up the correlation operations for each analysis segment
            corr_slice = slice(self.kmin, self.kmax)
            self.correlators = []
            for seg in self.segments:
                corr = Correlator(self.htilde[corr_slice], seg[corr_slice],
                                  self.corr_mem[corr_slice])
                self.correlators.append(corr)

            # setup up the ifft we will do
            self.ifft = IFFT(self.corr_mem, self.snr_mem)

        elif downsample_factor > 1:
            self.matched_filter_and_cluster = self.heirarchical_matched_filter_and_cluster
            self.downsample_factor = downsample_factor
            self.upsample_method = upsample_method
            self.upsample_threshold = upsample_threshold

            N_full = self.tlen
            N_red = N_full // downsample_factor
            self.kmin_full, self.kmax_full = get_cutoff_indices(
                self.flow, self.fhigh, self.delta_f, N_full)

            self.kmin_red, _ = get_cutoff_indices(self.flow, self.fhigh,
                                                  self.delta_f, N_red)

            if self.kmax_full < N_red:
                self.kmax_red = self.kmax_full
            else:
                self.kmax_red = N_red - 1

            self.snr_mem = zeros(N_red, dtype=self.dtype)
            self.corr_mem_full = FrequencySeries(zeros(N_full,
                                                       dtype=self.dtype),
                                                 delta_f=self.delta_f)
            self.corr_mem = Array(self.corr_mem_full[0:N_red], copy=False)
            self.inter_vec = zeros(N_full, dtype=self.dtype)

        else:
            raise ValueError("Invalid downsample factor")
Esempio n. 48
0
    def template_segment_checker(self, bank, t_num, segment, start_time):
        """Test if injections in segment are worth filtering with template.

        Using the current template, the current segment, and the injections
        within that segment, test whether the template is sufficiently
        "similar" to any of the injections to justify actually performing a
        matched-filter call. There are two parts to this test: first we check
        if the chirp time of
        the template is within a provided window of any of the injections. If
        not then stop here, it is not worth filtering this template, segment
        combination for this injection set. If this check passes we compute a
        match between a coarse representation of the template and a coarse
        representation of each of the injections. If that match is above a
        user-provided value for any of the injections then filtering can
        proceed. This is currently only available if using frequency-domain
        templates.

        Parameters
        -----------
        FIXME

        Returns
        --------
        FIXME
        """
        if not self.enabled:
            # If disabled, always filter (ie. return True)
            return True

        # Get times covered by segment analyze
        sample_rate = 2. * (len(segment) - 1) * segment.delta_f
        cum_ind = segment.cumulative_index
        diff = segment.analyze.stop - segment.analyze.start
        seg_start_time = cum_ind / sample_rate + start_time
        seg_end_time = (cum_ind + diff) / sample_rate + start_time
        # And add buffer
        seg_start_time = seg_start_time - self.seg_buffer
        seg_end_time = seg_end_time + self.seg_buffer

        # Chirp time test
        if self.chirp_time_window is not None:
            m1 = bank.table[t_num]['mass1']
            m2 = bank.table[t_num]['mass2']
            tau0_temp, _ = mass1_mass2_to_tau0_tau3(m1, m2, self.f_lower)
            for inj in self.injection_params.table:
                end_time = inj.geocent_end_time + \
                    1E-9 * inj.geocent_end_time_ns
                if not(seg_start_time <= end_time <= seg_end_time):
                    continue
                tau0_inj, _ = \
                    mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
                                             self.f_lower)
                tau_diff = abs(tau0_temp - tau0_inj)
                if tau_diff <= self.chirp_time_window:
                    break
            else:
                # Gets here if all injections are outside the chirp-time window
                return False

        # Coarse match test
        if self.match_threshold:
            if self._short_template_mem is None:
                # Set the memory for the short templates
                wav_len = 1 + int(self.coarsematch_fmax /
                                  self.coarsematch_deltaf)
                self._short_template_mem = zeros(wav_len, dtype=np.complex64)

            # Set the current short PSD to red_psd
            try:
                red_psd = self._short_psd_storage[id(segment.psd)]
            except KeyError:
                # PSD doesn't exist yet, so make it!
                curr_psd = segment.psd.numpy()
                step_size = int(self.coarsematch_deltaf / segment.psd.delta_f)
                max_idx = int(self.coarsematch_fmax / segment.psd.delta_f) + 1
                red_psd_data = curr_psd[:max_idx:step_size]
                red_psd = FrequencySeries(red_psd_data, copy=False,
                                          delta_f=self.coarsematch_deltaf)
                # Key on the original PSD object so the lookup above can hit
                self._short_psd_storage[id(segment.psd)] = red_psd

            # Set htilde to be the current short template
            if not t_num == self._short_template_id:
                # Set the memory for the short templates if unset
                if self._short_template_mem is None:
                    wav_len = 1 + int(self.coarsematch_fmax /
                                      self.coarsematch_deltaf)
                    self._short_template_mem = zeros(wav_len,
                                                     dtype=np.complex64)
                # Generate short waveform
                htilde = bank.generate_with_delta_f_and_max_freq(
                    t_num, self.coarsematch_fmax, self.coarsematch_deltaf,
                    low_frequency_cutoff=self.f_lower,
                    cached_mem=self._short_template_mem)
                self._short_template_id = t_num
                self._short_template_wav = htilde
            else:
                htilde = self._short_template_wav

            for inj in self.injection_params.table:
                end_time = inj.geocent_end_time + \
                    1E-9 * inj.geocent_end_time_ns
                if not(seg_start_time < end_time < seg_end_time):
                    continue
                curr_inj = self.short_injections[inj.simulation_id]
                o, _ = match(htilde, curr_inj, psd=red_psd,
                             low_frequency_cutoff=self.f_lower)
                if o > self.match_threshold:
                    break
            else:
                # Gets here only if no injection exceeds the match threshold
                return False

        return True
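# --- Illustrative sketch (not part of the example above) ---
# The chirp-time test compares the Newtonian chirp time tau0 of the template
# with that of each injection. A minimal stand-alone approximation of tau0,
# assuming the standard leading-order formula (constants hard-coded rather
# than taken from lal), is:
import numpy as np

G_SI = 6.674e-11      # m^3 kg^-1 s^-2
C_SI = 2.998e8        # m s^-1
MSUN_SI = 1.989e30    # kg

def tau0_newtonian(mass1, mass2, f_lower):
    """Leading-order chirp time in seconds; masses in solar masses,
    f_lower in Hz."""
    mtotal = (mass1 + mass2) * MSUN_SI
    eta = mass1 * mass2 / (mass1 + mass2) ** 2
    v = (np.pi * G_SI * mtotal * f_lower / C_SI ** 3) ** (1.0 / 3.0)
    return 5.0 / (256.0 * np.pi * f_lower * eta * v ** 5)

# e.g. tau0_newtonian(1.4, 1.4, 30.0) is roughly 54 seconds.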
Esempio n. 49
0
def power_chisq_from_precomputed(corr,
                                 snr,
                                 snr_norm,
                                 bins,
                                 indices=None,
                                 return_bins=False):
    """Calculate the chisq timeseries from precomputed values.

    This function calculates the chisq at all times by performing an
    inverse FFT of each bin.

    Parameters
    ----------

    corr: FrequencySeries
        The product of the template and data in the frequency domain.
    snr: TimeSeries
        The unnormalized snr time series.
    snr_norm : float
        The snr normalization factor; the properly normalized snr is
        snr * snr_norm.
    bins: List of integers
        The edges of the chisq bins.
    indices: {Array, None}, optional
        Index values into snr that indicate where to calculate
        chisq values. If None, calculate chisq for all possible indices.
    return_bins: {boolean, False}, optional
        Return a list of the SNRs for each chisq bin.

    Returns
    -------
    chisq: TimeSeries
    """
    # Get workspace memory
    global _q_l, _qtilde_l, _chisq_l

    bin_snrs = []

    if _q_l is None or len(_q_l) != len(snr):
        q = zeros(len(snr), dtype=complex_same_precision_as(snr))
        qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr))
        _q_l = q
        _qtilde_l = qtilde
    else:
        q = _q_l
        qtilde = _qtilde_l

    if indices is not None:
        snr = snr.take(indices)

    if _chisq_l is None or len(_chisq_l) < len(snr):
        chisq = zeros(len(snr), dtype=real_same_precision_as(snr))
        _chisq_l = chisq
    else:
        chisq = _chisq_l[0:len(snr)]
        chisq.clear()

    num_bins = len(bins) - 1

    for j in range(num_bins):
        k_min = int(bins[j])
        k_max = int(bins[j + 1])

        qtilde[k_min:k_max] = corr[k_min:k_max]
        pycbc.fft.ifft(qtilde, q)
        qtilde[k_min:k_max].clear()

        if return_bins:
            bin_snrs.append(
                TimeSeries(q * snr_norm * num_bins**0.5,
                           delta_t=snr.delta_t,
                           epoch=snr.start_time))

        if indices is not None:
            chisq_accum_bin(chisq, q.take(indices))
        else:
            chisq_accum_bin(chisq, q)

    chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm**2.0)

    if indices is None:
        chisq = TimeSeries(chisq,
                           delta_t=snr.delta_t,
                           epoch=snr.start_time,
                           copy=False)

    if return_bins:
        return chisq, bin_snrs
    else:
        return chisq
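# --- Hedged usage sketch ---
# One plausible way to obtain the precomputed inputs is via PyCBC's
# matched_filter_core (which returns the unnormalized SNR, the correlation
# vector, and the normalization) and power_chisq_bins. The exact signatures
# should be checked against the installed PyCBC version; this is a sketch,
# not the canonical calling sequence.
from pycbc.filter import matched_filter_core
from pycbc.vetoes import power_chisq_bins

def chisq_for_template(htilde, stilde, psd, num_bins, f_lower):
    snr, corr, norm = matched_filter_core(htilde, stilde, psd=psd,
                                          low_frequency_cutoff=f_lower)
    bins = power_chisq_bins(htilde, num_bins, psd,
                            low_frequency_cutoff=f_lower)
    # The resulting statistic has 2 * num_bins - 2 degrees of freedom
    # per sample.
    return power_chisq_from_precomputed(corr, snr, norm, bins)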
Esempio n. 50
0
def plan(size, idtype, odtype, direction, mlvl, aligned, nthreads, inplace):
    if not _fftw_threaded_set:
        set_threads_backend()
    if nthreads != _fftw_current_nthreads:
        _fftw_plan_with_nthreads(nthreads)
    # Convert a measure-level to flags
    flags = get_flag(mlvl, aligned)

    # We make our arrays of the necessary type and size.  Things can be
    # tricky, especially for in-place transforms with one of input or
    # output real.
    if (idtype == odtype):
        # We're in the complex-to-complex case, so lengths are the same
        ip = zeros(size, dtype=idtype)
        if inplace:
            op = ip
        else:
            op = zeros(size, dtype=odtype)
    elif (idtype.kind == 'c') and (odtype.kind == 'f'):
        # Complex-to-real (reverse), so size is length of real array.
        # However the complex array may be larger (in bytes) and
        # should therefore be allocated first and reused for an in-place
        # transform
        ip = zeros(size // 2 + 1, dtype=idtype)
        if inplace:
            op = ip.view(dtype=odtype)[0:size]
        else:
            op = zeros(size, dtype=odtype)
    else:
        # Real-to-complex (forward), and size is still that of real.
        # However it is still true that the complex array may be larger
        # (in bytes) and should therefore be allocated first and reused
        # for an in-place transform
        op = zeros(size // 2 + 1, dtype=odtype)
        if inplace:
            ip = op.view(dtype=idtype)[0:size]
        else:
            ip = zeros(size, dtype=idtype)

    # Get the plan function
    idtype = _np.dtype(idtype)
    odtype = _np.dtype(odtype)
    f = plan_function[str(idtype)][str(odtype)]
    f.restype = ctypes.c_void_p

    # handle the C2C cases (forward and reverse)
    if idtype.kind == odtype.kind:
        f.argtypes = [
            ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
            ctypes.c_int
        ]
        theplan = f(size, ip.ptr, op.ptr, direction, flags)
    # handle the R2C and C2R case
    else:
        f.argtypes = [
            ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int
        ]
        theplan = f(size, ip.ptr, op.ptr, flags)

    # We don't need ip or op anymore
    del ip, op

    # Make the destructors
    if idtype.char in ['f', 'F']:
        destroy = float_lib.fftwf_destroy_plan
    else:
        destroy = double_lib.fftw_destroy_plan

    destroy.argtypes = [ctypes.c_void_p]
    return theplan, destroy
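# --- Illustrative note on the array sizes used above ---
# The size // 2 + 1 complex points allocated for the real-to-complex and
# complex-to-real plans follow the usual half-spectrum storage convention,
# the same one numpy exposes through rfft/irfft. A quick self-check:
import numpy as np

size = 16
x = np.random.standard_normal(size)
xf = np.fft.rfft(x)                       # half spectrum, size // 2 + 1 points
assert len(xf) == size // 2 + 1
assert np.allclose(np.fft.irfft(xf, n=size), x)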
Esempio n. 51
0
def get_td_qnm(template=None, **kwargs):
    """Return a time domain damped sinusoid.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    f_0 : float
        The ringdown-frequency.
    tau : float
        The damping time of the sinusoid.
    phi : float
        The initial phase of the ringdown.
    amp : float
        The amplitude of the ringdown (constant for now).
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude.
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude is 
        1/1000 of the peak amplitude.

    Returns
    -------
    hplus: TimeSeries
        The plus phase of the ringdown in time domain.
    hcross: TimeSeries
        The cross phase of the ringdown in time domain.
    """

    input_params = props(template, qnm_required_args, **kwargs)

    f_0 = input_params.pop('f_0')
    tau = input_params.pop('tau')
    amp = input_params.pop('amp')
    phi = input_params.pop('phi')
    # the following may not be in input_params
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if delta_t is None:
        delta_t = 1. / qnm_freq_decay(f_0, tau, 1. / 1000)
        if delta_t < min_dt:
            delta_t = min_dt
    if t_final is None:
        t_final = qnm_time_decay(tau, 1. / 1000)
    kmax = int(t_final / delta_t) + 1

    times = numpy.arange(kmax) * delta_t

    hp = amp * numpy.exp(-times / tau) * numpy.cos(two_pi * f_0 * times + phi)
    hc = amp * numpy.exp(-times / tau) * numpy.sin(two_pi * f_0 * times + phi)

    hplus = TimeSeries(zeros(kmax), delta_t=delta_t)
    hcross = TimeSeries(zeros(kmax), delta_t=delta_t)
    hplus.data[:kmax] = hp
    hcross.data[:kmax] = hc

    return hplus, hcross
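# --- Minimal stand-alone sketch of the damped sinusoid built above ---
# Reproduces the same h_plus / h_cross with plain numpy, assuming the
# conventions of get_td_qnm (amplitude amp, frequency f_0, damping time tau,
# initial phase phi, sample spacing delta_t). The 1/1000-decay default is a
# simple stand-in for qnm_time_decay.
import numpy as np

def damped_sinusoid(f_0, tau, amp=1.0, phi=0.0, delta_t=1.0 / 4096):
    t_final = tau * np.log(1000.0)     # decay to 1/1000 of the peak amplitude
    t = np.arange(int(t_final / delta_t) + 1) * delta_t
    envelope = amp * np.exp(-t / tau)
    hp = envelope * np.cos(2.0 * np.pi * f_0 * t + phi)
    hc = envelope * np.sin(2.0 * np.pi * f_0 * t + phi)
    return t, hp, hc

# e.g. a 250 Hz ringdown with a 4 ms damping time:
# t, hp, hc = damped_sinusoid(250.0, 4e-3)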
Esempio n. 52
0
def spa_tmplt(**kwds):
    """ Generate a minimal TaylorF2 approximant with optimations for the sin/cos
    """
    # Pull out the input arguments
    f_lower = kwds['f_lower']
    delta_f = kwds['delta_f']
    distance = kwds['distance']
    mass1 = kwds['mass1']
    mass2 = kwds['mass2']
    s1z = kwds['spin1z']
    s2z = kwds['spin2z']
    phase_order = int(kwds['phase_order'])
    #amplitude_order = int(kwds['amplitude_order'])
    spin_order = int(kwds['spin_order'])

    if 'out' in kwds:
        out = kwds['out']
    else:
        out = None

    amp_factor = spa_amplitude_factor(mass1=mass1, mass2=mass2) / distance

    lal_pars = lal.CreateDict()
    if phase_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(
            lal_pars, phase_order)

    if spin_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(
            lal_pars, spin_order)

    #Calculate the PN terms
    phasing = lalsimulation.SimInspiralTaylorF2AlignedPhasing(
        float(mass1), float(mass2), float(s1z), float(s2z), lal_pars)

    pfaN = phasing.v[0]
    pfa2 = phasing.v[2] / pfaN
    pfa3 = phasing.v[3] / pfaN
    pfa4 = phasing.v[4] / pfaN
    pfa5 = phasing.v[5] / pfaN
    pfa6 = (phasing.v[6] - phasing.vlogv[6] * log(4)) / pfaN
    pfa7 = phasing.v[7] / pfaN

    pfl5 = phasing.vlogv[5] / pfaN
    pfl6 = phasing.vlogv[6] / pfaN

    piM = lal.PI * (mass1 + mass2) * lal.MTSUN_SI

    kmin = int(f_lower / float(delta_f))

    vISCO = 1. / sqrt(6.)
    fISCO = vISCO * vISCO * vISCO / piM
    kmax = int(fISCO / delta_f)
    f_max = ceilpow2(fISCO)
    n = int(f_max / delta_f) + 1

    if not out:
        htilde = FrequencySeries(zeros(n, dtype=numpy.complex64),
                                 delta_f=delta_f,
                                 copy=False)
    else:
        if type(out) is not Array:
            raise TypeError("Output must be an instance of Array")
        if len(out) < kmax:
            kmax = len(out)
        if out.dtype != complex64:
            raise TypeError("Output array is the wrong dtype")
        htilde = FrequencySeries(out, delta_f=delta_f, copy=False)

    spa_tmplt_engine(htilde[kmin:kmax], kmin, phase_order, delta_f, piM, pfaN,
                     pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7,
                     amp_factor)
    return htilde
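# --- Illustrative sketch of the ISCO cutoff used above ---
# spa_tmplt terminates the template at the Schwarzschild ISCO, where
# v_ISCO = 1/sqrt(6), so f_ISCO = v_ISCO**3 / (pi * M) with M in seconds.
# A stand-alone version with the solar-mass-in-seconds constant written out
# (value assumed here, not imported from lal):
import numpy as np

MTSUN = 4.925491e-6   # G * Msun / c**3 in seconds

def f_isco(mass1, mass2):
    """Schwarzschild ISCO gravitational-wave frequency in Hz for component
    masses in solar masses."""
    piM = np.pi * (mass1 + mass2) * MTSUN
    return (1.0 / np.sqrt(6.0)) ** 3 / piM

# e.g. f_isco(1.4, 1.4) is about 1.6 kHz, while f_isco(30.0, 30.0) is about 73 Hz.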
Esempio n. 53
0
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self.getslice(index)

        approximant = self.approximant(index)
        f_end = self.end_frequency(index)
        flow = self.table[index].f_lower

        # Determine the length of time of the filter, rounded up to
        # nearest power of two
        min_buffer = .5 + self.minimum_buffer

        from pycbc.waveform.waveform import props
        p = props(self.table[index])
        p.pop('approximant')
        buff_size = pycbc.waveform.get_waveform_filter_length_in_time(
            approximant, **p)

        tlen = self.round_up((buff_size + min_buffer) * self.sample_rate)
        flen = int(tlen / 2 + 1)

        delta_f = self.sample_rate / float(tlen)

        if f_end is None or f_end >= (flen * delta_f):
            f_end = (flen - 1) * delta_f

        logging.info("Generating %s, %ss, %i, starting from %s Hz",
                     approximant, 1.0 / delta_f, index, flow)

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(zeros(flen,
                                                          dtype=np.complex64),
                                                    self.table[index],
                                                    approximant=approximant,
                                                    f_lower=flow,
                                                    f_final=f_end,
                                                    delta_f=delta_f,
                                                    delta_t=1.0 /
                                                    self.sample_rate,
                                                    distance=distance,
                                                    **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be
        # erased by the type conversion below.
        ttotal = template_duration = -1
        if hasattr(htilde, 'length_in_time'):
            ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            template_duration = htilde.chirp_length

        self.table[index].template_duration = template_duration

        htilde = htilde.astype(np.complex64)
        htilde.f_lower = flow
        htilde.min_f_lower = self.min_f_lower
        htilde.end_idx = int(f_end / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.chirp_length = template_duration
        htilde.length_in_time = ttotal
        htilde.approximant = approximant
        htilde.end_frequency = f_end

        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        htilde.id = self.id_from_hash(
            hash((htilde.params.mass1, htilde.params.mass2,
                  htilde.params.spin1z, htilde.params.spin2z)))
        return htilde
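# --- Hedged sketch of the length rounding used above ---
# self.round_up is assumed to round the filter length up to the next power
# of two (as the comment in the method says), so that tlen and hence
# 1/delta_f are FFT-friendly. A minimal stand-alone equivalent:
def next_power_of_two(n):
    """Smallest power of two greater than or equal to n."""
    p = 1
    while p < n:
        p <<= 1
    return p

# e.g. next_power_of_two(300) == 512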
Esempio n. 54
0
def calc_psd_variation(strain, psd_short_segment, psd_long_segment,
                       short_psd_duration, short_psd_stride, psd_avg_method,
                       low_freq, high_freq):
    """Calculates time series of PSD variability

    This function first splits the segment up into 512 second chunks. It
    then calculates the PSD over this 512 second period as well as in 4
    second chunks throughout each 512 second period. Next the function
    estimates how different each 4 second PSD is from the 512 second PSD
    and produces a time series of this variability.

    Parameters
    ----------
    strain : TimeSeries
        Input strain time series to estimate PSDs
    psd_short_segment : {float, 8}
        Duration of the short segments for PSD estimation in seconds.
    psd_long_segment : {float, 512}
        Duration of the long segments for PSD estimation in seconds.
    short_psd_duration : {float, 4}
        Duration of the segments for PSD estimation in seconds.
    short_psd_stride : {float, 2}
        Separation between PSD estimation segments in seconds.
    psd_avg_method : {string, 'median'}
        Method for averaging PSD estimation segments.
    low_freq : {float, 20}
        Minimum frequency to consider the comparison between PSDs.
    high_freq : {float, 480}
        Maximum frequency to consider the comparison between PSDs.

    Returns
    -------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation
    """

    # Calculate strain precision
    if strain.precision == 'single':
        fs_dtype = numpy.float32
    elif strain.precision == 'double':
        fs_dtype = numpy.float64

    # Convert start and end times immediately to floats
    start_time = float(strain.start_time)
    end_time = float(strain.end_time)

    # Find the times of the long segments
    times_long = numpy.arange(start_time, end_time, psd_long_segment)

    # Set up the empty time series for the PSD variation estimate
    psd_var = TimeSeries(zeros(
        int(numpy.ceil((end_time - start_time) / psd_short_segment))),
                         delta_t=psd_short_segment,
                         copy=False,
                         epoch=start_time)

    ind = 0
    for tlong in times_long:
        # Calculate PSD for long segment and separate the long segment in to
        # overlapping shorter segments
        if tlong + psd_long_segment <= end_time:
            psd_long = pycbc.psd.welch(
                strain.time_slice(tlong, tlong + psd_long_segment),
                seg_len=int(short_psd_duration * strain.sample_rate),
                seg_stride=int(short_psd_stride * strain.sample_rate),
                avg_method=psd_avg_method)
            times_short = numpy.arange(tlong, tlong + psd_long_segment,
                                       psd_short_segment)
        else:
            psd_long = pycbc.psd.welch(
                strain.time_slice(end_time - psd_long_segment, end_time),
                seg_len=int(short_psd_duration * strain.sample_rate),
                seg_stride=int(short_psd_stride * strain.sample_rate),
                avg_method=psd_avg_method)
            times_short = numpy.arange(tlong, end_time, psd_short_segment)

        # Calculate the PSD of the shorter segments
        psd_short = []
        for tshort in times_short:
            if tshort + psd_short_segment <= end_time:
                pshort = pycbc.psd.welch(
                    strain.time_slice(tshort, tshort + psd_short_segment),
                    seg_len=int(short_psd_duration * strain.sample_rate),
                    seg_stride=int(short_psd_stride * strain.sample_rate),
                    avg_method=psd_avg_method)
            else:
                pshort = pycbc.psd.welch(
                    strain.time_slice(tshort - psd_short_segment, end_time),
                    seg_len=int(short_psd_duration * strain.sample_rate),
                    seg_stride=int(short_psd_stride * strain.sample_rate),
                    avg_method=psd_avg_method)
            psd_short.append(pshort)

        # Estimate the range of the PSD to compare
        kmin = int(low_freq / psd_long.delta_f)
        kmax = int(high_freq / psd_long.delta_f)
        # Compare the PSD of the short segment to the long segment
        # The weight factor gives the rough response of a cbc template across
        # the defined frequency range given the expected PSD (i.e. long PSD)
        # Then integrate the weighted ratio of the actual PSD (i.e. short PSD)
        # with the expected PSD (i.e. long PSD) over the specified frequency
        # range
        freqs = FrequencySeries(psd_long.sample_frequencies,
                                delta_f=psd_long.delta_f,
                                epoch=psd_long.epoch,
                                dtype=fs_dtype)
        weight = numpy.array(freqs[kmin:kmax]**(-7. / 3.) /
                             psd_long[kmin:kmax])
        weight /= weight.sum()
        diff = numpy.array([
            (weight *
             numpy.array(p_short[kmin:kmax] / psd_long[kmin:kmax])).sum()
            for p_short in psd_short
        ])

        # Store variation value
        for i, val in enumerate(diff):
            psd_var[ind + i] = val

        ind = ind + len(diff)
    return psd_var
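# --- Minimal numpy sketch of the comparison performed above ---
# For each short PSD the loop computes a weighted average of the ratio
# S_short(f) / S_long(f), with weights proportional to f**(-7/3) / S_long(f)
# (the rough SNR^2 density of a CBC template) normalized to sum to one over
# [low_freq, high_freq):
import numpy as np

def psd_variation_statistic(psd_short, psd_long, freqs, low_freq, high_freq):
    band = (freqs >= low_freq) & (freqs < high_freq)
    weight = freqs[band] ** (-7.0 / 3.0) / psd_long[band]
    weight /= weight.sum()
    return np.sum(weight * psd_short[band] / psd_long[band])

# Values near 1 indicate the short-term PSD matches the long-term estimate in
# the band that matters for CBC templates; larger values flag noisier data.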
Esempio n. 55
0
def get_fd_from_final_mass_spin(template=None, **kwargs):
    """Return frequency domain ringdown with all the modes specified.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    final_mass : float
        Mass of the final black hole.
    final_spin : float
        Spin of the final black hole.
    lmns : list
        Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
        The n specifies the number of overtones desired for the corresponding
        lm pair (maximum n=8).
        Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
    amp220 : float
        Amplitude of the fundamental 220 mode.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the 
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown.
        If None, it will be set to the inverse of the time at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    f_lower: {None, float}, optional
        The starting frequency of the output frequency series.
        If None, it will be set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series.
        If None, it will be set to the frequency at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of a ringdown with the lm modes specified and
        n overtones in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of a ringdown with the lm modes specified and
        n overtones in frequency domain.
    """

    input_params = props(template, mass_spin_required_args, **kwargs)

    # Get required args
    final_mass = input_params['final_mass']
    final_spin = input_params['final_spin']
    inc = input_params.pop('inclination', 0.)
    lmns = input_params['lmns']
    for lmn in lmns:
        if int(lmn[2]) == 0:
            raise ValueError('Number of overtones (nmodes) must be greater '
                             'than zero.')
    # The following may not be in input_params
    delta_f = input_params.pop('delta_f', None)
    f_lower = input_params.pop('f_lower', None)
    f_final = input_params.pop('f_final', None)

    f_0, tau = get_lm_f0tau_allmodes(final_mass, final_spin, lmns)

    if delta_f is None:
        delta_f = lm_deltaf(tau, lmns)
    if f_final is None:
        f_final = lm_ffinal(f_0, tau, lmns)
    if f_lower is None:
        f_lower = delta_f
    kmax = int(f_final / delta_f) + 1

    outplustilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
    outcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        hplustilde, hcrosstilde = get_fd_lm(freqs=f_0, taus=tau,
                                        inclination=inc, l=l, m=m, nmodes=nmodes,
                                        delta_f=delta_f, f_lower=f_lower,
                                        f_final=f_final, **input_params)
        outplustilde.data += hplustilde.data
        outcrosstilde.data += hcrosstilde.data

    return outplustilde, outcrosstilde
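# --- Small sketch of the lmns convention described in the docstring above ---
# Each entry 'lmN' expands to the overtones lm0 ... lm(N-1); e.g. ['223', '331']
# expands to 220, 221, 222 and 330.
def expand_lmns(lmns):
    modes = []
    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        modes.extend('%d%d%d' % (l, m, n) for n in range(nmodes))
    return modes

# expand_lmns(['223', '331']) == ['220', '221', '222', '330']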
Esempio n. 56
0
def calc_filt_psd_variation(strain, segment, short_segment, psd_long_segment,
                            psd_duration, psd_stride, psd_avg_method, low_freq,
                            high_freq):
    """ Calculates time series of PSD variability

    This function first splits the segment up into 512 second chunks. It
    then calculates the PSD over each 512 second chunk. The PSD is used
    to create a filter that is the composition of three filters:
    1. Bandpass filter between f_low and f_high.
    2. Weighting filter which gives the rough response of a CBC template.
    3. Whitening filter.
    Next it convolves this filter with the stretch of data.
    This new time series is given to the "mean_square" function, which
    computes the mean square of the time series within an 8 second window,
    once per second.
    The result, which by Parseval's theorem is the variance of the S/N in
    that stride, is then stored in a time series.

    Parameters
    ----------
    strain : TimeSeries
        Input strain time series to estimate PSDs
    segment : {float, 8}
        Duration of the segments for the mean square estimation in seconds.
    short_segment : {float, 0.25}
        Duration of the short segments for the outliers removal.
    psd_long_segment : {float, 512}
        Duration of the long segments for PSD estimation in seconds.
    psd_duration : {float, 8}
        Duration of FFT segments for long term PSD estimation, in seconds.
    psd_stride : {float, 4}
        Separation between FFT segments for long term PSD estimation, in
        seconds.
    psd_avg_method : {string, 'median'}
        Method for averaging PSD estimation segments.
    low_freq : {float, 20}
        Minimum frequency to consider the comparison between PSDs.
    high_freq : {float, 480}
        Maximum frequency to consider the comparison between PSDs.

    Returns
    -------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation
    """
    # Calculate strain precision
    if strain.precision == 'single':
        fs_dtype = numpy.float32
    elif strain.precision == 'double':
        fs_dtype = numpy.float64

    # Convert start and end times immediately to floats
    start_time = float(strain.start_time)
    end_time = float(strain.end_time)

    # Resample the data
    strain = resample_to_delta_t(strain, 1.0 / 2048)
    srate = int(strain.sample_rate)

    # Fix the step for the PSD estimation and the time to remove at the
    # edge of the time series.
    step = 1.0
    strain_crop = 8.0

    # Find the times of the long segments
    times_long = numpy.arange(
        start_time, end_time,
        psd_long_segment - 2 * strain_crop - segment + step)

    # Set up the empty time series for the PSD variation estimate
    ts_duration = end_time - start_time - 2 * strain_crop - segment + 1
    psd_var = TimeSeries(zeros(int(numpy.floor(ts_duration / step))),
                         delta_t=step,
                         copy=False,
                         epoch=start_time + strain_crop + segment)

    # Create a bandpass filter between low_freq and high_freq
    filt = sig.firwin(4 * srate, [low_freq, high_freq],
                      pass_zero=False,
                      window='hann',
                      nyq=srate / 2)
    filt.resize(int(psd_duration * srate))
    # Fourier transform the filter and take the absolute value to get
    # rid of the phase. Save the filter as a frequency series.
    filt = abs(rfft(filt))
    my_filter = FrequencySeries(filt,
                                delta_f=1. / psd_duration,
                                dtype=fs_dtype)

    ind = 0
    for tlong in times_long:
        # Calculate PSD for long segment
        if tlong + psd_long_segment <= float(end_time):
            astrain = strain.time_slice(tlong, tlong + psd_long_segment)
            plong = pycbc.psd.welch(
                astrain,
                seg_len=int(psd_duration * strain.sample_rate),
                seg_stride=int(psd_stride * strain.sample_rate),
                avg_method=psd_avg_method)
        else:
            astrain = strain.time_slice(tlong, end_time)
            plong = pycbc.psd.welch(
                strain.time_slice(end_time - psd_long_segment, end_time),
                seg_len=int(psd_duration * strain.sample_rate),
                seg_stride=int(psd_stride * strain.sample_rate),
                avg_method=psd_avg_method)

        # Make the weighting filter - bandpass, which weight by f^-7/6,
        # and whiten. The normalization is chosen so that the variance
        # will be one if this filter is applied to white noise which
        # already has a variance of one.
        freqs = FrequencySeries(plong.sample_frequencies,
                                delta_f=plong.delta_f,
                                epoch=plong.epoch,
                                dtype=fs_dtype)
        fweight = freqs**(-7. / 6.) * my_filter / numpy.sqrt(plong)
        fweight[0] = 0.
        norm = (sum(abs(fweight)**2) / (len(fweight) - 1.))**-0.5
        fweight = norm * fweight
        fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong)
        fwhiten[0] = 0.
        full_filt = sig.hann(int(psd_duration * srate)) * numpy.roll(
            irfft(fwhiten * fweight),
            int(psd_duration / 2) * srate)
        # Convolve the filter with long segment of data
        wstrain = TimeSeries(sig.fftconvolve(astrain, full_filt, mode='same'),
                             delta_t=strain.delta_t,
                             epoch=astrain.start_time)
        wstrain = wstrain[int(strain_crop * srate):-int(strain_crop * srate)]
        # compute the mean square of the chunk of data
        delta_t = wstrain.end_time.gpsSeconds - wstrain.start_time.gpsSeconds
        variation = mean_square(wstrain, delta_t, short_segment, segment)

        # Store variation value
        for i, val in enumerate(variation):
            psd_var[ind + i] = val

        ind = ind + len(variation)
    return psd_var
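# --- Hypothetical sketch of the mean_square step ---
# The mean_square helper itself is not shown in this example. Based only on
# the description above (mean square of the whitened strain in
# `segment`-second windows, stepped once per second), a simplified stand-in
# without the short-segment outlier removal might look like:
import numpy as np

def windowed_mean_square(data, srate, window_sec, step_sec=1.0):
    window = int(window_sec * srate)
    step = int(step_sec * srate)
    starts = range(0, len(data) - window + 1, step)
    return np.array([np.mean(data[s:s + window] ** 2) for s in starts])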
Esempio n. 57
0
def get_td_lm(template=None, taper=None, **kwargs):
    """Return time domain lm mode with the given number of overtones.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration taper * tau.
        This option is recommended with timescales taper=1./2 or 1. for
        time-domain ringdown-only injections.
        The abrupt turn-on of the ringdown can cause artifacts in the
        waveform when Fourier transforming to the frequency domain. Setting
        taper will add a rapid ring-up with timescale tau/10.
        Each overtone will have a different taper depending on its tau, the
        final taper being the superposition of all the tapers.
    freqs : dict
        {lmn:f_lmn} Dictionary of the central frequencies for each overtone,
        as many as number of modes. 
    taus : dict
        {lmn:tau_lmn} Dictionary of the damping times for each overtone,
        as many as number of modes.
    l : int
        l mode (lm modes available: 22, 21, 33, 44, 55).
    m : int
        m mode (lm modes available: 22, 21, 33, 44, 55).
    nmodes: int
        Number of overtones desired (maximum n=8)
    amp220 : float
        Amplitude of the fundamental 220 mode, needed for any lm.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the 
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {0., float}, optional
        Inclination of the system in radians. Default is 0 (face on).
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude is 
        1/1000 of the peak amplitude (the maximum of all modes).

    Returns
    -------
    hplus: TimeSeries
        The plus phase of a lm mode with overtones (n) in time domain.
    hcross: TimeSeries
        The cross phase of a lm mode with overtones (n) in time domain.
    """

    input_params = props(template, lm_required_args, **kwargs)

    # Get required args
    amps, phis = lm_amps_phases(**input_params)
    f_0 = input_params.pop('freqs')
    tau = input_params.pop('taus')
    inc = input_params.pop('inclination', 0.)
    l, m = input_params.pop('l'), input_params.pop('m')
    nmodes = input_params.pop('nmodes')
    if int(nmodes) == 0:
        raise ValueError('Number of overtones (nmodes) must be greater '
                         'than zero.')
    # The following may not be in input_params
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)

    if delta_t is None:
        delta_t = lm_deltat(f_0, tau, ['%d%d%d' %(l,m,nmodes)]) 
    if t_final is None:
        t_final = lm_tfinal(tau, ['%d%d%d' %(l, m, nmodes)])

    kmax = int(t_final / delta_t) + 1
    # Different overtones will have different tapering window-size
    # Find maximum window size to create long enough output vector
    if taper is not None:
        taper_window = int(taper*max(tau.values())/delta_t)
        kmax += taper_window

    outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    if taper is not None:
        start = - taper * max(tau.values())
        outplus._epoch, outcross._epoch = start, start

    for n in range(nmodes):
        hplus, hcross = get_td_qnm(template=None, taper=taper,
                            f_0=f_0['%d%d%d' %(l,m,n)],
                            tau=tau['%d%d%d' %(l,m,n)],
                            phi=phis['%d%d%d' %(l,m,n)],
                            amp=amps['%d%d%d' %(l,m,n)],
                            inclination=inc, l=l, m=m,
                            delta_t=delta_t, t_final=t_final)
        if taper is None:
            outplus.data += hplus.data
            outcross.data += hcross.data
        else:
            outplus = taper_shift(hplus, outplus)
            outcross = taper_shift(hcross, outcross)

    return outplus, outcross
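# --- Hypothetical illustration of the taper described in the docstring ---
# The docstring states that setting taper adds a rapid ring-up with
# timescale tau/10 before the usual exp(-t/tau) decay. One simple envelope
# consistent with that description (a sketch of the idea, not the PyCBC
# implementation) is:
import numpy as np

def ringup_envelope(times, tau):
    """exp(10*t/tau) ring-up for t < 0, exp(-t/tau) decay for t >= 0."""
    return np.where(times < 0.0,
                    np.exp(10.0 * times / tau),
                    np.exp(-times / tau))

# With tau = 4 ms the envelope is at 1/e of its peak only 0.4 ms before
# t = 0, so the turn-on is ten times faster than the decay.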
Esempio n. 58
0
    def heirarchical_matched_filter_and_cluster(self, htilde, template_norm,
                                                stilde, window):
        """ Return the complex snr and normalization. 
    
        Calculated the matched filter, threshold, and cluster. 

        Parameters
        ----------
        htilde : FrequencySeries 
            The template waveform. Must come from the FilterBank class.
        template_norm : float
            The htilde, template normalization factor.
        stilde : FrequencySeries 
            The strain data to be filtered.
        window : int
            The size of the cluster window in samples.

        Returns
        -------
        snr : TimeSeries
            A time series containing the complex snr at the reduced sample rate.
        norm : float
            The normalization of the complex snr.  
        correlation: FrequencySeries
            A frequency series containing the correlation vector. 
        idx : Array
            List of indices of the triggers.
        snrv : Array
            The snr values at the trigger locations.
        """
        from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose

        norm = (4.0 * stilde.delta_f) / sqrt(template_norm)

        correlate(htilde[self.kmin_red:self.kmax_red],
                  stilde[self.kmin_red:self.kmax_red],
                  self.corr_mem[self.kmin_red:self.kmax_red])

        ifft(self.corr_mem, self.snr_mem)

        if not hasattr(stilde, 'red_analyze'):
            stilde.red_analyze = \
                             slice(stilde.analyze.start // self.downsample_factor,
                                   stilde.analyze.stop // self.downsample_factor)

        idx_red, snrv_red = events.threshold(
            self.snr_mem[stilde.red_analyze],
            self.snr_threshold / norm * self.upsample_threshold)
        if len(idx_red) == 0:
            return [], None, [], [], []

        idx_red, _ = events.cluster_reduce(idx_red, snrv_red,
                                           window / self.downsample_factor)
        logging.info("%s points above threshold at reduced resolution"\
                      %(str(len(idx_red)),))

        # The fancy upsampling is here
        if self.upsample_method == 'pruned_fft':
            idx = (idx_red + stilde.analyze.start // self.downsample_factor)\
                   * self.downsample_factor

            idx = smear(idx, self.downsample_factor)

            # cache transposed  versions of htilde and stilde
            if not hasattr(self.corr_mem_full, 'transposed'):
                self.corr_mem_full.transposed = zeros(len(self.corr_mem_full),
                                                      dtype=self.dtype)

            if not hasattr(htilde, 'transposed'):
                htilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                htilde.transposed[self.kmin_full:self.kmax_full] = htilde[
                    self.kmin_full:self.kmax_full]
                htilde.transposed = fft_transpose(htilde.transposed)

            if not hasattr(stilde, 'transposed'):
                stilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                stilde.transposed[self.kmin_full:self.kmax_full] = stilde[
                    self.kmin_full:self.kmax_full]
                stilde.transposed = fft_transpose(stilde.transposed)

            correlate(htilde.transposed, stilde.transposed,
                      self.corr_mem_full.transposed)
            snrv = pruned_c2cifft(self.corr_mem_full.transposed,
                                  self.inter_vec,
                                  idx,
                                  pretransposed=True)
            idx = idx - stilde.analyze.start
            idx2, snrv = events.threshold(Array(snrv, copy=False),
                                          self.snr_threshold / norm)

            if len(idx2) > 0:
                correlate(htilde[self.kmax_red:self.kmax_full],
                          stilde[self.kmax_red:self.kmax_full],
                          self.corr_mem_full[self.kmax_red:self.kmax_full])
                idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
            else:
                idx, snrv = [], []

            logging.info("%s points at full rate and clustering" % len(idx))
            return self.snr_mem, norm, self.corr_mem_full, idx, snrv
        else:
            raise ValueError("Invalid upsample method")
Esempio n. 59
0
def calculate_acf(data, delta_t=1.0, unbiased=False):
    r"""Calculates the one-sided autocorrelation function.

    Calculates the autocorrelation function (ACF) and returns the one-sided
    ACF. The ACF is defined as the autocovariance divided by the variance. The
    ACF can be estimated using

    .. math::

        \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right) 

    Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at
    time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is
    the variance of :math:`X_{t}`.

    Parameters
    -----------
    data : TimeSeries or numpy.array
        A TimeSeries or numpy.array of data.
    delta_t : float
        The time step of the data series if it is not a TimeSeries instance.
    unbiased : bool
        If True the normalization of the autocovariance function is n-k
        instead of n. This is called the unbiased estimation of the
        autocovariance. Note that this does not mean the ACF is unbiased.

    Returns
    -------
    acf : numpy.array
        If data is a TimeSeries then acf will be a TimeSeries of the
        one-sided ACF. Else acf is a numpy.array.
    """

    # if given a TimeSeries instance then get numpy.array
    if isinstance(data, TimeSeries):
        y = data.numpy()
        delta_t = data.delta_t
    else:
        y = data

    # Zero mean
    y = y - y.mean()
    ny_orig = len(y)

    npad = 1
    while npad < 2*ny_orig:
        npad = npad << 1
    ypad = numpy.zeros(npad)
    ypad[:ny_orig] = y    
        
    # FFT data minus the mean
    fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries()

    # correlate
    # no need to supply the conjugate explicitly; the correlate function takes it
    cdata = FrequencySeries(zeros(len(fdata), dtype=numpy.complex64),
                           delta_f=fdata.delta_f, copy=False)
    correlate(fdata, fdata, cdata)

    # IFFT correlated data to get unnormalized autocovariance time series
    acf = cdata.to_timeseries()
    acf = acf[:ny_orig]

    # normalize the autocovariance
    # note that dividing by acf[0] is the same as ( y.var() * len(acf) )
    if unbiased:
        acf /= ( y.var() * numpy.arange(len(acf), 0, -1) )
    else:
        acf /= acf[0]

    # return input datatype
    if isinstance(data, TimeSeries):
        return TimeSeries(acf, delta_t=delta_t)
    else:
        return acf
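# --- Self-contained check of the FFT-based ACF against a direct sum ---
# Pure numpy; verifies that zero-padded FFT correlation, as used above,
# agrees with the textbook estimator R(k) ~ sum_t (x_t - mu)(x_{t+k} - mu).
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(256)
y = x - x.mean()
n = len(y)

# Direct (biased) estimator, normalized so the zero-lag value is 1
direct = np.array([np.sum(y[:n - k] * y[k:]) for k in range(n)])
direct = direct / direct[0]

# FFT estimator with zero padding to at least 2n to avoid circular wrap-around
npad = 1
while npad < 2 * n:
    npad <<= 1
yf = np.fft.rfft(y, n=npad)
fft_acf = np.fft.irfft(yf * np.conj(yf))[:n]
fft_acf = fft_acf / fft_acf[0]

assert np.allclose(direct, fft_acf)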
Esempio n. 60
0
    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out is None:
            tempout = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempout = self.out

        approximant = self.approximant(index)
        f_end = self.end_frequency(index)
        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length - 1) * self.delta_f

        # Find the start frequency, if variable
        if self.max_template_length is not None:
            f_low = find_variable_start_frequency(approximant,
                                                  self.table[index],
                                                  self.f_lower,
                                                  self.max_template_length)
        else:
            f_low = self.f_lower

        logging.info('%s: generating %s from %s Hz',
                     index, approximant, f_low)

        # Clear the storage memory
        poke = tempout.data
        tempout.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            tempout[0:self.filter_length],
            self.table[index],
            approximant=approximant,
            f_lower=f_low,
            f_final=f_end,
            delta_f=self.delta_f,
            delta_t=self.delta_t,
            distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be
        # erased by the type conversion below.
        # NOTE: If these durations are not available the values in self.table
        #       will continue to take the values in the input file.
        if hasattr(htilde, 'length_in_time'):
            if htilde.length_in_time is not None:
                self.table[index].ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            if htilde.chirp_length is not None:
                self.table[index].template_duration = htilde.chirp_length

        htilde = htilde.astype(self.dtype)
        htilde.f_lower = f_low
        htilde.end_frequency = f_end
        htilde.end_idx = int(htilde.end_frequency / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.approximant = approximant
        htilde.chirp_length = htilde.params.template_duration
        htilde.length_in_time = htilde.params.ttotal

        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}

        return htilde