Example #1
def qtransform(fseries, Q, f0):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency

    Returns
    -------
    norm_energy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    cenergy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the complex energy from the Q-transform of 
        this tile against the data.
    """
    # q-transform data for each (Q, frequency) tile
    # initialize parameters
    qprime = Q / 11**(1 / 2.)  # ... self.qprime
    dur = 1.0 / fseries.delta_f

    # check for sampling rate
    sampling = (len(fseries) - 1) * 2 * fseries.delta_f

    # choice of output sampling rate
    output_sampling = sampling  # Can lower this to highest bandwidth
    output_samples = int(dur * output_sampling)

    # window fft
    window_size = 2 * int(f0 / qprime * dur) + 1

    # get start and end indices
    start = int((f0 - (f0 / qprime)) * dur)
    end = int(start + window_size)

    # apply window to fft
    # normalize and generate bi-square window
    norm = np.sqrt(315. * qprime / (128. * f0))
    windowed = fseries[start:end].numpy() * (bisquare(window_size) * norm)

    # pad data, move negative frequencies to the end, and IFFT
    padded = np.pad(windowed,
                    padding(window_size, output_samples),
                    mode='constant')
    wenergy = npfft.ifftshift(padded)

    # return a 'TimeSeries'
    wenergy = FrequencySeries(wenergy, delta_f=1. / dur)
    cenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),
                         delta_t=1. / sampling)
    ifft(wenergy, cenergy)
    energy = cenergy.squared_norm()
    medianenergy = np.median(energy.numpy())
    norm_energy = energy / float(medianenergy)
    return norm_energy, cenergy
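This snippet leans on two module-level helpers, bisquare and padding, that are not shown. A minimal sketch of plausible definitions: bisquare matches the inline (1 - x**2)**2 window that Examples #6 and #8 spell out, while padding is a hypothetical reconstruction assumed to return the (before, after) zero-pad widths that grow the windowed tile to output_samples.

import numpy as np

def bisquare(size):
    # bi-square window (1 - x^2)^2 sampled on [-1, 1]; matches the
    # inline form used in Examples #6 and #8
    x = np.linspace(-1.0, 1.0, size)
    return (1.0 - x**2)**2

def padding(window_size, output_samples):
    # assumed helper: (before, after) pad widths that keep the windowed
    # tile roughly centered while extending it to output_samples
    pad = output_samples - window_size
    return (pad // 2, pad - pad // 2)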
Example #2
def bandlimited_interpolate(series, delta_f):
    """Return a new PSD that has been interpolated to the desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output

    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    series = FrequencySeries(series, dtype=complex_same_precision_as(series), delta_f=series.delta_f)

    N = (len(series) - 1) * 2
    delta_t = 1.0 / series.delta_f / N

    new_N = int(1.0 / (delta_t * delta_f))
    new_n = new_N // 2 + 1

    series_in_time = TimeSeries(zeros(N), dtype=real_same_precision_as(series), delta_t=delta_t)
    ifft(series, series_in_time)

    padded_series_in_time = TimeSeries(zeros(new_N), dtype=series_in_time.dtype, delta_t=delta_t)
    padded_series_in_time[0:N//2] = series_in_time[0:N//2]
    padded_series_in_time[new_N-N//2:new_N] = series_in_time[N//2:N]

    interpolated_series = FrequencySeries(zeros(new_n), dtype=series.dtype, delta_f=delta_f)
    fft(padded_series_in_time, interpolated_series)

    return interpolated_series
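A hedged usage sketch, assuming bandlimited_interpolate is importable from wherever this snippet lives (pycbc.psd.estimate in the PyCBC tree): refine a model PSD from delta_f = 1.0 to 0.25.

from pycbc.psd import aLIGOZeroDetHighPower

# model PSD sampled at delta_f = 1.0 up to a 1024 Hz Nyquist
coarse = aLIGOZeroDetHighPower(1025, 1.0, 10.0)
fine = bandlimited_interpolate(coarse, 0.25)
print(len(coarse), coarse.delta_f, '->', len(fine), fine.delta_f)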
Example #3
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    import scipy.signal
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate
    from pycbc.types import Array, TimeSeries, complex_same_precision_as, zeros
    fillen = len(coefficients)

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return TimeSeries(series,
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - fillen + 1)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, dtype=ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return TimeSeries(out.numpy() / len(out),
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    else:
        # recursively perform which saves a bit on memory usage
        # but must keep within recursion limit
        chunksize = max(fillen * 5, len(timeseries) // 128)
        part1 = lfilter(coefficients, timeseries[0:chunksize])
        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
        out = timeseries.copy()
        out[:len(part1)] = part1
        out[len(part1):] = part2[fillen:]
        return out
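The frequency-domain branch above is circular correlation against a reversed, rolled copy of the coefficients. A self-contained numpy sketch of the same trick, checked against scipy.signal.lfilter; the first len(coeffs) - 1 output samples differ because the FFT version wraps around.

import numpy as np
import scipy.signal

coeffs = np.array([0.25, 0.5, 0.25])
x = np.random.randn(256)

direct = scipy.signal.lfilter(coeffs, 1.0, x)

# reversed kernel, zero-padded and rolled exactly as in the example above
c = np.zeros(len(x))
c[:len(coeffs)] = coeffs[::-1]
c = np.roll(c, len(x) - len(coeffs) + 1)

# frequency-domain correlation; numpy's ifft already divides by N,
# which is why the example divides by len(out) by hand
circular = np.fft.ifft(np.conj(np.fft.fft(c)) * np.fft.fft(x)).real

assert np.allclose(direct[len(coeffs) - 1:], circular[len(coeffs) - 1:])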
Example #4
def matched_filter(template,
                   data,
                   psd=None,
                   low_frequency_cutoff=None,
                   high_frequency_cutoff=None,
                   h_norm=None):
    """Return the complex SNR and normalization (SNR,norm) of the template 
       filtered against the data, where the normalized SNR is SNR' = SNR * norm.
    """
    global _q
    global _qtilde

    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    N = (len(stilde) - 1) * 2
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff, stilde.delta_f, N)

    if (_q is None) or (len(_q) != N) or _q.dtype != data.dtype:
        _q = zeros(N, dtype=complex_same_precision_as(data))

    if (_qtilde is None) or (len(_qtilde) != N) or _qtilde.dtype != data.dtype:
        _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.fill(0)

    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    # Only weight by the psd if it was explicitly given.
    # In most cases, the expectation is to prewhiten the data
    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f:
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")

    ifft(_qtilde, _q)

    # Only calculate the normalization if needed. For SPA waveforms
    # this can be done ahead of time.
    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff,
                         high_frequency_cutoff)

    norm = (4.0 / (N * N * stilde.delta_f)) / sqrt(h_norm)

    return _q, norm
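This variant returns the raw, unnormalized SNR plus its norm. For everyday use, PyCBC's public wrapper applies the normalization itself; a hedged end-to-end sketch with illustrative parameter values:

from pycbc.filter import matched_filter
from pycbc.noise import noise_from_psd
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.waveform import get_fd_waveform

# 16 s of simulated noise at 4096 Hz, so delta_f = 1/16
delta_f = 1.0 / 16
psd = aLIGOZeroDetHighPower(int(2048 / delta_f) + 1, delta_f, 20.0)
strain = noise_from_psd(16 * 4096, 1.0 / 4096, psd, seed=0)

hp, _ = get_fd_waveform(approximant="TaylorF2", mass1=10, mass2=10,
                        delta_f=delta_f, f_lower=20.0)
hp.resize(len(strain) // 2 + 1)

snr = matched_filter(hp, strain, psd=psd, low_frequency_cutoff=20.0)
print(abs(snr).max())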
Example #5
def IFFT_to_TD(b, beta, sp, sc, delta_t):
    hp, hc = ppE_to_h(b, beta, sp, sc)

    tlen_sp = int(1.0 / delta_t / sp.delta_f)
    tlen_sc = int(1.0 / delta_t / sc.delta_f)
    hp.resize(tlen_sp // 2 + 1)
    hc.resize(tlen_sc // 2 + 1)

    tp = types.TimeSeries(types.zeros(tlen_sp), delta_t=delta_t)
    tc = types.TimeSeries(types.zeros(tlen_sc), delta_t=delta_t)
    fft.ifft(hp, tp)
    fft.ifft(hc, tc)
    return tp, tc
Example #6
def qseries(fseries, Q, f0, return_complex=False):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency
    return_complex: {False, bool}
        Return the raw complex series instead of the normalized power.

    Returns
    -------
    energy: '~pycbc.types.TimeSeries'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    """
    # normalize and generate bi-square window
    qprime = Q / 11**(1 / 2.)
    norm = numpy.sqrt(315. * qprime / (128. * f0))
    window_size = 2 * int(f0 / qprime * fseries.duration) + 1
    xfrequencies = numpy.linspace(-1., 1., window_size)

    start = int((f0 - (f0 / qprime)) * fseries.duration)
    end = int(start + window_size)
    center = (start + end) // 2

    windowed = fseries[start:end] * (1 - xfrequencies**2)**2 * norm

    tlen = (len(fseries) - 1) * 2
    windowed.resize(tlen)
    windowed = numpy.roll(windowed, -center)

    # calculate the time series for this q -value
    windowed = FrequencySeries(windowed,
                               delta_f=fseries.delta_f,
                               epoch=fseries.start_time)
    ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
                          delta_t=fseries.delta_t)
    ifft(windowed, ctseries)

    if return_complex:
        return ctseries
    else:
        energy = ctseries.squared_norm()
        medianenergy = numpy.median(energy.numpy())
        return energy / float(medianenergy)
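A hedged usage sketch, assuming qseries is importable from wherever this snippet lives (pycbc.filter.qtransform in the PyCBC tree); the Q and f0 values are illustrative.

from pycbc.noise import noise_from_psd
from pycbc.psd import aLIGOZeroDetHighPower

delta_f = 1.0 / 4
psd = aLIGOZeroDetHighPower(int(2048 / delta_f) + 1, delta_f, 10.0)
ts = noise_from_psd(4 * 4096, 1.0 / 4096, psd, seed=0)

fseries = ts.to_frequencyseries()
energy = qseries(fseries, Q=12.0, f0=100.0)   # median-normalized tile energy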
Example #7
def interpolate_complex_frequency(series,
                                  delta_f,
                                  zeros_offset=0,
                                  side='right'):
    """Interpolate complex frequency series to desired delta_f.

    Return a new complex frequency series that has been interpolated to the
    desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output
    zeros_offset : optional, {0, int}
        Number of samples to delay the start of the zero padding
    side : optional, {'right', str}
        The side of the vector to zero pad
        
    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    new_n = int((len(series) - 1) * series.delta_f / delta_f + 1)
    old_N = int((len(series) - 1) * 2)
    new_N = int((new_n - 1) * 2)
    time_series = TimeSeries(zeros(old_N),
                             delta_t=1.0 / (series.delta_f * old_N),
                             dtype=real_same_precision_as(series))

    ifft(series, time_series)

    time_series.roll(-zeros_offset)
    time_series.resize(new_N)

    if side == 'left':
        time_series.roll(zeros_offset + new_N - old_N)
    elif side == 'right':
        time_series.roll(zeros_offset)

    out_series = FrequencySeries(zeros(new_n),
                                 epoch=series.epoch,
                                 delta_f=delta_f,
                                 dtype=series.dtype)
    fft(time_series, out_series)

    return out_series
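A hedged usage sketch, assuming the function above is in scope: refine a frequency-domain waveform from delta_f = 1/32 to 1/64. A finer delta_f implies a longer duration, so the intermediate time series is zero-padded on the requested side.

from pycbc.waveform import get_fd_waveform

hp, _ = get_fd_waveform(approximant="TaylorF2", mass1=10, mass2=10,
                        delta_f=1.0 / 32, f_lower=20.0)
fine = interpolate_complex_frequency(hp, delta_f=1.0 / 64)
print(len(hp), '->', len(fine))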
Example #8
def qseries(fseries, Q, f0, return_complex=False):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency
    return_complex: {False, bool}
        Return the raw complex series instead of the normalized power.

    Returns
    -------
    energy: '~pycbc.types.TimeSeries'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    """
    # normalize and generate bi-square window
    qprime = Q / 11**(1/2.)
    norm = numpy.sqrt(315. * qprime / (128. * f0))
    window_size = 2 * int(f0 / qprime * fseries.duration) + 1
    xfrequencies = numpy.linspace(-1., 1., window_size)

    start = int((f0 - (f0 / qprime)) * fseries.duration)
    end = int(start + window_size)
    center = (start + end) // 2

    windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm

    tlen = (len(fseries)-1) * 2
    windowed.resize(tlen)
    windowed = numpy.roll(windowed, -center)

    # calculate the time series for this q -value
    windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
                               epoch=fseries.start_time)
    ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
                          delta_t=fseries.delta_t)
    ifft(windowed, ctseries)

    if return_complex:
        return ctseries
    else:
        energy = ctseries.squared_norm()
        medianenergy = numpy.median(energy.numpy())
        return energy / float(medianenergy)
Example #9
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series
    
    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, dtype=ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return out.numpy() / len(out)
Example #10
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series
    
    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, dtype=ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return out.numpy() / len(out)
Example #11
    def to_timeseries(self, delta_t=None):
        """ Return the Fourier transform of this time series.

        Note that this assumes even length time series!

        Parameters
        ----------
        delta_t : {None, float}, optional
            The time resolution of the returned series. By default the
            resolution is determined by the length and delta_f of this
            frequency series.

        Returns
        -------
        TimeSeries:
            The inverse fourier transform of this frequency series.
        """
        from pycbc.fft import ifft
        from pycbc.types import TimeSeries, real_same_precision_as
        nat_delta_t = 1.0 / ((len(self) - 1) * 2) / self.delta_f
        if not delta_t:
            delta_t = nat_delta_t

        # add 0.5 to round integer
        tlen = int(1.0 / self.delta_f / delta_t + 0.5)
        flen = int(tlen / 2 + 1)

        if flen < len(self):
            raise ValueError("The value of delta_t (%s) would be "
                             "undersampled. Maximum delta_t "
                             "is %s." % (delta_t, nat_delta_t))
        if flen == len(self):
            tmp = self
        else:
            tmp = FrequencySeries(zeros(flen, dtype=self.dtype),
                             delta_f=self.delta_f, epoch=self.epoch)
            tmp[:len(self)] = self[:]

        f = TimeSeries(zeros(tlen,
                           dtype=real_same_precision_as(self)),
                           delta_t=delta_t)
        ifft(tmp, f)
        f._delta_t = delta_t
        return f
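Usage sketch: to_timeseries is the idiomatic way to bring a frequency-domain waveform into the time domain, replacing the manual resize-and-ifft dance seen in Examples #18 and #19.

from pycbc.waveform import get_fd_waveform

hp, _ = get_fd_waveform(approximant="TaylorF2", mass1=6, mass2=6,
                        delta_f=1.0 / 4, f_lower=40.0)
hp_td = hp.to_timeseries()      # natural delta_t from the length and delta_f
print(hp_td.delta_t, hp_td.duration)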
Example #12
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'):
    """Interpolate complex frequency series to desired delta_f.

    Return a new complex frequency series that has been interpolated to the
    desired delta_f.

    Parameters
    ----------
    series : FrequencySeries
        Frequency series to be interpolated.
    delta_f : float
        The desired delta_f of the output
    zeros_offset : optional, {0, int}
        Number of samples to delay the start of the zero padding
    side : optional, {'right', str}
        The side of the vector to zero pad
        
    Returns
    -------
    interpolated series : FrequencySeries
        A new FrequencySeries that has been interpolated.
    """
    new_n = int((len(series) - 1) * series.delta_f / delta_f + 1)
    old_N = int((len(series) - 1) * 2)
    new_N = int((new_n - 1) * 2)
    time_series = TimeSeries(zeros(old_N),
                             delta_t=1.0 / (series.delta_f * old_N),
                             dtype=real_same_precision_as(series))

    ifft(series, time_series)

    time_series.roll(-zeros_offset)
    time_series.resize(new_N)
    
    if side == 'left':
        time_series.roll(zeros_offset + new_N - old_N)
    elif side == 'right':
        time_series.roll(zeros_offset)

    out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
                                 delta_f=delta_f, dtype=series.dtype)
    fft(time_series, out_series)

    return out_series
Example #13
    def to_timeseries(self, delta_t=None):
        """ Return the Fourier transform of this time series.

        Note that this assumes even length time series!
        
        Parameters
        ----------
        delta_t : {None, float}, optional
            The time resolution of the returned series. By default the
            resolution is determined by the length and delta_f of this
            frequency series.
        
        Returns
        -------        
        TimeSeries: 
            The inverse fourier transform of this frequency series. 
        """
        from pycbc.fft import ifft
        from pycbc.types import TimeSeries, real_same_precision_as
        nat_delta_t = 1.0 / ((len(self) - 1) * 2) / self.delta_f
        if not delta_t:
            delta_t = nat_delta_t

        # add 0.5 to round integer
        tlen = int(1.0 / self.delta_f / delta_t + 0.5)
        flen = tlen // 2 + 1
        
        if flen < len(self):
            raise ValueError("The value of delta_t (%s) would be "
                             "undersampled. Maximum delta_t "
                             "is %s." % (delta_t, nat_delta_t))
        if flen == len(self):
            tmp = self
        else:
            tmp = FrequencySeries(zeros(flen, dtype=self.dtype), 
                             delta_f=self.delta_f, epoch=self.epoch)
            tmp[:len(self)] = self[:]
        
        f = TimeSeries(zeros(tlen, 
                           dtype=real_same_precision_as(self)),
                           delta_t=delta_t)
        ifft(tmp, f)
        return f
Example #14
 def setUp(self):
     self.scheme = _scheme
     self.context = _context
     self.psd_len = 1024
     self.psd_delta_f = 0.1
     self.psd_low_freq_cutoff = 10.
     # generate 1/f noise for testing PSD estimation
     noise_size = 524288
     sample_freq = 4096.
     delta_f = sample_freq / noise_size
     numpy.random.seed(132435)
     noise = numpy.random.normal(loc=0, scale=1, size=noise_size//2+1) + \
         1j * numpy.random.normal(loc=0, scale=1, size=noise_size//2+1)
     noise_model = 1. / numpy.linspace(1., 100., noise_size // 2 + 1)
     noise *= noise_model / numpy.sqrt(delta_f) / 2
     noise[0] = noise[0].real
     noise_fs = FrequencySeries(noise, delta_f=delta_f)
     self.noise = TimeSeries(numpy.zeros(noise_size), delta_t=1./sample_freq)
     ifft(noise_fs, self.noise)
Example #15
 def setUp(self):
     self.scheme = _scheme
     self.context = _context
     self.psd_len = 1024
     self.psd_delta_f = 0.1
     self.psd_low_freq_cutoff = 10.
     # generate 1/f noise for testing PSD estimation
     noise_size = 524288
     sample_freq = 4096.
     delta_f = sample_freq / noise_size
     numpy.random.seed(132435)
     noise = numpy.random.normal(loc=0, scale=1, size=noise_size//2+1) + \
         1j * numpy.random.normal(loc=0, scale=1, size=noise_size//2+1)
     noise_model = 1. / numpy.linspace(1., 100., noise_size // 2 + 1)
     noise *= noise_model / numpy.sqrt(delta_f) / 2
     noise[0] = noise[0].real
     noise_fs = FrequencySeries(noise, delta_f=delta_f)
     self.noise = TimeSeries(numpy.zeros(noise_size),
                             delta_t=1. / sample_freq)
     ifft(noise_fs, self.noise)
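The coloring trick in plain numpy, for reference: draw white complex Gaussian noise in the frequency domain, scale it by the target amplitude model, and inverse-FFT (np.fft.irfft plays the role of pycbc's ifft here).

import numpy as np

rng = np.random.default_rng(132435)
noise_size = 4096
delta_f = 1.0

model = 1.0 / np.linspace(1.0, 100.0, noise_size // 2 + 1)
white = (rng.normal(size=noise_size // 2 + 1) +
         1j * rng.normal(size=noise_size // 2 + 1))
colored_fd = white * model / np.sqrt(delta_f) / 2
colored_fd[0] = colored_fd[0].real        # the DC bin must be real
noise_td = np.fft.irfft(colored_fd)       # noise_size samples of colored noise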
Example #16
    def __init__(self, ifos, coinc_results, **kwargs):
        """Initialize a ligolw xml representation of a zerolag trigger
        for upload from pycbc live to gracedb.

        Parameters
        ----------
        ifos: list of strs
            A list of the ifos participating in this trigger
        coinc_results: dict of values
            A dictionary of values. The format is defined in
            pycbc/events/coinc.py and matches the on-disk representation
            in the hdf file for this time.
        """
        self.ifos = ifos
        self.template_id = coinc_results['foreground/%s/template_id' %
                                         self.ifos[0]]

        # remember if this should be marked as HWINJ
        self.is_hardware_injection = False
        if 'HWINJ' in coinc_results:
            self.is_hardware_injection = True

        # Set up the bare structure of the xml document
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

        proc_id = ligolw_process.register_to_xmldoc(
            outdoc,
            'pycbc', {},
            ifos=ifos,
            comment='',
            version=pycbc_version.git_hash,
            cvs_repository='pycbc/' + pycbc_version.git_branch,
            cvs_entry_time=pycbc_version.date).process_id

        # Set up coinc_definer table
        coinc_def_table = lsctables.New(lsctables.CoincDefTable)
        coinc_def_id = lsctables.CoincDefID(0)
        coinc_def_row = lsctables.CoincDef()
        coinc_def_row.search = "inspiral"
        coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs"
        coinc_def_row.coinc_def_id = coinc_def_id
        coinc_def_row.search_coinc_type = 0
        coinc_def_table.append(coinc_def_row)
        outdoc.childNodes[0].appendChild(coinc_def_table)

        # Set up coinc inspiral and coinc event tables
        coinc_id = lsctables.CoincID(0)
        coinc_event_table = lsctables.New(lsctables.CoincTable)
        coinc_event_row = lsctables.Coinc()
        coinc_event_row.coinc_def_id = coinc_def_id
        coinc_event_row.nevents = len(ifos)
        coinc_event_row.instruments = ','.join(ifos)
        coinc_event_row.time_slide_id = lsctables.TimeSlideID(0)
        coinc_event_row.process_id = proc_id
        coinc_event_row.coinc_event_id = coinc_id
        coinc_event_row.likelihood = 0.
        coinc_event_table.append(coinc_event_row)
        outdoc.childNodes[0].appendChild(coinc_event_table)

        # Set up sngls
        sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
        coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)

        sngl_id = 0
        sngl_event_id_map = {}
        for ifo in ifos:
            names = [
                n.split('/')[-1] for n in coinc_results
                if 'foreground/%s' % ifo in n
            ]
            sngl_id += 1
            sngl = return_empty_sngl()
            sngl.event_id = lsctables.SnglInspiralID(sngl_id)
            sngl_event_id_map[ifo] = sngl.event_id
            sngl.ifo = ifo
            for name in names:
                val = coinc_results['foreground/%s/%s' % (ifo, name)]
                if name == 'end_time':
                    sngl.set_end(lal.LIGOTimeGPS(val))
                else:
                    try:
                        setattr(sngl, name, val)
                    except AttributeError:
                        pass
            sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                sngl.mass1, sngl.mass2)
            sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                sngl.mass1, sngl.mass2)
            sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
            sngl_inspiral_table.append(sngl)

            # Set up coinc_map entry
            coinc_map_row = lsctables.CoincMap()
            coinc_map_row.table_name = 'sngl_inspiral'
            coinc_map_row.coinc_event_id = coinc_id
            coinc_map_row.event_id = sngl.event_id
            coinc_event_map_table.append(coinc_map_row)

        outdoc.childNodes[0].appendChild(coinc_event_map_table)
        outdoc.childNodes[0].appendChild(sngl_inspiral_table)

        # Set up the coinc inspiral table
        coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
        coinc_inspiral_row = lsctables.CoincInspiral()
        # This seems to be used as FAP, which should not be in gracedb
        coinc_inspiral_row.false_alarm_rate = 0
        coinc_inspiral_row.minimum_duration = 0.
        coinc_inspiral_row.set_ifos(ifos)
        coinc_inspiral_row.coinc_event_id = coinc_id
        coinc_inspiral_row.mchirp = sngl.mchirp
        coinc_inspiral_row.mass = sngl.mtotal
        coinc_inspiral_row.end_time = sngl.end_time
        coinc_inspiral_row.end_time_ns = sngl.end_time_ns
        coinc_inspiral_row.snr = coinc_results['foreground/stat']
        far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar'])
        coinc_inspiral_row.combined_far = far
        coinc_inspiral_table.append(coinc_inspiral_row)
        outdoc.childNodes[0].appendChild(coinc_inspiral_table)
        self.outdoc = outdoc
        self.time = sngl.get_end()

        # compute SNR time series
        self.upload_snr_series = kwargs['upload_snr_series']
        if self.upload_snr_series:
            data_readers = kwargs['data_readers']
            bank = kwargs['bank']
            htilde = bank[self.template_id]
            self.snr_series = {}
            self.snr_series_psd = {}
            for ifo in self.ifos:
                stilde = data_readers[ifo].overwhitened_data(htilde.delta_f)
                norm = 4.0 * htilde.delta_f / (htilde.sigmasq(stilde.psd)**0.5)
                qtilde = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype)
                correlate(htilde, stilde, qtilde)
                snr = qtilde * 0
                ifft(qtilde, snr)

                valid_end = int(len(qtilde) - data_readers[ifo].trim_padding)
                valid_start = int(valid_end - data_readers[ifo].blocksize *
                                  data_readers[ifo].sample_rate)
                seg = slice(valid_start, valid_end)
                snr = snr[seg]
                snr *= norm
                delta_t = 1.0 / data_readers[ifo].sample_rate
                start = data_readers[ifo].start_time
                snr = TimeSeries(snr, delta_t=delta_t, epoch=start)
                self.snr_series[ifo] = snr
                self.snr_series_psd[ifo] = stilde.psd

                # store the on-source slice of the series into the XML doc
                snr_onsource_time = coinc_results['foreground/%s/end_time' %
                                                  ifo] - snr.start_time
                snr_onsource_dur = lal.REARTH_SI / lal.C_SI
                onsource_idx = round(snr_onsource_time * snr.sample_rate)
                onsource_start = onsource_idx - int(
                    snr.sample_rate * snr_onsource_dur / 2)
                onsource_end = onsource_idx + int(
                    snr.sample_rate * snr_onsource_dur / 2)
                onsource_slice = slice(onsource_start, onsource_end + 1)
                snr_lal = snr[onsource_slice].lal()
                snr_lal.name = 'snr'
                snr_lal.sampleUnits = ''
                snr_xml = _build_series(snr_lal,
                                        (u'Time', u'Time,Real,Imaginary'),
                                        None, 'deltaT', 's')
                snr_node = outdoc.childNodes[-1].appendChild(snr_xml)
                eid_param = ligolw_param.new_param(
                    u'event_id', u'ilwd:char', str(sngl_event_id_map[ifo]))
                snr_node.appendChild(eid_param)
Example #17
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, int}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD')

    N = (len(psd)-1)*2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f,
                              dtype=complex_same_precision_as(psd))

    inv_asd[0] = 0
    inv_asd[N//2] = 0
    q = TimeSeries(numpy.zeros(N), delta_t=1.0 / (N * psd.delta_f),
                   dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)

    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len//2]

    q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
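A hedged usage sketch via the public pycbc.psd namespace, limiting the inverse-PSD impulse response to 4 s of data sampled at 4096 Hz:

from pycbc.psd import aLIGOZeroDetHighPower, inverse_spectrum_truncation

delta_f = 1.0 / 16
psd = aLIGOZeroDetHighPower(int(2048 / delta_f) + 1, delta_f, 10.0)
psd_smooth = inverse_spectrum_truncation(psd, 4 * 4096,
                                         low_frequency_cutoff=15.0,
                                         trunc_method='hann')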
Example #18
# Plot a time domain and fourier domain waveform together in the time domain.
# Note that without special cleanup the Fourier domain waveform will exhibit
# the Gibbs phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon)

import pylab
from pycbc import types, fft, waveform

# Get a time domain waveform
hp, hc = waveform.get_td_waveform(approximant="EOBNRv2",
                             mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40)
                             
# Get a frequency domain waveform
sptilde, sctilde = waveform. get_fd_waveform(approximant="TaylorF2",
                             mass1=6, mass2=6, delta_f=1.0/4, f_lower=40)

# FFT it to the time-domain
tlen = int(1.0 / hp.delta_t / sptilde.delta_f)
sptilde.resize(tlen//2 + 1)
sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t)
fft.ifft(sptilde, sp)

pylab.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)")
pylab.plot(hp.sample_times, hp, label="EOBNRv2")

pylab.ylabel('Strain')
pylab.xlabel('Time (s)')
pylab.legend()
pylab.show()
Example #19
import pylab
from pycbc import types, fft, waveform

# Get a time domain waveform
hp, hc = waveform.get_td_waveform(approximant="EOBNRv2",
                                  mass1=6,
                                  mass2=6,
                                  delta_t=1.0 / 4096,
                                  f_lower=40)

# Get a frequency domain waveform
sptilde, sctilde = waveform.get_fd_waveform(approximant="TaylorF2",
                                            mass1=6,
                                            mass2=6,
                                            delta_f=1.0 / 4,
                                            f_lower=40)

# FFT it to the time-domain
tlen = int(1.0 / hp.delta_t / sptilde.delta_f)
sptilde.resize(tlen // 2 + 1)
sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t)
fft.ifft(sptilde, sp)

pylab.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)")
pylab.plot(hp.sample_times, hp, label="EOBNRv2")

pylab.ylabel('Strain')
pylab.xlabel('Time (s)')
pylab.legend()
pylab.show()
Example #20
def qtransform(fseries, Q, f0):
    """Calculate the energy 'TimeSeries' for the given fseries

    Parameters
    ----------
    fseries: 'pycbc FrequencySeries'
        frequency-series data set
    Q:
        q value
    f0:
        central frequency

    Returns
    -------
    norm_energy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the normalized energy from the Q-transform of
        this tile against the data.
    cenergy: '~pycbc.types.aligned.ArrayWithAligned'
        A 'TimeSeries' of the complex energy from the Q-transform of 
        this tile against the data.
    """

    # q-transform data for each (Q, frequency) tile

    # initialize parameters
    qprime = Q / 11**(1/2.) # ... self.qprime
    dur = fseries.duration

    # check for sampling rate
    sampling = fseries.sample_rate

    # window fft
    window_size = 2 * int(f0 / qprime * dur) + 1

    # get start and end indices
    start = int((f0 - (f0 / qprime)) * dur)
    end = int(start + window_size)

    # apply window to fft
    # normalize and generate bi-square window
    norm = np.sqrt(315. * qprime / (128. * f0))
    windowed = fseries[start:end].numpy() * bisquare(window_size) * norm

    # choice of output sampling rate
    output_sampling = sampling # Can lower this to highest bandwidth
    output_samples = int(dur * output_sampling)

    # pad data, move negative frequencies to the end, and IFFT
    padded = np.pad(windowed, padding(window_size, output_samples), mode='constant')
    wenergy = npfft.ifftshift(padded)

    # return a 'TimeSeries'
    wenergy = FrequencySeries(wenergy, delta_f=1./dur)
    cenergy = TimeSeries(zeros(output_samples, dtype=np.complex128),
                         delta_t=1./sampling)

    ifft(wenergy, cenergy)

    energy = cenergy.squared_norm()
    medianenergy = np.median(energy.numpy())
    norm_energy = energy / float(medianenergy)
 
    return norm_energy, cenergy
Example #21
def matched_filter_core(template,
                        data,
                        psd=None,
                        low_frequency_cutoff=None,
                        high_frequency_cutoff=None,
                        h_norm=None,
                        out=None,
                        corr_out=None):
    """ Return the complex snr and normalization. 
    
    Return the complex snr, along with its associated normalization of the template,
    matched filtered against the data. 

    Parameters
    ----------
    template : TimeSeries or FrequencySeries 
        The template waveform
    data : TimeSeries or FrequencySeries 
        The strain data to be filtered.
    psd : {FrequencySeries}, optional
        The noise weighting of the filter.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to
        the Nyquist frequency.
    h_norm : {None, float}, optional
        The template normalization. If None, this value is calculated internally.
    out : {None, Array}, optional
        An array to use as memory for snr storage. If None, memory is allocated 
        internally.
    corr_out : {None, Array}, optional
        An array to use as memory for correlation storage. If None, memory is allocated
        internally. If provided, management of the vector is handled externally by the
        caller. No zeroing is done internally.

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr. 
    correlation: FrequencySeries
        A frequency series containing the correlation vector.
    norm : float
        The normalization of the complex snr.  
    """
    if corr_out is not None:
        _qtilde = corr_out
    else:
        global _qtilde_t
        _qtilde = _qtilde_t

    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    N = (len(stilde) - 1) * 2
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff, stilde.delta_f, N)

    if out is None:
        _q = zeros(N, dtype=complex_same_precision_as(data))
    elif (len(out) == N) and type(out) is Array and out.kind == 'complex':
        _q = out
    else:
        raise TypeError('Invalid Output Vector: wrong length or dtype')

    if corr_out is not None:
        pass
    elif (_qtilde is None) or (len(_qtilde) != N) or _qtilde.dtype != data.dtype:
        _qtilde_t = _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.clear()

    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f:
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")

    ifft(_qtilde, _q)

    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff,
                         high_frequency_cutoff)

    norm = (4.0 * stilde.delta_f) / sqrt(h_norm)
    delta_t = 1.0 / (N * stilde.delta_f)

    return (TimeSeries(_q, epoch=stilde._epoch, delta_t=delta_t, copy=False),
            FrequencySeries(_qtilde,
                            epoch=stilde._epoch,
                            delta_f=htilde.delta_f,
                            copy=False), norm)
Example #22
    def heirarchical_matched_filter_and_cluster(self, htilde, template_norm,
                                                stilde, window):
        """ Return the complex snr and normalization. 
    
        Calculate the matched filter, threshold, and cluster.

        Parameters
        ----------
        htilde : FrequencySeries 
            The template waveform. Must come from the FilterBank class.
        template_norm : float
            The htilde, template normalization factor.
        stilde : FrequencySeries 
            The strain data to be filtered.
        window : int
            The size of the cluster window in samples.

        Returns
        -------
        snr : TimeSeries
            A time series containing the complex snr at the reduced sample rate.
        norm : float
            The normalization of the complex snr.  
        correlation: FrequencySeries
            A frequency series containing the correlation vector.
        idx : Array
            List of indices of the triggers.
        snrv : Array
            The snr values at the trigger locations.
        """
        from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose

        norm = (4.0 * stilde.delta_f) / sqrt(template_norm)

        correlate(htilde[self.kmin_red:self.kmax_red],
                  stilde[self.kmin_red:self.kmax_red],
                  self.corr_mem[self.kmin_red:self.kmax_red])

        ifft(self.corr_mem, self.snr_mem)

        if not hasattr(stilde, 'red_analyze'):
            stilde.red_analyze = \
                             slice(stilde.analyze.start//self.downsample_factor,
                                   stilde.analyze.stop//self.downsample_factor)

        idx_red, snrv_red = events.threshold(
            self.snr_mem[stilde.red_analyze],
            self.snr_threshold / norm * self.upsample_threshold)
        if len(idx_red) == 0:
            return [], None, [], [], []

        idx_red, _ = events.cluster_reduce(idx_red, snrv_red,
                                           window / self.downsample_factor)
        logging.info("%s points above threshold at reduced resolution"\
                      %(str(len(idx_red)),))

        # The fancy upsampling is here
        if self.upsample_method == 'pruned_fft':
            idx = (idx_red + stilde.analyze.start//self.downsample_factor)\
                   * self.downsample_factor

            idx = smear(idx, self.downsample_factor)

            # cache transposed  versions of htilde and stilde
            if not hasattr(self.corr_mem_full, 'transposed'):
                self.corr_mem_full.transposed = zeros(len(self.corr_mem_full),
                                                      dtype=self.dtype)

            if not hasattr(htilde, 'transposed'):
                htilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                htilde.transposed[self.kmin_full:self.kmax_full] = htilde[
                    self.kmin_full:self.kmax_full]
                htilde.transposed = fft_transpose(htilde.transposed)

            if not hasattr(stilde, 'transposed'):
                stilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                stilde.transposed[self.kmin_full:self.kmax_full] = stilde[
                    self.kmin_full:self.kmax_full]
                stilde.transposed = fft_transpose(stilde.transposed)

            correlate(htilde.transposed, stilde.transposed,
                      self.corr_mem_full.transposed)
            snrv = pruned_c2cifft(self.corr_mem_full.transposed,
                                  self.inter_vec,
                                  idx,
                                  pretransposed=True)
            idx = idx - stilde.analyze.start
            idx2, snrv = events.threshold(Array(snrv, copy=False),
                                          self.snr_threshold / norm)

            if len(idx2) > 0:
                correlate(htilde[self.kmax_red:self.kmax_full],
                          stilde[self.kmax_red:self.kmax_full],
                          self.corr_mem_full[self.kmax_red:self.kmax_full])
                idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
            else:
                idx, snrv = [], []

            logging.info("%s points at full rate and clustering" % len(idx))
            return self.snr_mem, norm, self.corr_mem_full, idx, snrv
        else:
            raise ValueError("Invalid upsample method")
Example #23
    def heirarchical_matched_filter_and_cluster(self, segnum, template_norm, window):
        """ Return the complex snr and normalization. 
    
        Calculate the matched filter, threshold, and cluster.

        Parameters
        ----------
        segnum : int
            Index into the list of segments at MatchedFilterControl construction
        template_norm : float
            The htilde, template normalization factor.
        window : int
            Size of the window over which to cluster triggers, in samples

        Returns
        -------
        snr : TimeSeries
            A time series containing the complex snr at the reduced sample rate.
        norm : float
            The normalization of the complex snr.  
        correlation: FrequencySeries
            A frequency series containing the correlation vector.
        idx : Array
            List of indices of the triggers.
        snrv : Array
            The snr values at the trigger locations.
        """
        from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose
        htilde = self.htilde
        stilde = self.segments[segnum]

        norm = (4.0 * stilde.delta_f) / sqrt(template_norm)
        
        correlate(htilde[self.kmin_red:self.kmax_red], 
                  stilde[self.kmin_red:self.kmax_red], 
                  self.corr_mem[self.kmin_red:self.kmax_red]) 
                     
        ifft(self.corr_mem, self.snr_mem)           

        if not hasattr(stilde, 'red_analyze'):
            stilde.red_analyze = \
                             slice(stilde.analyze.start//self.downsample_factor,
                                   stilde.analyze.stop//self.downsample_factor)

        
        idx_red, snrv_red = events.threshold(self.snr_mem[stilde.red_analyze], 
                                self.snr_threshold / norm * self.upsample_threshold)
        if len(idx_red) == 0:
            return [], None, [], [], []

        idx_red, _ = events.cluster_reduce(idx_red, snrv_red, window / self.downsample_factor)
        logging.info("%s points above threshold at reduced resolution"\
                      %(str(len(idx_red)),))

        # The fancy upsampling is here
        if self.upsample_method == 'pruned_fft':
            idx = (idx_red + stilde.analyze.start//self.downsample_factor)\
                   * self.downsample_factor

            idx = smear(idx, self.downsample_factor)
            
            # cache transposed  versions of htilde and stilde
            if not hasattr(self.corr_mem_full, 'transposed'):
                self.corr_mem_full.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
                
            if not hasattr(htilde, 'transposed'):
                htilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
                htilde.transposed[self.kmin_full:self.kmax_full] = htilde[self.kmin_full:self.kmax_full]
                htilde.transposed = fft_transpose(htilde.transposed)
                
            if not hasattr(stilde, 'transposed'):
                stilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
                stilde.transposed[self.kmin_full:self.kmax_full] = stilde[self.kmin_full:self.kmax_full]
                stilde.transposed = fft_transpose(stilde.transposed)  
                
            correlate(htilde.transposed, stilde.transposed, self.corr_mem_full.transposed)      
            snrv = pruned_c2cifft(self.corr_mem_full.transposed, self.inter_vec, idx, pretransposed=True)   
            idx = idx - stilde.analyze.start
            idx2, snrv = events.threshold(Array(snrv, copy=False), self.snr_threshold / norm)
      
            if len(idx2) > 0:
                correlate(htilde[self.kmax_red:self.kmax_full], 
                          stilde[self.kmax_red:self.kmax_full], 
                          self.corr_mem_full[self.kmax_red:self.kmax_full])
                idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
            else:
                idx, snrv = [], []

            logging.info("%s points at full rate and clustering" % len(idx))
            return self.snr_mem, norm, self.corr_mem_full, idx, snrv
        else:
            raise ValueError("Invalid upsample method")            
Example #24
def matched_filter_core(template, data, psd=None, low_frequency_cutoff=None,
                        high_frequency_cutoff=None, h_norm=None, out=None, corr_out=None):
    """ Return the complex snr and normalization. 
    
    Return the complex snr, along with its associated normalization of the template,
    matched filtered against the data. 

    Parameters
    ----------
    template : TimeSeries or FrequencySeries 
        The template waveform
    data : TimeSeries or FrequencySeries 
        The strain data to be filtered.
    psd : {FrequencySeries}, optional
        The noise weighting of the filter.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the filter calculation. If None, begin at the
        first frequency after DC.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the filter calculation. If None, continue to
        the Nyquist frequency.
    h_norm : {None, float}, optional
        The template normalization. If None, this value is calculated internally.
    out : {None, Array}, optional
        An array to use as memory for snr storage. If None, memory is allocated 
        internally.
    corr_out : {None, Array}, optional
        An array to use as memory for correlation storage. If None, memory is allocated
        internally. If provided, management of the vector is handled externally by the
        caller. No zeroing is done internally.

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr. 
    correlation: FrequencySeries
        A frequency series containing the correlation vector.
    norm : float
        The normalization of the complex snr.  
    """
    if corr_out is not None:
        _qtilde = corr_out
    else:
        global _qtilde_t
        _qtilde = _qtilde_t
  
    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    if len(htilde) != len(stilde):
        raise ValueError("Length of template and data must match")

    N = (len(stilde)-1) * 2   
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                   high_frequency_cutoff, stilde.delta_f, N)   

    if out is None:
        _q = zeros(N, dtype=complex_same_precision_as(data))
    elif (len(out) == N) and type(out) is Array and out.kind == 'complex':
        _q = out
    else:
        raise TypeError('Invalid Output Vector: wrong length or dtype')
        
    if corr_out is not None:
        pass
    elif (_qtilde is None) or (len(_qtilde) != N) or _qtilde.dtype != data.dtype:
        _qtilde_t = _qtilde = zeros(N, dtype=complex_same_precision_as(data))
    else:
        _qtilde.clear()         
    
    correlate(htilde[kmin:kmax], stilde[kmin:kmax], _qtilde[kmin:kmax])

    if psd is not None:
        if isinstance(psd, FrequencySeries):
            if psd.delta_f == stilde.delta_f:
                _qtilde[kmin:kmax] /= psd[kmin:kmax]
            else:
                raise TypeError("PSD delta_f does not match data")
        else:
            raise TypeError("PSD must be a FrequencySeries")
            
    ifft(_qtilde, _q)
    
    if h_norm is None:
        h_norm = sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff)     

    norm = (4.0 * stilde.delta_f) / sqrt(h_norm)
    delta_t = 1.0 / (N * stilde.delta_f)
    
    return (TimeSeries(_q, epoch=stilde._epoch, delta_t=delta_t, copy=False), 
           FrequencySeries(_qtilde, epoch=stilde._epoch, delta_f=htilde.delta_f, copy=False), 
           norm)
Example #25
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.filter import correlate
    fillen = len(coefficients)

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return TimeSeries(series,
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
        from pycbc.strain.strain import create_memory_and_engine_for_class_based_fft
        from pycbc.strain.strain import execute_cached_fft

        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - fillen + 1)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        if not USE_CACHING_FOR_LFILTER:
            cfreq = zeros(flen, dtype=ftype)
            tfreq = zeros(flen, dtype=ftype)
            fft(Array(cseries), cfreq)
            fft(Array(timeseries), tfreq)
            cout = zeros(flen, ftype)
            correlate(cfreq, tfreq, cout)
            out = zeros(len(timeseries), dtype=timeseries.dtype)
            ifft(cout, out)

        else:
            npoints = len(cseries)
            # NOTE: This function is cached!
            ifftouts = create_memory_and_engine_for_class_based_fft(
                npoints,
                timeseries.dtype,
                ifft=True,
                uid=LFILTER_UNIQUE_ID_1
            )

            # FFT contents of cseries into cfreq
            cfreq = execute_cached_fft(cseries, uid=LFILTER_UNIQUE_ID_2,
                                       copy_output=False,
                                       normalize_by_rate=False)

            # FFT contents of timeseries into tfreq
            tfreq = execute_cached_fft(timeseries, uid=LFILTER_UNIQUE_ID_3,
                                       copy_output=False,
                                       normalize_by_rate=False)

            cout, out, fft_class = ifftouts

            # Correlate cfreq and tfreq
            correlate(cfreq, tfreq, cout)
            # IFFT correlation output into out
            fft_class.execute()

        return TimeSeries(out.numpy() / len(out), epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    else:
        # recursively perform which saves a bit on memory usage
        # but must keep within recursion limit
        chunksize = max(fillen * 5, len(timeseries) // 128)
        part1 = lfilter(coefficients, timeseries[0:chunksize])
        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
        out = timeseries.copy()
        out[:len(part1)] = part1
        out[len(part1):] = part2[fillen:]
        return out
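Usage sketch for any of the lfilter variants above, assuming one of them is in scope: apply a simple FIR low-pass (the firwin taps are illustrative) to a PyCBC TimeSeries.

import numpy as np
import scipy.signal
from pycbc.types import TimeSeries

taps = scipy.signal.firwin(129, 0.2)              # FIR low-pass prototype
data = TimeSeries(np.random.randn(4096), delta_t=1.0 / 4096)
filtered = lfilter(taps, data)                    # same length as the input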