Example #1
def get_fd_waveform_sequence(template=None, **kwds):
    """Return values of the waveform evaluated at the sequence of frequency
    points.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    {params}

    Returns
    -------
    hplustilde: Array
        The plus phase of the waveform in frequency domain evaluated at the
        frequency points.
    hcrosstilde: Array
        The cross phase of the waveform in frequency domain evaluated at the
        frequency points.
    """
    kwds['delta_f'] = -1
    kwds['f_lower'] = -1
    p = props(template, required_args=parameters.cbc_fd_required, **kwds)
    lal_pars = _check_lal_pars(p)

    hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(
        float(p['coa_phase']), float(pnutils.solar_mass_to_kg(p['mass1'])),
        float(pnutils.solar_mass_to_kg(p['mass2'])), float(p['spin1x']),
        float(p['spin1y']), float(p['spin1z']), float(p['spin2x']),
        float(p['spin2y']), float(p['spin2z']), float(p['f_ref']),
        pnutils.megaparsecs_to_meters(float(p['distance'])),
        float(p['inclination']), lal_pars, _lalsim_enum[p['approximant']],
        p['sample_points'].lal())
    return Array(hp.data.data), Array(hc.data.data)
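A minimal usage sketch for the function above (the approximant name, masses,
and frequency grid are illustrative assumptions, not taken from the source):

import numpy
from pycbc.types import Array
from pycbc.waveform import get_fd_waveform_sequence

# evaluate the waveform only at an irregular set of frequency points
freqs = Array(numpy.linspace(30.0, 512.0, 200))
hp, hc = get_fd_waveform_sequence(approximant='IMRPhenomD',
                                  mass1=30.0, mass2=30.0,
                                  sample_points=freqs)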
Example #2
    def test_chirp(self):
        ### use a chirp as a signal

        sigt = TimeSeries(self.sig1, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = Array(np.array([352250, 352256, 352260]))

        snr = snr * nrm

        with _context:
            dof, achi_list = autochisq_from_precomputed(
                snr, cor, hacor, stride=3, num_points=20, indices=indx)

        obt_snr = achi_list[1, 1]
        obt_ach = achi_list[1, 2]
        self.assertTrue(obt_snr > 10.0 and obt_snr < 12.0)
        self.assertTrue(obt_ach < 1.e-3)
        self.assertTrue(achi_list[0, 2] > 20.0)
        self.assertTrue(achi_list[2, 2] > 20.0)
Example #3
 def __init__(self, variable_args, waveform_generator=None, data=None,
              f_lower=None, psds=None, f_upper=None, norm=None,
              **kwargs):
     if waveform_generator is None:
         raise ValueError("waveform_generator must be provided")
     if data is None:
         raise ValueError("data must be provided")
     if f_lower is None:
         raise ValueError("f_lower must be provided")
     # set up the boiler-plate attributes; note: we'll compute the
     # log evidence later
     super(GaussianLikelihood, self).__init__(
         variable_args,
         waveform_generator=waveform_generator, data=data,
         **kwargs)
     # check that the data and waveform generator have the same detectors
     if sorted(waveform_generator.detectors.keys()) != \
             sorted(self._data.keys()):
         raise ValueError("waveform generator's detectors (%s) " %(
             ','.join(sorted(waveform_generator.detector_names))) +
             "does not match data (%s)" %(
             ','.join(sorted(self._data.keys()))))
     # check that the data and waveform generator have the same epoch
     if any(waveform_generator.epoch != d.epoch
            for d in self._data.values()):
         raise ValueError("waveform generator does not have the same epoch "
             "as all of the data sets.")
     # check that the data sets all have the same lengths
     dlens = numpy.array([len(d) for d in data.values()])
     if not all(dlens == dlens[0]):
         raise ValueError("all data must be of the same length")
     # we'll use the first data set for setting values
     d = list(data.values())[0]
     N = len(d)
     # figure out the kmin, kmax to use
     kmin, kmax = filter.get_cutoff_indices(f_lower, f_upper, d.delta_f,
         (N-1)*2)
     self._kmin = kmin
     self._kmax = kmax
     if norm is None:
         norm = 4*d.delta_f
     # we'll store the weight to apply to the inner product
     if psds is None:
         w = Array(numpy.sqrt(norm)*numpy.ones(N))
         self._weight = {det: w for det in data}
     else:
         # temporarily suppress numpy divide by 0 warning
         numpysettings = numpy.seterr(divide='ignore')
         self._weight = {det: Array(numpy.sqrt(norm/psds[det]))
                         for det in data}
         numpy.seterr(**numpysettings)
     # whiten the data
     for det in self._data:
         self._data[det][kmin:kmax] *= self._weight[det][kmin:kmax]
     # compute the log likelihood function of the noise and save it
     self.set_lognl(-0.5*sum([
         d[kmin:kmax].inner(d[kmin:kmax]).real
         for d in self._data.values()]))
     # set default call function to logplr
     self.set_callfunc('logplr')
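The stored noise log likelihood is just minus half of the weighted inner
product of the data with itself over the analyzed band. A standalone numpy
sketch of that quantity (the PSD, band, and data here are hypothetical
stand-ins, not the class's API):

import numpy

delta_f = 0.25
rng = numpy.random.default_rng()
d = rng.normal(size=513) + 1j * rng.normal(size=513)   # stand-in data
psd = numpy.ones(513)                                  # stand-in (white) PSD
kmin, kmax = 40, 500
dw = d * numpy.sqrt(4 * delta_f / psd)                 # whiten, as in the loop above
lognl = -0.5 * numpy.sum((dw[kmin:kmax] * dw[kmin:kmax].conj()).real)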
Example #4
 def __init__(self,
              variable_params,
              data,
              waveform_generator,
              f_lower,
              psds=None,
              f_upper=None,
              norm=None,
              **kwargs):
     # set up the boiler-plate attributes; note: we'll compute the
     # log evidence later
     super(GaussianNoise, self).__init__(variable_params, data,
                                         waveform_generator, **kwargs)
     # check that the data and waveform generator have the same detectors
     if (sorted(waveform_generator.detectors.keys()) != sorted(
             self._data.keys())):
         raise ValueError(
             "waveform generator's detectors ({0}) do not "
             "match data ({1})".format(
                 ','.join(sorted(waveform_generator.detector_names)),
                 ','.join(sorted(self._data.keys()))))
     # check that the data and waveform generator have the same epoch
     if any(waveform_generator.epoch != d.epoch
            for d in self._data.values()):
         raise ValueError("waveform generator does not have the same epoch "
                          "as all of the data sets.")
     # check that the data sets all have the same lengths
     dlens = numpy.array([len(d) for d in data.values()])
     if not all(dlens == dlens[0]):
         raise ValueError("all data must be of the same length")
     # we'll use the first data set for setting values
     d = list(data.values())[0]
     N = len(d)
     # figure out the kmin, kmax to use
     self._f_lower = f_lower
     kmin, kmax = pyfilter.get_cutoff_indices(f_lower, f_upper, d.delta_f,
                                              (N - 1) * 2)
     self._kmin = kmin
     self._kmax = kmax
     if norm is None:
         norm = 4 * d.delta_f
     # we'll store the weight to apply to the inner product
     if psds is None:
         self._psds = None
         w = Array(numpy.sqrt(norm) * numpy.ones(N))
         self._weight = {det: w for det in data}
     else:
         # store a copy of the psds
         self._psds = {ifo: d.copy() for (ifo, d) in psds.items()}
         # temporarily suppress numpy divide by 0 warning
         numpysettings = numpy.seterr(divide='ignore')
         self._weight = {
             det: Array(numpy.sqrt(norm / psds[det]))
             for det in data
         }
         numpy.seterr(**numpysettings)
     # whiten the data
     for det in self._data:
         self._data[det][kmin:kmax] *= self._weight[det][kmin:kmax]
Example #5
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series

    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.filter import correlate
    fillen = len(coefficients)

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return TimeSeries(series,
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - fillen + 1)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return TimeSeries(out.numpy() / len(out),
                          epoch=timeseries.start_time,
                          delta_t=timeseries.delta_t)
    else:
        # perform the filtering recursively, which saves a bit of memory,
        # but must stay within the recursion limit
        chunksize = max(fillen * 5, len(timeseries) // 128)
        part1 = lfilter(coefficients, timeseries[0:chunksize])
        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
        out = timeseries.copy()
        out[:len(part1)] = part1
        out[len(part1):] = part2[fillen:]
        return out
Example #6
 def __init__(self,
              variable_params,
              data,
              low_frequency_cutoff,
              psds=None,
              high_frequency_cutoff=None,
              norm=None,
              static_params=None,
              **kwargs):
     # set up the boiler-plate attributes
     super(GaussianNoise, self).__init__(variable_params,
                                         data,
                                         static_params=static_params,
                                         **kwargs)
     # create the waveform generator
     self._waveform_generator = create_waveform_generator(
         self.variable_params,
         self.data,
         recalibration=self.recalibration,
         gates=self.gates,
         **self.static_params)
     # check that the data sets all have the same lengths
     dlens = numpy.array([len(d) for d in self.data.values()])
     if not all(dlens == dlens[0]):
         raise ValueError("all data must be of the same length")
     # we'll use the first data set for setting values
     d = list(self.data.values())[0]
     N = len(d)
     # Set low frequency cutoff
     self._f_lower = low_frequency_cutoff
     self._f_upper = high_frequency_cutoff
     kmin, kmax = pyfilter.get_cutoff_indices(self._f_lower, self._f_upper,
                                              d.delta_f, (N - 1) * 2)
     self._kmin = kmin
     self._kmax = kmax
     if norm is None:
         norm = 4 * d.delta_f
     # we'll store the weight to apply to the inner product
     if psds is None:
         self._psds = None
         w = Array(numpy.sqrt(norm) * numpy.ones(N))
         self._weight = {det: w for det in data}
     else:
         # store a copy of the psds
         self._psds = {ifo: d.copy() for (ifo, d) in psds.items()}
         # temporarily suppress numpy divide by 0 warning
         numpysettings = numpy.seterr(divide='ignore')
         self._weight = {
             det: Array(numpy.sqrt(norm / psds[det]))
             for det in data
         }
         numpy.seterr(**numpysettings)
     # whiten the data
     for det in self._data:
         self._data[det][kmin:kmax] *= self._weight[det][kmin:kmax]
Example #7
 def __init__(self,
              waveform_generator,
              data,
              f_lower,
              psds=None,
              f_upper=None,
              norm=None,
              prior=None,
              sampling_parameters=None,
              replace_parameters=None,
              sampling_transforms=None,
              return_meta=True):
     # set up the boiler-plate attributes; note: we'll compute the
     # log evidence later
     super(GaussianLikelihood,
           self).__init__(waveform_generator,
                          data,
                          prior=prior,
                          sampling_parameters=sampling_parameters,
                          replace_parameters=replace_parameters,
                          sampling_transforms=sampling_transforms,
                          return_meta=return_meta)
     # we'll use the first data set for setting values
      d = list(data.values())[0]
     N = len(d)
     # figure out the kmin, kmax to use
     kmin, kmax = filter.get_cutoff_indices(f_lower, f_upper, d.delta_f,
                                            (N - 1) * 2)
     self._kmin = kmin
     self._kmax = kmax
     if norm is None:
         norm = 4 * d.delta_f
     # we'll store the weight to apply to the inner product
     if psds is None:
         w = Array(numpy.sqrt(norm) * numpy.ones(N))
         self._weight = {det: w for det in data}
     else:
         # temporarily suppress numpy divide by 0 warning
         numpysettings = numpy.seterr(divide='ignore')
         self._weight = {
             det: Array(numpy.sqrt(norm / psds[det]))
             for det in data
         }
         numpy.seterr(**numpysettings)
     # whiten the data
     for det in self._data:
         self._data[det][kmin:kmax] *= self._weight[det][kmin:kmax]
     # compute the log likelihood function of the noise and save it
     self.set_lognl(-0.5 * sum([
         d[kmin:kmax].inner(d[kmin:kmax]).real for d in self._data.values()
     ]))
      # set default call function to logplr
     self.set_callfunc('logplr')
Example #8
    def __init__(self, xs, zs, size):
        """ Correlate x and y, store in z. Arrays need not be equal length, but
        must be at least size long and of the same dtype. No error checking
        will be performed, so be careful. All dtypes must be the same.
        Note, must be created within the processing context that it will be used in.
        """
        self.size = int(size)
        self.dtype = xs[0].dtype
        self.num_vectors = len(xs)

        # Store each pointer as an integer array
        self.x = Array([v.ptr for v in xs], dtype=numpy.int64)
        self.z = Array([v.ptr for v in zs], dtype=numpy.int64)
Example #9
def _lalsim_fd_sequence(**p):
    """ Shim to interface to lalsimulation SimInspiralChooseFDWaveformSequence
    """
    lal_pars = _check_lal_pars(p)
    hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(
        float(p['coa_phase']), float(pnutils.solar_mass_to_kg(p['mass1'])),
        float(pnutils.solar_mass_to_kg(p['mass2'])), float(p['spin1x']),
        float(p['spin1y']), float(p['spin1z']), float(p['spin2x']),
        float(p['spin2y']), float(p['spin2z']), float(p['f_ref']),
        pnutils.megaparsecs_to_meters(float(p['distance'])),
        float(p['inclination']), lal_pars, _lalsim_enum[p['approximant']],
        p['sample_points'].lal())
    return Array(hp.data.data), Array(hc.data.data)
Example #10
 def test_eigen_directions(self):
     evalsStock = Array(numpy.loadtxt('%sstockEvals.dat' % (self.dataDir)))
     evecsStock = Array(numpy.loadtxt('%sstockEvecs.dat' % (self.dataDir)))
     maxEval = max(evalsStock)
     evalsCurr = Array(self.metricParams.evals[self.f_upper])
     evecsCurr = Array(self.metricParams.evecs[self.f_upper])
     errMsg = "pycbc.tmpltbank.determine_eigen_directions has failed "
     errMsg += "sanity check."
     evalsDiff = abs(evalsCurr - evalsStock) / maxEval
     self.assertTrue(not (evalsDiff > 1E-5).any(), msg=errMsg)
     for stock, test in zip(evecsStock.data, evecsCurr.data):
         stockScaled = stock * evalsCurr.data**0.5
         testScaled = test * evalsCurr.data**0.5
         diff = stockScaled - testScaled
         self.assertTrue(not (diff > 1E-4).any(), msg=errMsg)
Example #11
    def psds(self, psds):
        """Sets the psds, and calculates the weight and norm from them.

        The data and the low and high frequency cutoffs must be set first.
        """
        # check that the data has been set
        if self._data is None:
            raise ValueError("No data set")
        if self._f_lower is None:
            raise ValueError("low frequency cutoff not set")
        if self._f_upper is None:
            raise ValueError("high frequency cutoff not set")
        # make sure the relevant caches are cleared
        self._psds.clear()
        self._weight.clear()
        self._lognorm.clear()
        self._det_lognls.clear()
        for det, d in self._data.items():
            if psds is None:
                # No psd means assume white PSD
                p = FrequencySeries(numpy.ones(int(self._N / 2 + 1)),
                                    delta_f=d.delta_f)
            else:
                # copy for storage
                p = psds[det].copy()
            self._psds[det] = p
            # we'll store the weight to apply to the inner product
            w = Array(numpy.zeros(len(p)))
            # only set weight in band we will analyze
            kmin = self._kmin[det]
            kmax = self._kmax[det]
            w[kmin:kmax] = numpy.sqrt(4. * p.delta_f / p[kmin:kmax])
            self._weight[det] = w
        # set the lognl and lognorm; we'll get this by just calling lognl
        _ = self.lognl
示例#12
0
def lfilter(coefficients, timeseries):
    """ Apply filter coefficients to a time series
    
    Parameters
    ----------
    coefficients: numpy.ndarray
        Filter coefficients to apply
    timeseries: numpy.ndarray
        Time series to be filtered.

    Returns
    -------
    tseries: numpy.ndarray
        filtered array
    """
    from pycbc.fft import fft, ifft
    from pycbc.filter import correlate

    # If there aren't many points just use the default scipy method
    if len(timeseries) < 2**7:
        if hasattr(timeseries, 'numpy'):
            timeseries = timeseries.numpy()
        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
        return series
    else:
        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
        cseries.resize(len(timeseries))
        cseries.roll(len(timeseries) - len(coefficients) + 1)
        timeseries = Array(timeseries, copy=False)

        flen = len(cseries) // 2 + 1
        ftype = complex_same_precision_as(timeseries)

        cfreq = zeros(flen, dtype=ftype)
        tfreq = zeros(flen, dtype=ftype)

        fft(Array(cseries), cfreq)
        fft(Array(timeseries), tfreq)

        cout = zeros(flen, ftype)
        out = zeros(len(timeseries), dtype=timeseries.dtype)

        correlate(cfreq, tfreq, cout)
        ifft(cout, out)

        return out.numpy() / len(out)
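The FFT branch above relies on the fact that filtering with FIR coefficients
is a (circular) convolution, which the code realizes via frequency-domain
correlation with a reversed, rolled copy of the coefficients. A pure-numpy
check of that identity (the filter taps and input are illustrative; the first
len(b) - 1 output samples differ because circular convolution wraps around):

import numpy
from scipy import signal

b = numpy.array([0.25, 0.5, 0.25])        # illustrative FIR taps
x = numpy.random.default_rng().normal(size=128)
direct = signal.lfilter(b, 1.0, x)
circ = numpy.fft.ifft(numpy.fft.fft(x) * numpy.fft.fft(b, len(x))).real
assert numpy.allclose(direct[len(b) - 1:], circ[len(b) - 1:])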
Example #13
 def combine_layout(self):
     # determine the unique ifo layouts
     self.edge_unique = []
     self.ifo_map = {}
     for ifo in self.fedges:
         if len(self.edge_unique) == 0:
             self.ifo_map[ifo] = 0
             self.edge_unique.append(Array(self.fedges[ifo]))
         else:
             for i, edge in enumerate(self.edge_unique):
                 if numpy.array_equal(edge, self.fedges[ifo]):
                     self.ifo_map[ifo] = i
                     break
             else:
                 self.ifo_map[ifo] = len(self.edge_unique)
                 self.edge_unique.append(Array(self.fedges[ifo]))
     logging.info("%s unique ifo layouts", len(self.edge_unique))
Example #14
File: relbin.py  Project: maxtrevor/pycbc
    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        # get model params
        p = self.current_params.copy()
        p.update(self.static_params)

        hh = 0.0
        hd = 0j
        for ifo in self.data:
            # get detector antenna pattern
            fp, fc = self.det[ifo].antenna_pattern(
                p["ra"], p["dec"], p["polarization"], self.antenna_time[ifo]
            )
            # get timeshift relative to end of data
            dt = self.det[ifo].time_delay_from_earth_center(
                p["ra"], p["dec"], p["tc"]
            )
            dtc = p["tc"] + dt - self.end_time[ifo]
            tshift = numpy.exp(-2.0j * numpy.pi * self.fedges[ifo] * dtc)
            # generate template and calculate waveform ratio
            hp, hc = get_fd_waveform_sequence(
                sample_points=Array(self.fedges[ifo]), **p
            )
            htilde = numpy.array(fp * hp + fc * hc) * tshift
            r = (htilde / self.h00_sparse[ifo]).astype(numpy.complex128)
            r0 = r[:-1]
            r1 = (r[1:] - r[:-1]) / (
                self.fedges[ifo][1:] - self.fedges[ifo][:-1]
            )

            # <h, d> is sum over bins of A0r0 + A1r1
            hd += numpy.sum(
                self.sdat[ifo]["a0"] * r0 + self.sdat[ifo]["a1"] * r1
            )
            # <h, h> is sum over bins of B0|r0|^2 + 2B1Re(r1r0*)
            hh += numpy.sum(
                self.sdat[ifo]["b0"] * numpy.absolute(r0) ** 2.0
                + 2.0 * self.sdat[ifo]["b1"] * (r1 * numpy.conjugate(r0)).real
            )
        hd = abs(hd)
        llr = numpy.log(special.i0e(hd)) + hd - 0.5 * hh
        return float(llr)
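Note that the last two lines return the phase-marginalized version of the
ratio in the docstring: log I0(|<h,d>|) replaces the real part of <h,d>, and
scipy.special.i0e (the exponentially scaled Bessel function) keeps the
evaluation stable for large arguments via log I0(x) = log i0e(x) + x. A quick
numerical check of that identity:

import numpy
from scipy import special

x = 50.0
assert numpy.isclose(numpy.log(special.i0e(x)) + x, numpy.log(special.i0(x)))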
Example #15
    def test_sg(self):
        ### use a sin-gaussian as a signal

        sigt = TimeSeries(self.sig2, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = Array(np.array([301440, 301450, 301460]))

        snr = snr * nrm
        with _context:
            dof, achi_list = autochisq_from_precomputed(
                snr, cor, hacor, stride=3, num_points=20, indices=indx)
        obt_snr = achi_list[1, 1]
        obt_ach = achi_list[1, 2]
        self.assertTrue(obt_snr > 12.0 and obt_snr < 15.0)
        self.assertTrue(obt_ach > 6.8e3)
        self.assertTrue(achi_list[0, 2] > 6.8e3)
        self.assertTrue(achi_list[2, 2] > 6.8e3)

        with _context:
            dof, achi_list = autochisq(
                self.htilde, sig_tilde, psd, stride=3, num_points=20,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax,
                max_snr=True)

        self.assertTrue(obt_snr == achi_list[0, 1])
        self.assertTrue(obt_ach == achi_list[0, 2])

        for i in range(1, len(achi_list)):
            self.assertTrue(achi_list[i, 2] > 2.e3)
Example #16
File: nltides.py  Project: vivienr/pycbc
def nonlinear_tidal_spa(**kwds):
    from . import waveform
    from pycbc.types import Array

    # We start with the standard TaylorF2 based waveform
    kwds.pop('approximant')
    hp, hc = waveform.get_fd_waveform(approximant="TaylorF2", **kwds)

    # Add the phasing difference from the nonlinear tides
    kmin = int((kwds['f0'] / hp.delta_f))
    f = numpy.arange(kmin, len(hp)) * hp.delta_f
    pd = Array(numpy.exp(1.0j * nonlinear_phase_difference(
        f, kwds['f0'], kwds['A'], kwds['n'], kwds['mass1'], kwds['mass2'])),
               dtype=hp.dtype)
    hp[kmin:] *= pd
    hc[kmin:] *= pd
    return hp, hc
Example #17
def td_taper(out, start, end, beta=8, side='left'):
    """Applies a taper to the given TimeSeries.

    A half-kaiser window is used for the roll-off.

    Parameters
    ----------
    out : TimeSeries
        The ``TimeSeries`` to taper.
    start : float
        The time (in s) to start the taper window.

    end : float
        The time (in s) to end the taper window.
    beta : int, optional
        The beta parameter to use for the Kaiser window. See
        ``scipy.signal.kaiser`` for details. Default is 8.
    side : {'left', 'right'}
        The side to apply the taper to. If ``'left'`` (``'right'``), the taper
        will roll up (down) between ``start`` and ``end``, with all values
        before ``start`` (after ``end``) set to zero. Default is ``'left'``.

    Returns
    -------
    TimeSeries
        The tapered time series.
    """
    out = out.copy()
    width = end - start
    winlen = 2 * int(width / out.delta_t)
    window = Array(signal.get_window(('kaiser', beta), winlen))
    xmin = int((start - out.start_time) / out.delta_t)
    xmax = xmin + winlen // 2
    if side == 'left':
        out[xmin:xmax] *= window[:winlen // 2]
        if xmin > 0:
            out[:xmin].clear()
    elif side == 'right':
        out[xmin:xmax] *= window[winlen // 2:]
        if xmax < len(out):
            out[xmax:].clear()
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return out
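A usage sketch for the taper above (the sample rate and taper window are
illustrative assumptions):

import numpy
from pycbc.types import TimeSeries

ts = TimeSeries(numpy.ones(4096), delta_t=1.0 / 1024)
# roll the series up from zero over the first half second
tapered = td_taper(ts, 0.0, 0.5, side='left')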
Example #18
def apply_fd_time_shift(htilde, shifttime, kmin=0, fseries=None, copy=True):
    """Shifts a frequency domain waveform in time. The shift applied is
    shifttime - htilde.epoch.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform frequency series.
    shifttime : float
        The time to shift the frequency series to.
    kmin : {0, int}
        The starting index of htilde to apply the time shift. Default is 0.
    fseries : {None, numpy array}
        The frequencies of each element in htilde. This is only needed if htilde is not
        sampled at equal frequency steps.
    copy : {True, bool}
        Make a copy of htilde before applying the time shift. If False, the time
        shift will be applied to htilde's data.

    Returns
    -------
    FrequencySeries
        A frequency series with the waveform shifted to the new time. If copy
        is True, this will be a new frequency series; if copy is False, it
        will be the same as htilde.
    """
    dt = float(shifttime - htilde.epoch)
    if dt == 0.:
        # no shift to apply, just copy if desired
        if copy:
            htilde = 1. * htilde
    elif isinstance(htilde, FrequencySeries):
        # FrequencySeries means equally sampled in frequency, use faster shifting
        htilde = apply_fseries_time_shift(htilde, dt, kmin=kmin, copy=copy)
    else:
        if fseries is None:
            fseries = htilde.sample_frequencies.numpy()
        shift = Array(numpy.exp(-2j * numpy.pi * dt * fseries),
                      dtype=complex_same_precision_as(htilde))
        if copy:
            htilde = 1. * htilde
        htilde *= shift
    return htilde
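The multiplicative shift is the discrete Fourier time-shift theorem:
multiplying the spectrum by exp(-2*pi*i*f*dt) delays the series by dt
(circularly, for a sampled signal). A plain-numpy check with an integer
sample shift:

import numpy

x = numpy.random.default_rng().normal(size=256)
m = 5                                        # delay in samples
xf = numpy.fft.rfft(x)
f = numpy.fft.rfftfreq(len(x), d=1.0)
shifted = numpy.fft.irfft(xf * numpy.exp(-2j * numpy.pi * f * m), len(x))
assert numpy.allclose(shifted, numpy.roll(x, m))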
Example #19
def fd_taper(out, start, end, beta=8, side='left'):
    """Applies a taper to the given FrequencySeries.

    A half-kaiser window is used for the roll-off.

    Parameters
    ----------
    out : FrequencySeries
        The ``FrequencySeries`` to taper.
    start : float
        The frequency (in Hz) to start the taper window.
    end : float
        The frequency (in Hz) to end the taper window.
    beta : int, optional
        The beta parameter to use for the Kaiser window. See
        ``scipy.signal.kaiser`` for details. Default is 8.
    side : {'left', 'right'}
        The side to apply the taper to. If ``'left'`` (``'right'``), the taper
        will roll up (down) between ``start`` and ``end``, with all values
        before ``start`` (after ``end``) set to zero. Default is ``'left'``.

    Returns
    -------
    FrequencySeries
        The tapered frequency series.
    """
    out = out.copy()
    width = end - start
    winlen = 2 * int(width / out.delta_f)
    window = Array(signal.get_window(('kaiser', beta), winlen))
    kmin = int(start / out.delta_f)
    kmax = kmin + winlen // 2
    if side == 'left':
        out[kmin:kmax] *= window[:winlen // 2]
        out[:kmin] *= 0.
    elif side == 'right':
        out[kmin:kmax] *= window[winlen // 2:]
        out[kmax:] *= 0.
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return out
Example #20
def nonlinear_tidal_spa(**kwds):
    """Generates a frequency-domain waveform that implements the
    TaylorF2+NL tide model described in https://arxiv.org/abs/1808.07013
    """

    from pycbc import waveform
    from pycbc.types import Array

    # We start with the standard TaylorF2 based waveform
    kwds.pop('approximant')
    hp, hc = waveform.get_fd_waveform(approximant="TaylorF2", **kwds)

    # Add the phasing difference from the nonlinear tides
    f = numpy.arange(len(hp)) * hp.delta_f
    pd = Array(numpy.exp(-1.0j * nltides_fourier_phase_difference(
        f, hp.delta_f, kwds['f0'], kwds['amplitude'], kwds['n'], kwds['mass1'],
        kwds['mass2'])),
               dtype=hp.dtype)
    hp *= pd
    hc *= pd
    return hp, hc
Example #21
    def test_sg(self):
        ### use a sin-gaussian as a signal

        sigt = TimeSeries(self.sig2, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)
            hautocor = hautocor * float(np.real(1. / hautocor[0]))

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = np.array([301440, 301450, 301460])

        snr = snr*nrm

        with _context:
            dof, achisq, indices = autochisq_from_precomputed(
                snr, snr, hacor, indx, stride=3, num_points=20)

        obt_snr = abs(snr[indices[1]])
        obt_ach = achisq[1]
        self.assertTrue(obt_snr > 12.0 and obt_snr < 15.0)
        self.assertTrue(obt_ach > 6.8e3)
        self.assertTrue(achisq[0] > 6.8e3)
        self.assertTrue(achisq[2] > 6.8e3)
Example #22
def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann',
          avg_method='median', num_segments=None, require_exact_data_fit=False):
    """PSD estimator based on Welch's method.

    Parameters
    ----------
    timeseries : TimeSeries
        Time series for which the PSD is to be estimated.
    seg_len : int
        Segment length in samples.
    seg_stride : int
        Separation between consecutive segments, in samples.
    window : {'hann'}
        Function used to window segments before Fourier transforming.
    avg_method : {'median', 'mean', 'median-mean'}
        Method used for averaging individual segment PSDs.

    Returns
    -------
    psd : FrequencySeries
        Frequency series containing the estimated PSD.

    Raises
    ------
    ValueError
        For invalid choices of `seg_len`, `seg_stride`, `window` and
        `avg_method`, and for inconsistent combinations of len(`timeseries`),
        `seg_len` and `seg_stride`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    window_map = {
        'hann': numpy.hanning
    }

    # sanity checks
    if window not in window_map:
        raise ValueError('Invalid window')
    if avg_method not in ('mean', 'median', 'median-mean'):
        raise ValueError('Invalid averaging method')
    if type(seg_len) is not int or type(seg_stride) is not int \
        or seg_len <= 0 or seg_stride <= 0:
        raise ValueError('Segment length and stride must be positive integers')

    if timeseries.precision == 'single':
        fs_dtype = numpy.complex64
    elif timeseries.precision == 'double':
        fs_dtype = numpy.complex128
        
    num_samples = len(timeseries)
    if num_segments is None:
        num_segments = int(num_samples // seg_stride)
        # NOTE: Is this not always true?
        if (num_segments - 1) * seg_stride + seg_len > num_samples:
            num_segments -= 1

    if not require_exact_data_fit:
        data_len = (num_segments - 1) * seg_stride + seg_len

        # Get the correct amount of data
        if data_len < num_samples:
            diff = num_samples - data_len
            start = diff // 2
            end = num_samples - diff // 2
            # The slice bounds must be integers, so if diff is odd,
            # adjust start here.
            if diff % 2:
                start = start + 1

            timeseries = timeseries[start:end]
            num_samples = len(timeseries)
        if data_len > num_samples:
            err_msg = "I was asked to estimate a PSD on %d " % (data_len)
            err_msg += "data samples. However the data provided only contains "
            err_msg += "%d data samples." % (num_samples)
            raise ValueError(err_msg)

    if num_samples != (num_segments - 1) * seg_stride + seg_len:
        raise ValueError('Incorrect choice of segmentation parameters')
        
    w = Array(window_map[window](seg_len).astype(timeseries.dtype))

    # calculate psd of each segment
    delta_f = 1. / timeseries.delta_t / seg_len
    segment_tilde = FrequencySeries(numpy.zeros(seg_len // 2 + 1),
                                    delta_f=delta_f, dtype=fs_dtype)

    segment_psds = []
    for i in range(num_segments):
        segment_start = i * seg_stride
        segment_end = segment_start + seg_len
        segment = timeseries[segment_start:segment_end]
        assert len(segment) == seg_len
        fft(segment * w, segment_tilde)
        seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy()

        # halve the DC and Nyquist components to be consistent with T010095
        seg_psd[0] /= 2
        seg_psd[-1] /= 2

        segment_psds.append(seg_psd)

    segment_psds = numpy.array(segment_psds)   

    if avg_method == 'mean':
        psd = numpy.mean(segment_psds, axis=0)
    elif avg_method == 'median':
        psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments)
    elif avg_method == 'median-mean':
        odd_psds = segment_psds[::2]
        even_psds = segment_psds[1::2]
        odd_median = numpy.median(odd_psds, axis=0) / \
            median_bias(len(odd_psds))
        even_median = numpy.median(even_psds, axis=0) / \
            median_bias(len(even_psds))
        psd = (odd_median + even_median) / 2

    psd *= 2 * delta_f * seg_len / (w*w).sum()

    return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype,
                           epoch=timeseries.start_time)
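A usage sketch for the estimator above (white-noise input; the duration and
segment parameters are illustrative and chosen so the segments tile the data
exactly):

import numpy
from pycbc.types import TimeSeries

noise = TimeSeries(numpy.random.default_rng().normal(size=16 * 4096),
                   delta_t=1.0 / 4096)
psd = welch(noise, seg_len=4096, seg_stride=2048, avg_method='median')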
Example #23
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None):
    """Modify a PSD such that the impulse response associated with its inverse
    square root is no longer than `max_filter_len` time samples. In practice
    this corresponds to a coarse graining or smoothing of the PSD.

    Parameters
    ----------
    psd : FrequencySeries
        PSD whose inverse spectrum is to be truncated.
    max_filter_len : int
        Maximum length of the time-domain filter in samples.
    low_frequency_cutoff : {None, int}
        Frequencies below `low_frequency_cutoff` are zeroed in the output.
    trunc_method : {None, 'hann'}
        Function used for truncating the time-domain filter.
        None produces a hard truncation at `max_filter_len`.

    Returns
    -------
    psd : FrequencySeries
        PSD whose inverse spectrum has been truncated.

    Raises
    ------
    ValueError
        For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.

    Notes
    -----
    See arXiv:gr-qc/0509116 for details.
    """
    # sanity checks
    if type(max_filter_len) is not int or max_filter_len <= 0:
        raise ValueError('max_filter_len must be a positive integer')
    if low_frequency_cutoff is not None and \
            (low_frequency_cutoff < 0 or
             low_frequency_cutoff > psd.sample_frequencies[-1]):
        raise ValueError('low_frequency_cutoff must be within the bandwidth '
                         'of the PSD')

    N = (len(psd)-1)*2

    inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f, \
        dtype=complex_same_precision_as(psd))
        
    inv_asd[0] = 0
    inv_asd[N // 2] = 0
    q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f), \
        dtype=real_same_precision_as(psd))

    if low_frequency_cutoff:
        kmin = int(low_frequency_cutoff / psd.delta_f)
        inv_asd[0:kmin] = 0

    ifft(inv_asd, q)
    
    trunc_start = max_filter_len // 2
    trunc_end = N - max_filter_len // 2

    if trunc_method == 'hann':
        trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
        q[0:trunc_start] *= trunc_window[max_filter_len // 2:max_filter_len]
        q[trunc_end:N] *= trunc_window[0:max_filter_len // 2]

    q[trunc_start:trunc_end] = 0
    psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \
                                dtype=complex_same_precision_as(psd))
    fft(q, psd_trunc)
    psd_trunc *= psd_trunc.conj()
    psd_out = 1. / abs(psd_trunc)

    return psd_out
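A usage sketch for the truncation above (a flat stand-in PSD; the filter
length and cutoff are illustrative assumptions):

import numpy
from pycbc.types import FrequencySeries

flat = FrequencySeries(numpy.ones(2049), delta_f=1.0)   # stand-in PSD
smoothed = inverse_spectrum_truncation(flat, max_filter_len=512,
                                       low_frequency_cutoff=20.0,
                                       trunc_method='hann')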
Example #24
def sin_cos_lookup():
    vec = numpy.arange(0, lal.TWOPI * 3, lal.TWOPI / 10000)
    return Array(numpy.sin(vec)).astype(float32)
Example #25
def get_log(vmax, delta):
    global _logv_vec
    if _logv_vec is None or (_logv_vec.delta_f !=
                             delta) or (len(_logv_vec) < int(vmax / delta)):
        _logv_vec = logv_lookup(vmax, delta)
    return _logv_vec


# Precompute the sine function #################################################
def sin_cos_lookup():
    vec = numpy.arange(0, lal.TWOPI * 3, lal.TWOPI / 10000)
    return Array(numpy.sin(vec)).astype(float32)


sin_cos = Array([], dtype=float32)


def spa_tmplt_engine(htilde, kmin, phase_order, delta_f, piM, pfaN, pfa2, pfa3,
                     pfa4, pfa5, pfl5, pfa6, pfl6, pfa7, amp_factor):
    """ Calculate the spa tmplt phase 
    """
    kfac = numpy.array(spa_tmplt_precondition(len(htilde), delta_f, kmin).data,
                       copy=False)
    htilde = numpy.array(htilde.data, copy=False)
    cbrt_vec = numpy.array(get_cbrt(len(htilde) * delta_f + kmin,
                                    delta_f).data,
                           copy=False)
    logv_vec = numpy.array(get_log(len(htilde) * delta_f + kmin, delta_f).data,
                           copy=False)
    length = len(htilde)
Example #26
    avgSpecParams.overlap = numPoints // 2
    avgSpecParams.window = lal.CreateHannREAL4Window(numPoints)
    lal.REAL4AverageSpectrum(spec, chan, avgSpecParams)
    return FrequencySeries(spec.data.data, delta_f=spec.deltaF)

pad_data = 8
sample_rate = 4096
duration = 2048
start_time = 968605000
seg_len = 256 * 4096
stride = 128 * 4096
nsegs = 15
delta_f = 1.0 / 256

# Check that we are using the same hann window
w_pycbc = Array(numpy.hanning(seg_len).astype(float32))
w_lal = lal.CreateHannREAL4Window(seg_len)
print "hann props pycbc", w_pycbc.sum(), w_pycbc.squared_norm().sum()
print "hann props lal", w_lal.sum, w_lal.sumofsquares

# Check that the median bias is the same
print "%s segments" % nsegs
print "BIAS pycbc", pycbc.psd.median_bias(nsegs)
print "BIAS lal", lal.RngMedBias(nsegs)

# Check the psd norm
print "PYCBC psd norm", 2.0 / float(sample_rate) / (w_pycbc.squared_norm().sum()) / pycbc.psd.median_bias(nsegs)

# Same strain preparation for lal and pycbc psd estimation
strain = read_frame("LER2.lcf", "H1:LDAS-STRAIN",
                    start_time=start_time - pad_data,
                    duration=duration + pad_data * 2)
strain = highpass(strain, frequency=30)
Example #27
with ctx:
    in1 = zeros(1024 * 1024, dtype=complex64)
    in2 = zeros(1024 * 1024, dtype=complex64)
    in1.data[:1024*512] = random(1024*512) + 1j * random(1024*512)
    in2.data[:1024*512] = random(1024*512) + 1j * random(1024*512)

    out_np = zeros(1024 * 1024, dtype=complex64)
    out_parallel = zeros(1024 * 1024, dtype=complex64)

    print("Running with aligned input")
    correlate_numpy(in1, in2, out_np)
    correlate_parallel(in1, in2, out_parallel)
    print("Results: {0}".format(out_np == out_parallel))

    a = zeros(1024 * 1024 + 1, dtype=complex64)
    b = zeros(1024 * 1024 + 1, dtype=complex64)
    c = zeros(1024 * 1024 + 1, dtype=complex64)
    d = zeros(1024 * 1024 + 1, dtype=complex64)
    in1 = Array(a[1:], copy=False)
    in2 = Array(b[1:], copy=False)
    out_np = Array(c[1:], copy=False)
    out_parallel = Array(d[1:], copy=False)
    in1.data[:1024*512] = random(1024*512) + 1j * random(1024*512)
    in2.data[:1024*512] = random(1024*512) + 1j * random(1024*512)

    print("Running with mis-aligned input")
    correlate_numpy(in1, in2, out_np)
    correlate_parallel(in1, in2, out_parallel)
    print("Results: {0}".format(out_np == out_parallel))

Example #28
    def __init__(self,
                 variable_params,
                 data,
                 low_frequency_cutoff,
                 fiducial_params=None,
                 epsilon=0.5,
                 **kwargs):
        super(Relative, self).__init__(variable_params, data,
                                       low_frequency_cutoff, **kwargs)
        # check that all of the frequency cutoffs are the same
        # FIXME: this can probably be loosened at some point
        kmins = list(self.kmin.values())
        kmaxs = list(self.kmax.values())
        if any(kk != kmins[0] for kk in kmins):
            raise ValueError("All lower frequency cutoffs must be the same")
        if any(kk != kmaxs[0] for kk in kmaxs):
            raise ValueError("All high frequency cutoffs must be the same")
        # store data and frequencies
        d0 = list(self.data.values())[0]
        self.f = numpy.array(d0.sample_frequencies)
        self.df = d0.delta_f
        self.end_time = float(d0.end_time)
        self.det = {ifo: Detector(ifo) for ifo in self.data}
        self.epsilon = float(epsilon)
        # store data and psds as arrays for faster computation
        self.comp_data = {ifo: d.numpy() for ifo, d in self.data.items()}
        self.comp_psds = {ifo: p.numpy() for ifo, p in self.psds.items()}
        # store fiducial waveform params
        self.fid_params = fiducial_params

        # get detector-specific arrival times relative to end of data
        dt = {
            ifo:
            self.det[ifo].time_delay_from_earth_center(self.fid_params['ra'],
                                                       self.fid_params['dec'],
                                                       self.fid_params['tc'])
            for ifo in self.data
        }
        self.ta = {
            ifo: self.fid_params['tc'] + dt[ifo] - self.end_time
            for ifo in self.data
        }

        # generate fiducial waveform
        f_lo = kmins[0] * self.df
        f_hi = kmaxs[0] * self.df
        logging.info("Generating fiducial waveform from %s to %s Hz", f_lo,
                     f_hi)
        # prune low frequency samples to avoid waveform errors
        nbelow = sum(self.f < 10)
        fpoints = Array(self.f.astype(numpy.float64))[nbelow:]
        approx = self.static_params['approximant']
        fid_hp, fid_hc = get_fd_waveform_sequence(approximant=approx,
                                                  sample_points=fpoints,
                                                  **self.fid_params)
        self.h00 = {}
        for ifo in self.data:
            # make copy of fiducial wfs, adding back in low frequencies
            hp0 = numpy.concatenate([[0j] * nbelow, fid_hp.copy()])
            hc0 = numpy.concatenate([[0j] * nbelow, fid_hc.copy()])
            fp, fc = self.det[ifo].antenna_pattern(
                self.fid_params['ra'], self.fid_params['dec'],
                self.fid_params['polarization'], self.fid_params['tc'])
            tshift = numpy.exp(-2.0j * numpy.pi * self.f * self.ta[ifo])
            self.h00[ifo] = numpy.array(hp0 * fp + hc0 * fc) * tshift

        # compute frequency bins
        logging.info("Computing frequency bins")
        nbin, fbin, fbin_ind = setup_bins(f_full=self.f,
                                          f_lo=kmins[0] * self.df,
                                          f_hi=kmaxs[0] * self.df,
                                          eps=self.epsilon)
        logging.info("Using %s bins for this model", nbin)
        # store bins and edges in sample and frequency space
        self.edges = fbin_ind
        self.fedges = numpy.array(fbin).astype(numpy.float64)
        self.bins = numpy.array([(self.edges[i], self.edges[i + 1])
                                 for i in range(len(self.edges) - 1)])
        self.fbins = numpy.array([(fbin[i], fbin[i + 1])
                                  for i in range(len(fbin) - 1)])
        # store low res copy of fiducial waveform
        self.h00_sparse = {
            ifo: self.h00[ifo].copy().take(self.edges)
            for ifo in self.h00
        }

        # compute summary data
        logging.info("Calculating summary data at frequency resolution %s Hz",
                     self.df)
        self.sdat = self.summary_data()
Example #29
def bank_chisq_from_filters(tmplt_snr,
                            tmplt_norm,
                            bank_snrs,
                            bank_norms,
                            tmplt_bank_matches,
                            indices=None):
    """ This function calculates and returns a TimeSeries object containing the
    bank veto calculated over a segment.

    Parameters
    ----------
    tmplt_snr: TimeSeries
        The SNR time series from filtering the segment against the current
        search template
    tmplt_norm: float
        The normalization factor for the search template
    bank_snrs: list of TimeSeries
        The precomputed list of SNR time series between each of the bank veto
        templates and the segment
    bank_norms: list of floats
        The normalization factors for the list of bank veto templates
        (usually this will be the same for all bank veto templates)
    tmplt_bank_matches: list of floats
        The complex overlap between the search template and each
        of the bank templates
    indices: {None, Array}, optional
        Array of indices into the snr time series. If given, the bank chisq
        will only be calculated at these values.

    Returns
    -------
    bank_chisq: TimeSeries of the bank vetos
    """
    if indices is not None:
        tmplt_snr = Array(tmplt_snr, copy=False)
        bank_snrs_tmp = []
        for bank_snr in bank_snrs:
            bank_snrs_tmp.append(bank_snr.take(indices))
        bank_snrs = bank_snrs_tmp

    # Initialise bank_chisq as 0s everywhere
    bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))

    # Loop over all the bank templates
    for i in range(len(bank_snrs)):
        bank_match = tmplt_bank_matches[i]
        if (abs(bank_match) > 0.99):
            # Not much point calculating bank_chisquared if the bank template
            # is very close to the filter template. Can also hit numerical
            # error due to approximations made in this calculation.
            # The value of 2 is the expected addition to the chisq for this
            # template
            bank_chisq += 2.
            continue
        bank_norm = sqrt((1 - bank_match * bank_match.conj()).real)

        bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
        tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)

        bank_SNR = Array(bank_SNR, copy=False)
        tmplt_SNR = Array(tmplt_SNR, copy=False)

        bank_chisq += (bank_SNR - tmplt_SNR).squared_norm()

    if indices is not None:
        return bank_chisq
    else:
        return TimeSeries(bank_chisq,
                          delta_t=tmplt_snr.delta_t,
                          epoch=tmplt_snr.start_time,
                          copy=False)
Example #30
    def __init__(self,
                 low_frequency_cutoff,
                 high_frequency_cutoff,
                 snr_threshold,
                 tlen,
                 delta_f,
                 dtype,
                 segment_list,
                 template_output,
                 use_cluster,
                 downsample_factor=1,
                 upsample_threshold=1,
                 upsample_method='pruned_fft',
                 gpu_callback_method='none'):
        """ Create a matched filter engine.

        Parameters
        ----------
        low_frequency_cutoff : {None, float}, optional
            The frequency to begin the filter calculation. If None, begin at the
            first frequency after DC.
        high_frequency_cutoff : {None, float}, optional
            The frequency to stop the filter calculation. If None, continue
            to the Nyquist frequency.
        snr_threshold : float
            The minimum snr to return when filtering
        segment_list : list
            List of FrequencySeries that are the Fourier-transformed data segments
        template_output : complex64
            Array of memory given as the 'out' parameter to waveform.FilterBank
        use_cluster : boolean
            If true, cluster triggers above threshold using a window; otherwise,
            only apply a threshold.
        downsample_factor : {1, int}, optional
            The factor by which to reduce the sample rate when doing a
            hierarchical matched filter
        upsample_threshold : {1, float}, optional
            The fraction of the snr_threshold to trigger on the subsampled filter.
        upsample_method : {pruned_fft, str}
            The method to upsample or interpolate the reduced rate filter.
        """
        # Assuming analysis time is constant across templates and segments, also
        # delta_f is constant across segments.
        self.tlen = tlen
        self.flen = self.tlen // 2 + 1
        self.delta_f = delta_f
        self.dtype = dtype
        self.snr_threshold = snr_threshold
        self.flow = low_frequency_cutoff
        self.fhigh = high_frequency_cutoff
        self.gpu_callback_method = gpu_callback_method

        if downsample_factor == 1:
            self.snr_mem = zeros(self.tlen, dtype=self.dtype)
            self.corr_mem = zeros(self.tlen, dtype=self.dtype)
            self.segments = segment_list

            if use_cluster:
                self.matched_filter_and_cluster = self.full_matched_filter_and_cluster
                # setup the thresholding/clustering operations for each segment
                self.threshold_and_clusterers = []
                for seg in self.segments:
                    thresh = events.ThresholdCluster(self.snr_mem[seg.analyze])
                    self.threshold_and_clusterers.append(thresh)
            else:
                self.matched_filter_and_cluster = self.full_matched_filter_thresh_only

            # Assuming analysis time is constant across templates and segments, also
            # delta_f is constant across segments.
            self.htilde = template_output
            self.kmin, self.kmax = get_cutoff_indices(self.flow, self.fhigh,
                                                      self.delta_f, self.tlen)

            # Set up the correlation operations for each analysis segment
            corr_slice = slice(self.kmin, self.kmax)
            self.correlators = []
            for seg in self.segments:
                corr = Correlator(self.htilde[corr_slice], seg[corr_slice],
                                  self.corr_mem[corr_slice])
                self.correlators.append(corr)

            # setup up the ifft we will do
            self.ifft = IFFT(self.corr_mem, self.snr_mem)

        elif downsample_factor > 1:
            self.matched_filter_and_cluster = self.heirarchical_matched_filter_and_cluster
            self.downsample_factor = downsample_factor
            self.upsample_method = upsample_method
            self.upsample_threshold = upsample_threshold

            N_full = self.tlen
            N_red = N_full // downsample_factor
            self.kmin_full, self.kmax_full = get_cutoff_indices(
                self.flow, self.fhigh, self.delta_f, N_full)

            self.kmin_red, _ = get_cutoff_indices(self.flow, self.fhigh,
                                                  self.delta_f, N_red)

            if self.kmax_full < N_red:
                self.kmax_red = self.kmax_full
            else:
                self.kmax_red = N_red - 1

            self.snr_mem = zeros(N_red, dtype=self.dtype)
            self.corr_mem_full = FrequencySeries(zeros(N_full,
                                                       dtype=self.dtype),
                                                 delta_f=self.delta_f)
            self.corr_mem = Array(self.corr_mem_full[0:N_red], copy=False)
            self.inter_vec = zeros(N_full, dtype=self.dtype)

        else:
            raise ValueError("Invalid downsample factor")
Example #31
    def heirarchical_matched_filter_and_cluster(self, htilde, template_norm,
                                                stilde, window):
        """ Return the complex snr and normalization. 
    
        Calculated the matched filter, threshold, and cluster. 

        Parameters
        ----------
        htilde : FrequencySeries 
            The template waveform. Must come from the FilterBank class.
        template_norm : float
            The htilde, template normalization factor.
        stilde : FrequencySeries 
            The strain data to be filtered.
        window : int
            The size of the cluster window in samples.

        Returns
        -------
        snr : TimeSeries
            A time series containing the complex snr at the reduced sample rate.
        norm : float
            The normalization of the complex snr.  
        correlation : FrequencySeries
            A frequency series containing the correlation vector.
        idx : Array
            List of indices of the triggers.
        snrv : Array
            The snr values at the trigger locations.
        """
        from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose

        norm = (4.0 * stilde.delta_f) / sqrt(template_norm)

        correlate(htilde[self.kmin_red:self.kmax_red],
                  stilde[self.kmin_red:self.kmax_red],
                  self.corr_mem[self.kmin_red:self.kmax_red])

        ifft(self.corr_mem, self.snr_mem)

        if not hasattr(stilde, 'red_analyze'):
            stilde.red_analyze = \
                slice(stilde.analyze.start // self.downsample_factor,
                      stilde.analyze.stop // self.downsample_factor)

        idx_red, snrv_red = events.threshold(
            self.snr_mem[stilde.red_analyze],
            self.snr_threshold / norm * self.upsample_threshold)
        if len(idx_red) == 0:
            return [], None, [], [], []

        idx_red, _ = events.cluster_reduce(idx_red, snrv_red,
                                           window / self.downsample_factor)
        logging.info("%s points above threshold at reduced resolution"\
                      %(str(len(idx_red)),))

        # The fancy upsampling is here
        if self.upsample_method == 'pruned_fft':
            idx = (idx_red + stilde.analyze.start // self.downsample_factor) \
                   * self.downsample_factor

            idx = smear(idx, self.downsample_factor)

            # cache transposed  versions of htilde and stilde
            if not hasattr(self.corr_mem_full, 'transposed'):
                self.corr_mem_full.transposed = zeros(len(self.corr_mem_full),
                                                      dtype=self.dtype)

            if not hasattr(htilde, 'transposed'):
                htilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                htilde.transposed[self.kmin_full:self.kmax_full] = htilde[
                    self.kmin_full:self.kmax_full]
                htilde.transposed = fft_transpose(htilde.transposed)

            if not hasattr(stilde, 'transposed'):
                stilde.transposed = zeros(len(self.corr_mem_full),
                                          dtype=self.dtype)
                stilde.transposed[self.kmin_full:self.kmax_full] = stilde[
                    self.kmin_full:self.kmax_full]
                stilde.transposed = fft_transpose(stilde.transposed)

            # Correlate in the transposed layout, then evaluate the inverse
            # FFT only at the candidate indices using the pruned transform.
            correlate(htilde.transposed, stilde.transposed,
                      self.corr_mem_full.transposed)
            snrv = pruned_c2cifft(self.corr_mem_full.transposed,
                                  self.inter_vec,
                                  idx,
                                  pretransposed=True)
            idx = idx - stilde.analyze.start
            idx2, snrv = events.threshold(Array(snrv, copy=False),
                                          self.snr_threshold / norm)

            if len(idx2) > 0:
                # Complete the high-frequency portion of the correlation
                # vector (above kmax_red), which is returned to the caller.
                correlate(htilde[self.kmax_red:self.kmax_full],
                          stilde[self.kmax_red:self.kmax_full],
                          self.corr_mem_full[self.kmax_red:self.kmax_full])
                idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
            else:
                idx, snrv = [], []

            logging.info("%s points at full rate and clustering" % len(idx))
            return self.snr_mem, norm, self.corr_mem_full, idx, snrv
        else:
            raise ValueError("Invalid upsample method")
Example #32
    def setNumbers(self):
        # We create instances of our types, and need to know the generic
        # answer type so that we can convert the many basic lists into the
        # appropriate precision and kind. This logic essentially implements
        # (for our limited use cases) what the function numpy.result_type
        # does, but that function only became available in NumPy 1.6.0.
        # (A sketch using numpy.result_type directly follows this example.)
        if self.kind == 'real':
            if self.okind == 'real':
                self.result_dtype = self.dtype
            else:
                self.result_dtype = self.odtype
        else:
            self.result_dtype = self.dtype

        self.rdtype = _real_dtype_dict[self.dtype]

        # The child class (testing one of Array, TimeSeries, or FrequencySeries)
        # should set the following in its setUp method before this method (setNumbers)
        # is called:
        #    self.type = one of [Array,TimeSeries,FrequencySeries]
        #    self.kwds = dict for other kwd args beyond 'dtype'; normally an
        #                epoch and one of delta_t or delta_f

        # These are the values that should be used to initialize the test arrays.
        if self.kind == 'real':
            self.a = self.type([5, 3, 1], dtype=self.dtype, **self.kwds)
            self.alist = [5, 3, 1]
        else:
            self.a = self.type([5 + 1j, 3 + 3j, 1 + 5j],
                               dtype=self.dtype,
                               **self.kwds)
            self.alist = [5 + 1j, 3 + 3j, 1 + 5j]
        if self.okind == 'real':
            self.b = self.type([10, 8, 6], dtype=self.odtype, **self.kwds)
            self.blist = [10, 8, 6]
        else:
            self.b = self.type([10 + 6j, 8 + 4j, 6 + 2j],
                               dtype=self.odtype,
                               **self.kwds)
            self.blist = [10 + 6j, 8 + 4j, 6 + 2j]
        # And the scalar to test on
        if self.okind == 'real':
            self.scalar = 5
        else:
            self.scalar = 5 + 2j

        # The weights used in the weighted inner product test are always an Array,
        # regardless of the types whose inner product is being tested.
        self.w = Array([1, 2, 1], dtype=self.dtype)

        # All the answers are stored here to make it easier to read in the actual tests.
        # Again, it makes a difference whether they are complex or real valued, so there
        # are four sets of possible answers, depending on the dtypes.
        if self.kind == 'real' and self.okind == 'real':
            self.cumsum = self.type([5, 8, 9], dtype=self.dtype, **self.kwds)

            self.mul = self.type([50, 24, 6],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.mul_s = self.type([25, 15, 5],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.add = self.type([15, 11, 7],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.add_s = self.type([10, 8, 6],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.div = [.5, 3./8., 1./6.]
            self.div = self.type([.5, 0.375, .16666666666666666667],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            #self.div_s = [1., 3./5., 1./5.]
            self.div_s = self.type([1., 0.6, 0.2],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.rdiv = [2., 8./3., 6.]
            self.rdiv = self.type([2., 2.66666666666666666667, 6.],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            #self.rdiv_s = [1., 5./3., 5.]
            self.rdiv_s = self.type([1., 1.66666666666666666667, 5.],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.sub = self.type([-5, -5, -5],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.sub_s = self.type([0, -2, -4],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.rsub = self.type([5, 5, 5],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            self.rsub_s = self.type([0, 2, 4],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.pow1 = self.type([25., 9., 1.], dtype=self.dtype, **self.kwds)
            #self.pow2 = [pow(5,-1.5), pow(3,-1.5), pow(1,-1.5)]
            self.pow2 = self.type(
                [0.08944271909999158786, 0.19245008972987525484, 1.],
                dtype=self.dtype,
                **self.kwds)

            self.abs = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.real = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.imag = self.type([0, 0, 0], dtype=self.rdtype, **self.kwds)
            self.conj = self.type([5, 3, 1], dtype=self.dtype, **self.kwds)

            self.sum = 9

            self.dot = 80
            self.inner = self.dot
            self.weighted_inner = 68

        if self.kind == 'real' and self.okind == 'complex':
            self.cumsum = self.type([5, 8, 9], dtype=self.dtype, **self.kwds)
            self.mul = self.type([50 + 30j, 24 + 12j, 6 + 2j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.mul_s = self.type([25 + 10j, 15 + 6j, 5 + 2j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.add = self.type([15 + 6j, 11 + 4j, 7 + 2j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.add_s = self.type([10 + 2j, 8 + 2j, 6 + 2j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.div = [25./68.-15.j/68., 3./10.-3.j/20., 3./20.-1.j/20.]
            self.div = self.type([
                0.36764705882352941176 - 0.22058823529411764706j, 0.3 - 0.15j,
                0.15 - 0.05j
            ],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            #self.div_s = [25./29.-10.j/29., 15./29.-6.j/29., 5./29.-2.j/29.]
            self.div_s = self.type([
                0.86206896551724137931 - 0.34482758620689655172j,
                0.51724137931034482759 - 0.20689655172413793103j,
                0.17241379310344827586 - 0.06896551724137931034j
            ],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.rdiv = [2.+6.j/5., 8./3.+4.j/3, 6.+2.j]
            self.rdiv = self.type([
                2. + 1.2j, 2.66666666666666666667 + 1.33333333333333333333j,
                6. + 2.j
            ],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            #self.rdiv_s = [1.+2.j/5., 5./3.+2.j/3., 5.+2.j]
            self.rdiv_s = self.type([
                1. + 0.4j, 1.66666666666666666667 + 0.666666666666666666667j,
                5. + 2.j
            ],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.sub = self.type([-5 - 6j, -5 - 4j, -5 - 2j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.sub_s = self.type([0 - 2j, -2 - 2j, -4 - 2j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.rsub = self.type([5 + 6j, 5 + 4j, 5 + 2j],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            self.rsub_s = self.type([0 + 2j, 2 + 2j, 4 + 2j],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.pow1 = self.type([25., 9., 1.], dtype=self.dtype, **self.kwds)
            #self.pow2 = [pow(5,-1.5), pow(3,-1.5), pow(1,-1.5)]
            self.pow2 = self.type(
                [0.08944271909999158786, 0.19245008972987525484, 1.],
                dtype=self.dtype,
                **self.kwds)

            self.abs = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.real = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.imag = self.type([0, 0, 0], dtype=self.rdtype, **self.kwds)
            self.conj = self.type([5, 3, 1], dtype=self.dtype, **self.kwds)

            self.sum = 9

            self.dot = 80 + 44j
            self.inner = self.dot
            self.weighted_inner = 68 + 38j

        if self.kind == 'complex' and self.okind == 'real':
            self.cumsum = self.type([5 + 1j, 8 + 4j, 9 + 9j],
                                    dtype=self.dtype,
                                    **self.kwds)
            self.mul = self.type([50 + 10j, 24 + 24j, 6 + 30j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.mul_s = self.type([25 + 5j, 15 + 15j, 5 + 25j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.add = self.type([15 + 1j, 11 + 3j, 7 + 5j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.add_s = self.type([10 + 1j, 8 + 3j, 6 + 5j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.div = [1./2.+1.j/10., 3./8.+3.j/8., 1./6.+5.j/6.]
            self.div = self.type([
                0.5 + 0.1j, 0.375 + 0.375j,
                0.16666666666666666667 + 0.83333333333333333333j
            ],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            #self.div_s = [1.+1.j/5., 3./5.+3.j/5., 1./5.+1.j]
            self.div_s = self.type([1. + 0.2j, 0.6 + 0.6j, 0.2 + 1.j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.rdiv = [25./13.-5.j/13., 4./3.-4.j/3., 3./13.-15.j/13.]
            self.rdiv = self.type([
                1.92307692307692307692 - 0.38461538461538461538j,
                1.33333333333333333333 - 1.33333333333333333333j,
                0.23076923076923076923 - 1.15384615384615384615j
            ],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            #self.rdiv_s = [25./26.-5.j/26., 5./6.-5.j/6., 5./26.-25.j/26.]
            self.rdiv_s = self.type([
                0.96153846153846153846 - 0.19230769230769230769j,
                0.83333333333333333333 - 0.83333333333333333333j,
                0.19230769230769230769 - 0.96153846153846153846j
            ],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.sub = self.type([-5 + 1j, -5 + 3j, -5 + 5j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.sub_s = self.type([0 + 1j, -2 + 3j, -4 + 5j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.rsub = self.type([5 - 1j, 5 - 3j, 5 - 5j],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            self.rsub_s = self.type([0 - 1j, 2 - 3j, 4 - 5j],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.pow1 = self.type([24. + 10.j, 0. + 18.j, -24. + 10.j],
                                  dtype=self.dtype,
                                  **self.kwds)
            #self.pow2 = [pow(5+1j,-1.5), pow(3+3j,-1.5), pow(1+5j,-1.5)]
            self.pow2 = self.type([
                0.08307064054041229214 - 0.0253416052125975132j,
                0.04379104225017853491 - 0.1057209281108342370j,
                -0.04082059235165559671 - 0.0766590341356157206j
            ],
                                  dtype=self.dtype,
                                  **self.kwds)

            #self.abs = [pow(26,.5), 3*pow(2,.5), pow(26,.5)]
            self.abs = self.type([
                5.09901951359278483003, 4.24264068711928514641,
                5.09901951359278483003
            ],
                                 dtype=self.rdtype,
                                 **self.kwds)
            self.real = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.imag = self.type([1, 3, 5], dtype=self.rdtype, **self.kwds)
            self.conj = self.type([5 - 1j, 3 - 3j, 1 - 5j],
                                  dtype=self.dtype,
                                  **self.kwds)

            self.sum = 9 + 9j

            self.dot = 80 + 64j
            self.inner = 80 - 64j
            self.weighted_inner = 68 - 52j

        if self.kind == 'complex' and self.okind == 'complex':
            self.cumsum = self.type([5 + 1j, 8 + 4j, 9 + 9j],
                                    dtype=self.dtype,
                                    **self.kwds)
            self.mul = self.type([44 + 40j, 12 + 36j, -4 + 32j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.mul_s = self.type([23 + 15j, 9 + 21j, -5 + 27j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.add = self.type([15 + 7j, 11 + 7j, 7 + 7j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.add_s = self.type([10 + 3j, 8 + 5j, 6 + 7j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.div = [7./17.-5.j/34., 9./20.+3.j/20., 2./5.+7.j/10.]
            self.div = self.type([
                0.41176470588235294118 - 0.14705882352941176471j, 0.45 + 0.15j,
                0.4 + 0.7j
            ],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            #self.div_s = [27./29.-5.j/29., 21./29.+9.j/29., 15./29.+23.j/29.]
            self.div_s = self.type([
                0.93103448275862068966 - 0.17241379310344827586j,
                0.72413793103448275862 + 0.31034482758620689655j,
                0.51724137931034482759 + 0.79310344827586206897j
            ],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            #self.rdiv = [28./13.+10.j/13., 2.-2.j/3., 8./13.-14.j/13.]
            self.rdiv = self.type([
                2.15384615384615384615 + 0.76923076923076923077j,
                2. - 0.66666666666666666667j,
                0.61538461538461538462 - 1.07692307692307692308j
            ],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            #self.rdiv_s = [27./26.+5.j/26., 7./6.-1.j/2., 15./26.-23.j/26]
            self.rdiv_s = self.type([
                1.03846153846153846154 + 0.19230769230769230769j,
                1.16666666666666666667 - 0.5j,
                0.57692307692307692308 - 0.88461538461538461538j
            ],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.sub = self.type([-5 - 5j, -5 - 1j, -5 + 3j],
                                 dtype=self.result_dtype,
                                 **self.kwds)
            self.sub_s = self.type([0 - 1j, -2 + 1j, -4 + 3j],
                                   dtype=self.result_dtype,
                                   **self.kwds)

            self.rsub = self.type([5 + 5j, 5 + 1j, 5 - 3j],
                                  dtype=self.result_dtype,
                                  **self.kwds)
            self.rsub_s = self.type([0 + 1j, 2 - 1j, 4 - 3j],
                                    dtype=self.result_dtype,
                                    **self.kwds)

            self.pow1 = self.type([24. + 10.j, 0. + 18.j, -24. + 10.j],
                                  dtype=self.dtype,
                                  **self.kwds)
            #self.pow2 = [pow(5+1j,-1.5), pow(3+3j,-1.5), pow(1+5j,-1.5)]
            self.pow2 = self.type([
                0.08307064054041229214 - 0.0253416052125975132j,
                0.04379104225017853491 - 0.1057209281108342370j,
                -0.04082059235165559671 - 0.0766590341356157206j
            ],
                                  dtype=self.dtype,
                                  **self.kwds)

            #self.abs = [pow(26,.5), 3*pow(2,.5), pow(26,.5)]
            self.abs = self.type([
                5.09901951359278483003, 4.24264068711928514641,
                5.09901951359278483003
            ],
                                 dtype=self.rdtype,
                                 **self.kwds)
            self.real = self.type([5, 3, 1], dtype=self.rdtype, **self.kwds)
            self.imag = self.type([1, 3, 5], dtype=self.rdtype, **self.kwds)
            self.conj = self.type([5 - 1j, 3 - 3j, 1 - 5j],
                                  dtype=self.dtype,
                                  **self.kwds)

            self.sum = 9 + 9j

            self.dot = 52 + 108j
            self.inner = 108 - 20j
            self.weighted_inner = 90 - 14j
        self.min = 1
        self.max = 5
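Since NumPy 1.6.0 the promotion table that setNumbers hand-rolls can be read
directly from numpy.result_type. The standalone sketch below (not part of
the test class) checks the precision-matched dtype pairs these tests
exercise; mixed-precision pairs are deliberately omitted, since there the
hand-rolled logic keeps self's precision while numpy.result_type would
promote:

import numpy as np

# numpy.result_type reproduces the hand-rolled kind/precision logic for the
# precision-matched dtype pairs used in these tests.
for dtype, odtype, expected in [
    (np.float32,    np.float32,    np.float32),    # real with real
    (np.float32,    np.complex64,  np.complex64),  # real with complex
    (np.complex64,  np.float32,    np.complex64),  # complex with real
    (np.complex64,  np.complex64,  np.complex64),  # complex with complex
    (np.float64,    np.complex128, np.complex128),
    (np.complex128, np.complex128, np.complex128),
]:
    assert np.result_type(dtype, odtype) == expected
print("numpy.result_type matches the hand-rolled promotions")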