Example #1
    def test_chirp(self):
        ### use a chirp as a signal

        sigt = TimeSeries(self.sig1, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = Array(np.array([352250, 352256, 352260]))

        snr = snr * nrm

        with _context:
            dof, achi_list = autochisq_from_precomputed(
                snr, cor, hacor, stride=3, num_points=20, indices=indx)

        obt_snr = achi_list[1, 1]
        obt_ach = achi_list[1, 2]
        self.assertTrue(obt_snr > 10.0 and obt_snr < 12.0)
        self.assertTrue(obt_ach < 1.e-3)
        self.assertTrue(achi_list[0, 2] > 20.0)
        self.assertTrue(achi_list[2, 2] > 20.0)
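
The pattern this test exercises can be reproduced standalone. Below is a minimal, self-contained sketch (not taken from the test suite; the masses, cutoffs, and lengths are illustrative assumptions) that filters a template against itself with matched_filter_core and then applies the returned normalization, exactly as the test does with snr * nrm:

# A minimal sketch of the pattern above (illustrative values, assumed setup).
from pycbc.waveform import get_fd_waveform
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.filter import matched_filter_core

flow = 20.0
df = 1.0 / 16            # a 16 s long segment
flen = 16 * 1024 + 1     # frequency bins up to a 1024 Hz Nyquist

hp, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
                        f_lower=flow, delta_f=df)
hp.resize(flen)
psd = aLIGOZeroDetHighPower(flen, df, flow)

# Template filtered against itself; matched_filter_core returns the raw
# complex SNR, the correlation vector, and the normalization to apply.
snr, cor, nrm = matched_filter_core(hp, hp, psd=psd,
                                    low_frequency_cutoff=flow)
snr = snr * nrm
peak, idx = snr.abs_max_loc()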
Example #3
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
    """ This function calculates the SNR of each bank veto template against
    the segment

    Parameters
    ----------
    filters: list of FrequencySeries
        The list of bank veto templates filters.
    stilde: FrequencySeries
        The current segment of data.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: float
        The low frequency cutoff for the filter.

    Returns
    -------
    snr (list): List of SNR time series.
    norm (list): List of normalization factors for the SNR time series.
    """
    snrs = []
    norms = []

    for bank_template in filters:
        # For every template compute the snr against the stilde segment
        snr, _, norm = matched_filter_core(
                bank_template, stilde, h_norm=bank_template.sigmasq(psd),
                psd=None, low_frequency_cutoff=low_frequency_cutoff)
        # SNR time series stored here
        snrs.append(snr)
        # Template normalization factor stored here
        norms.append(norm)

    return snrs, norms
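
A hypothetical call of segment_snrs (the names bank_filters, stilde, and psd are assumptions, not from the source project). Each bank template must provide the sigmasq(psd) method used above, and each returned series still needs its normalization applied:

snrs, norms = segment_snrs(bank_filters, stilde, psd, 30.0)
for snr_ts, norm in zip(snrs, norms):
    snr_series = snr_ts * norm            # normalized SNR time series
    peak, idx = snr_series.abs_max_loc()  # loudest point for this template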
Example #4
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
    """ This function calculates the SNR of each bank veto template against
    the segment

    Parameters
    ----------
    filters: list of FrequencySeries
        The list of bank veto templates filters.
    stilde: FrequencySeries
        The current segment of data.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: float
        The low frequency cutoff for the filter.

    Returns
    -------
    snr (list): List of SNR time series.
    norm (list): List of normalization factors for the SNR time series.
    """
    snrs = []
    norms = []

    for bank_template in filters:
        # For every template compute the snr against the stilde segment
        snr, _, norm = matched_filter_core(
            bank_template,
            stilde,
            h_norm=bank_template.sigmasq(psd),
            psd=None,
            low_frequency_cutoff=low_frequency_cutoff)
        # SNR time series stored here
        snrs.append(snr)
        # Template normalization factor stored here
        norms.append(norm)

    return snrs, norms
Example #5
def inner(vec1,
          vec2,
          psd=None,
          low_frequency_cutoff=None,
          high_frequency_cutoff=None,
          v1_norm=None,
          v2_norm=None):

    htilde = pf.make_frequency_series(vec1)
    stilde = pf.make_frequency_series(vec2)

    N = (len(htilde) - 1) * 2

    # Reuse the cached output buffer when it is compatible; reallocate only
    # when the dtype or length changes.
    global _snr
    if _snr is None or _snr.dtype != htilde.dtype or len(_snr) != N:
        _snr = pt.zeros(N, dtype=pt.complex_same_precision_as(vec1))
    snr, corr, snr_norm = pf.matched_filter_core(htilde,
                                                 stilde,
                                                 psd,
                                                 low_frequency_cutoff,
                                                 high_frequency_cutoff,
                                                 v1_norm,
                                                 out=_snr)
    if v2_norm is None:
        v2_norm = pf.sigmasq(stilde, psd, low_frequency_cutoff,
                             high_frequency_cutoff)

    snr.data = snr.data * snr_norm / np.sqrt(v2_norm)

    return snr
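
Because inner() applies both the template normalization (snr_norm) and the data normalization (v2_norm), the peak of its output approximates the match between the two inputs. A hedged usage sketch, with h1, h2, and psd assumed to be compatible PyCBC series:

ov = inner(h1, h2, psd=psd, low_frequency_cutoff=20.0)
m, idx = ov.abs_max_loc()   # m approximates the match of h1 against h2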
Example #7
    def __init__(self,
                 variable_params,
                 data,
                 low_frequency_cutoff,
                 sample_rate=32768,
                 polarization_samples=None,
                 **kwargs):

        variable_params, kwargs = self.setup_distance_marginalization(
            variable_params, marginalize_phase=True, **kwargs)
        super(SingleTemplate, self).__init__(variable_params, data,
                                             low_frequency_cutoff, **kwargs)

        # Generate template waveforms
        df = data[self.detectors[0]].delta_f
        p = self.static_params.copy()
        if 'distance' in p:
            _ = p.pop('distance')
        if 'inclination' in p:
            _ = p.pop('inclination')
        hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p)

        # Extend template to high sample rate
        flen = int(int(sample_rate) / df) // 2 + 1
        hp.resize(flen)
        # Polarization array to marginalize over if polarization_samples given
        self.pflag = 0
        if polarization_samples is not None:
            self.polarization = numpy.linspace(0, 2 * numpy.pi,
                                               int(polarization_samples))
            self.pflag = 1

        # Calculate high sample rate SNR time series
        self.sh = {}
        self.hh = {}
        self.det = {}
        for ifo in self.data:
            flow = self.kmin[ifo] * df
            fhigh = self.kmax[ifo] * df
            # Extend data to high sample rate
            self.data[ifo].resize(flen)
            self.det[ifo] = Detector(ifo)
            snr, _, _ = pyfilter.matched_filter_core(
                hp,
                self.data[ifo],
                psd=self.psds[ifo],
                low_frequency_cutoff=flow,
                high_frequency_cutoff=fhigh)

            self.sh[ifo] = 4 * df * snr
            self.hh[ifo] = pyfilter.sigmasq(hp,
                                            psd=self.psds[ifo],
                                            low_frequency_cutoff=flow,
                                            high_frequency_cutoff=fhigh)
        self.time = None
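
The cached sh and hh series exist so the likelihood can be evaluated for any extrinsic parameters without refiltering: rescaling the unit-distance template by a complex amplitude scales the cached data-template product linearly and the template-template product quadratically. A sketch of that bookkeeping (an illustration only, not the source model's actual likelihood; the sign and phase conventions here are assumptions):

import numpy

def sketch_loglr(sh_t, hh, amp):
    # sh_t: cached data-template product sampled at the candidate time
    # hh:   cached template-template product for the same detector
    # amp:  complex amplitude rescaling the unit-distance template
    return (numpy.conj(amp) * sh_t).real - 0.5 * numpy.abs(amp)**2 * hh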
Example #8
    def __init__(self,
                 data,
                 psds,
                 low_frequency_cutoff=None,
                 high_frequency_cutoff=None,
                 sample_rate=32768,
                 **kwargs):

        super(SingleTemplate, self).__init__(data=data, **kwargs)

        if low_frequency_cutoff is not None:
            low_frequency_cutoff = float(low_frequency_cutoff)

        if high_frequency_cutoff is not None:
            high_frequency_cutoff = float(high_frequency_cutoff)

        # Generate template waveforms
        df = data[tuple(data.keys())[0]].delta_f
        p = self.static_params.copy()
        if 'distance' in p:
            p.pop('distance')
        if 'inclination' in p:
            p.pop('inclination')

        hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p)

        if high_frequency_cutoff is None:
            high_frequency_cutoff = (len(data[tuple(data.keys())[0]]) - 1) * df

        # Extend data and template to high sample rate
        flen = int(sample_rate / df) // 2 + 1
        hp.resize(flen)
        for ifo in data:
            data[ifo].resize(flen)

        # Calculate high sample rate SNR time series
        self.sh = {}
        self.hh = {}
        self.det = {}
        for ifo in data:
            self.det[ifo] = Detector(ifo)
            snr, _, _ = pyfilter.matched_filter_core(
                hp,
                data[ifo],
                psd=psds[ifo],
                low_frequency_cutoff=low_frequency_cutoff,
                high_frequency_cutoff=high_frequency_cutoff)

            self.sh[ifo] = 4 * df * snr
            self.hh[ifo] = -0.5 * pyfilter.sigmasq(
                hp,
                psd=psds[ifo],
                low_frequency_cutoff=low_frequency_cutoff,
                high_frequency_cutoff=high_frequency_cutoff)
        self.time = None
Example #9
    def test_sg(self):
        ### use a sin-gaussian as a signal

        sigt = TimeSeries(self.sig2, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = Array(np.array([301440, 301450, 301460]))

        snr = snr * nrm
        with _context:
            dof, achi_list = autochisq_from_precomputed(
                snr, cor, hacor, stride=3, num_points=20, indices=indx)
        obt_snr = achi_list[1, 1]
        obt_ach = achi_list[1, 2]
        self.assertTrue(obt_snr > 12.0 and obt_snr < 15.0)
        self.assertTrue(obt_ach > 6.8e3)
        self.assertTrue(achi_list[0, 2] > 6.8e3)
        self.assertTrue(achi_list[2, 2] > 6.8e3)

        with _context:
            dof, achi_list = autochisq(
                self.htilde, sig_tilde, psd, stride=3, num_points=20,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax,
                max_snr=True)

        self.assertTrue(obt_snr == achi_list[0, 1])
        self.assertTrue(obt_ach == achi_list[0, 2])

        for i in range(1, len(achi_list)):
            self.assertTrue(achi_list[i, 2] > 2.e3)
Example #10
File: chisq.py  Project: apratim07/pycbc
def power_chisq(template,
                data,
                num_bins,
                psd,
                low_frequency_cutoff=None,
                high_frequency_cutoff=None,
                return_bins=False):
    """Calculate the chisq timeseries

    Parameters
    ----------
    template: FrequencySeries or TimeSeries
        A time or frequency series that contains the filter template.
    data: FrequencySeries or TimeSeries
        A time or frequency series that contains the data to filter. The length
        must be commensurate with the template (i.e., the data and template
        must have matching lengths and frequency resolution).
    num_bins: int
        The number of frequency bins used for chisq. The number of statistical
        degrees of freedom ('dof') is 2*num_bins-2.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: {None, float}, optional
        The low frequency cutoff for the filter
    high_frequency_cutoff: {None, float}, optional
        The high frequency cutoff for the filter
    return_bins: {boolean, False}, optional
        Return a list of the individual chisq bins

    Returns
    -------
    chisq: TimeSeries
        TimeSeries containing the chisq values for all times.
    """
    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
                            high_frequency_cutoff)
    corra = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype)
    total_snr, corr, tnorm = matched_filter_core(htilde,
                                                 stilde,
                                                 psd,
                                                 low_frequency_cutoff,
                                                 high_frequency_cutoff,
                                                 corr_out=corra)

    return power_chisq_from_precomputed(corr,
                                        total_snr,
                                        tnorm,
                                        bins,
                                        return_bins=return_bins)
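
A hedged usage sketch of power_chisq (htilde, stilde, and psd are assumed to share length and delta_f); the reduction by dof = 2 * num_bins - 2 follows the docstring above:

num_bins = 16
chisq = power_chisq(htilde, stilde, num_bins, psd, low_frequency_cutoff=20.0)
rchisq = chisq / (2 * num_bins - 2)   # reduced chisq time series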
Example #11
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme
        self.tolerance = 1e-6
        xr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        xi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        self.x = Array(xr + xi * 1.0j, dtype=complex64)
        self.z = zeros(2**20, dtype=float32)
        for i in range(0, 4):
            trusted_accum(self.z, self.x)

        m = Merger("GW170814")

        ifos = ['H1', 'L1', 'V1']
        data = {}
        psd = {}
        for ifo in ifos:
            # Read in and condition the data and measure PSD
            ts = m.strain(ifo).highpass_fir(15, 512)
            data[ifo] = resample_to_delta_t(ts, 1.0 / 2048).crop(2, 2)
            p = data[ifo].psd(2)
            p = interpolate(p, data[ifo].delta_f)
            p = inverse_spectrum_truncation(p,
                                            int(2 * data[ifo].sample_rate),
                                            low_frequency_cutoff=15.0)
            psd[ifo] = p
        hp, _ = get_fd_waveform(approximant="IMRPhenomD",
                                mass1=31.36,
                                mass2=31.36,
                                f_lower=20.0,
                                delta_f=data[ifo].delta_f)
        hp.resize(len(psd[ifo]))

        # For each ifo use this template to calculate the SNR time series
        snr = {}
        snr_unnorm = {}
        norm = {}
        corr = {}
        for ifo in ifos:
            snr_unnorm[ifo], corr[ifo], norm[ifo] = \
                matched_filter_core(hp, data[ifo], psd=psd[ifo],
                                    low_frequency_cutoff=20)
            snr[ifo] = snr_unnorm[ifo] * norm[ifo]

        self.snr = snr
        self.snr_unnorm = snr_unnorm
        self.norm = norm
        self.corr = corr
        self.hp = hp
        self.data = data
        self.psd = psd
        self.ifos = ifos
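
An illustrative follow-on (not part of the test itself): with the normalized per-detector series built in setUp, the peak SNR and its GPS time can be read off directly:

for ifo in ifos:
    peak, idx = snr[ifo].abs_max_loc()
    print(ifo, peak, snr[ifo].sample_times[idx])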
Example #14
    def test_sg(self):
        ### use a sin-gaussian as a signal

        sigt = TimeSeries(self.sig2, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)
            hautocor = hautocor * float(np.real(1. / hautocor[0]))

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)

        indx = np.array([301440, 301450, 301460])

        snr = snr * nrm

        with _context:
            dof, achisq, indices = autochisq_from_precomputed(
                snr, snr, hacor, indx, stride=3, num_points=20)

        obt_snr = abs(snr[indices[1]])
        obt_ach = achisq[1]
        self.assertTrue(obt_snr > 12.0 and obt_snr < 15.0)
        self.assertTrue(obt_ach > 6.8e3)
        self.assertTrue(achisq[0] > 6.8e3)
        self.assertTrue(achisq[2] > 6.8e3)
Example #15
    def __init__(self,
                 variable_params,
                 data,
                 low_frequency_cutoff,
                 sample_rate=32768,
                 **kwargs):
        super(SingleTemplate, self).__init__(variable_params, data,
                                             low_frequency_cutoff, **kwargs)

        # Generate template waveforms
        df = data[self.detectors[0]].delta_f
        p = self.static_params.copy()
        if 'distance' in p:
            _ = p.pop('distance')
        if 'inclination' in p:
            _ = p.pop('inclination')
        hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p)

        # Extend template to high sample rate
        flen = int(int(sample_rate) / df) // 2 + 1
        hp.resize(flen)

        # Calculate high sample rate SNR time series
        self.sh = {}
        self.hh = {}
        self.det = {}
        for ifo in self.data:
            flow = self.kmin[ifo] * df
            fhigh = self.kmax[ifo] * df
            # Extend data to high sample rate
            self.data[ifo].resize(flen)
            self.det[ifo] = Detector(ifo)
            snr, _, _ = pyfilter.matched_filter_core(
                hp,
                self.data[ifo],
                psd=self.psds[ifo],
                low_frequency_cutoff=flow,
                high_frequency_cutoff=fhigh)

            self.sh[ifo] = 4 * df * snr
            self.hh[ifo] = -0.5 * pyfilter.sigmasq(hp,
                                                   psd=self.psds[ifo],
                                                   low_frequency_cutoff=flow,
                                                   high_frequency_cutoff=fhigh)
        self.time = None
Example #17
    def calculate_hihjs(self, models):
        """ Pre-calculate the hihj inner products on a grid
        """
        self.hihj = {}
        for m1, m2 in itertools.combinations(models, 2):
            self.hihj[(m1, m2)] = {}
            h1 = m1.waveform
            h2 = m2.waveform
            for ifo in self.data:
                flow = self.kmin[ifo] * self.df
                fhigh = self.kmax[ifo] * self.df
                h1h2, _, _ = pyfilter.matched_filter_core(
                    h1,
                    h2,
                    psd=self.psds[ifo],
                    low_frequency_cutoff=flow,
                    high_frequency_cutoff=fhigh)
                self.hihj[(m1, m2)][ifo] = 4 * self.df * h1h2
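
With the 4 * df convention above, each cached series is the complex inner product of the two model waveforms as a function of their relative time shift. A hypothetical access pattern (the model, m1, m2, and 'H1' names are assumptions):

series = model.hihj[(m1, m2)]['H1']
mag, idx = series.abs_max_loc()   # inner product maximized over the shift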
Example #18
File: chisq.py  Project: johnveitch/pycbc
def power_chisq(template, data, num_bins, psd,
                low_frequency_cutoff=None,
                high_frequency_cutoff=None,
                return_bins=False):
    """Calculate the chisq timeseries

    Parameters
    ----------
    template: FrequencySeries or TimeSeries
        A time or frequency series that contains the filter template.
    data: FrequencySeries or TimeSeries
        A time or frequency series that contains the data to filter. The length
        must be commensurate with the template (i.e., the data and template
        must have matching lengths and frequency resolution).
    num_bins: int
        The number of bins in the chisq. Note that the dof goes as 2*num_bins-2.
    psd: FrequencySeries
        The psd of the data.
    low_frequency_cutoff: {None, float}, optional
        The low frequency cutoff for the filter
    high_frequency_cutoff: {None, float}, optional
        The high frequency cutoff for the filter
    return_bins: {boolean, False}, optional
        Return a list of the individual chisq bins

    Returns
    -------
    chisq: TimeSeries
        TimeSeries containing the chisq values for all times.
    """
    htilde = make_frequency_series(template)
    stilde = make_frequency_series(data)

    bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
                            high_frequency_cutoff)
    corra = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype)
    total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
                                                 low_frequency_cutoff,
                                                 high_frequency_cutoff,
                                                 corr_out=corra)

    return power_chisq_from_precomputed(corr, total_snr, tnorm, bins,
                                        return_bins=return_bins)
Example #19
def create_tf_plane(fd_psd, nchans, seg_len, filter_bank, band, fs_data):
    """
    Create time-frequency map

    Parameters
    ----------
    fd_psd : array
      Power Spectrum Density
    nchans : int
      Total number of channels
    seg_len : int
      Segment length in samples
    filter_bank : list
      Bank of excess power filters, one per channel
    band : float
      Channel bandwidth
    fs_data : FrequencySeries
      Frequency series of the data block to filter
    """
    print("|-- Create time-frequency plane for current block")
    # Return the complex snr, along with its associated normalization of the template,
    # matched filtered against the data
    #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=filter_bank[0].f0,high_frequency_cutoff=filter_bank[0].f0+2*band)
    print("|-- Filtering all %d channels..." % nchans)
    # Initialise zero array for the filter's frequency series
    tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
    # Initialise 2D zero array for time-frequency map
    tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
    # Loop over all the channels
    for i in range(nchans):
        # Reset filter bank series
        tmp_filter_bank *= 0.0
        # Index of starting frequency
        f1 = int(filter_bank[i].f0 / fd_psd.delta_f)
        # Index of ending frequency
        f2 = int((filter_bank[i].f0 + 2 * band) / fd_psd.delta_f) + 1
        # (FIXME: Why is there a factor of 2 here?)
        tmp_filter_bank[f1:f2] = filter_bank[i].data.data * 2
        # Define the template to filter the frequency series with
        template = types.FrequencySeries(tmp_filter_bank,
                                         delta_f=fd_psd.delta_f, copy=False)
        # Create filtered series
        filtered_series = filter.matched_filter_core(
            template, fs_data, h_norm=None, psd=None,
            low_frequency_cutoff=filter_bank[i].f0,
            high_frequency_cutoff=filter_bank[i].f0 + 2 * band)
        # Include filtered series in the map
        tf_map[i, :] = filtered_series[0].numpy()
    return tf_map
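
A hypothetical invocation of create_tf_plane (all argument names are assumptions): fd_psd and fs_data describe the same data block, and filter_bank holds one filter per channel:

tf_map = create_tf_plane(fd_psd, nchans, seg_len, filter_bank, band, fs_data)
print(tf_map.shape)   # (nchans, seg_len) complex time-frequency map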
Example #20
def align_waveforms_suboptimally(hplus1,
                                 hcross1,
                                 hplus2,
                                 hcross2,
                                 psd='aLIGOZeroDetHighPower',
                                 low_frequency_cutoff=None,
                                 high_frequency_cutoff=None,
                                 tsign=1,
                                 phsign=1,
                                 verify=True,
                                 trim_leading=False,
                                 trim_trailing=False,
                                 verbose=False):
    # Cast into time-series
    h_plus1 = TimeSeries(hplus1,
                         epoch=hplus1._epoch,
                         delta_t=hplus1.delta_t,
                         dtype=hplus1.dtype)
    h_cross1 = TimeSeries(hcross1,
                          epoch=hplus1._epoch,
                          delta_t=hplus1.delta_t,
                          dtype=hplus1.dtype)
    h_plus2 = TimeSeries(hplus2,
                         epoch=hplus2._epoch,
                         delta_t=hplus2.delta_t,
                         dtype=hplus2.dtype)
    h_cross2 = TimeSeries(hcross2,
                          epoch=hplus2._epoch,
                          delta_t=hplus2.delta_t,
                          dtype=hplus2.dtype)
    #
    # Ensure both input hplus vectors are equal in length
    if len(hplus2) > len(hplus1):
        h_plus1.append_zeros(len(hplus2) - len(hplus1))
        h_cross1.append_zeros(len(hplus2) - len(hplus1))
    elif len(hplus2) < len(hplus1):
        h_plus2.append_zeros(len(hplus1) - len(hplus2))
        h_cross2.append_zeros(len(hplus1) - len(hplus2))
    #
    htilde = make_frequency_series(h_plus1)
    stilde = make_frequency_series(h_plus2)
    #
    if high_frequency_cutoff is None:
        high_frequency_cutoff = 1. / h_plus1.delta_t / 2.
    #
    if psd is None:
        raise IOError("Need compatible psd [or name] as input!")
    elif isinstance(psd, str):
        psd_name = psd
        psd = from_string(psd_name, len(htilde), htilde.delta_f,
                          low_frequency_cutoff)
    #
    # Determine the phase and time shifts for optimal match
    snr, corr, snr_norm = matched_filter_core(
        htilde,
        stilde,
        # h_plus1, h_plus2,
        psd,
        low_frequency_cutoff,
        high_frequency_cutoff,
        None)
    max_snr, max_id = snr.abs_max_loc()

    if max_id != 0:
        t_shift = snr.delta_t * (len(snr) - max_id)
    else:
        t_shift = snr.delta_t * max_id

    ph_shift = np.angle(snr[max_id]) - 0.24850315030 - 0.0465881735639
    #
    if verbose:
        print(("max_id = %d, id_shift = %d" %
               (max_id, int(t_shift / snr.delta_t))))
        print(("t_shift = %f,\n ph_shift = %f" % (t_shift, ph_shift)))
    #
    # Print overlaps
    if verbose:
        print(("Overlap BEFORE ALIGNMENT:",
               overlap_cplx(h_plus1,
                            h_plus2,
                            psd=psd,
                            low_frequency_cutoff=low_frequency_cutoff,
                            high_frequency_cutoff=high_frequency_cutoff,
                            normalized=True)))
        print(("Match BEFORE ALIGNMENT:",
               match(h_plus1,
                     h_plus2,
                     psd=psd,
                     low_frequency_cutoff=low_frequency_cutoff,
                     high_frequency_cutoff=high_frequency_cutoff)))

    # Shift whichever needs to be shifted to future time.
    # Shifting back in time is tricky.
    hp2, hc2 = shift_waveform_phase_time(h_plus2,
                                         h_cross2,
                                         tsign * t_shift,
                                         phsign * ph_shift,
                                         verbose=verbose)
    #
    # Ensure both input hplus vectors are equal in length
    if len(h_plus1) > len(hp2):
        hp2.append_zeros(len(h_plus1) - len(hp2))
    elif len(h_plus1) < len(hp2):
        h_plus1.append_zeros(len(hp2) - len(h_plus1))

    if verbose:
        htilde = make_frequency_series(h_plus1)
        psd = from_string(psd_name, len(htilde), htilde.delta_f,
                          low_frequency_cutoff)
        print(("Overlap AFTER ALIGNMENT:",
               overlap_cplx(h_plus1,
                            hp2,
                            psd=psd,
                            low_frequency_cutoff=low_frequency_cutoff,
                            high_frequency_cutoff=high_frequency_cutoff,
                            normalized=True)))
        print(("Match AFTER ALIGNMENT:",
               match(h_plus1,
                     hp2,
                     psd=psd,
                     low_frequency_cutoff=low_frequency_cutoff,
                     high_frequency_cutoff=high_frequency_cutoff)))
    if verify:
        #
        print("Verifying time alignment...")
        # Determine the phase and time shifts for optimal match
        snr, corr, snr_norm = matched_filter_core(  # htilde, stilde,
            h_plus1, hp2, psd, low_frequency_cutoff, high_frequency_cutoff,
            None)
        max_snr, max_id = snr.abs_max_loc()
        print(("Post-Alignment Index of MAX SNR (should be 0 or 1 or %d): %d" %
               (len(snr) - 1, max_id)))
        print(("Length of whole SNR time-series: ", len(snr)))
        if max_id != 0 and max_id != 1 and max_id != (
                len(snr) - 1) and max_id != (len(snr) - 2):
            # raise RuntimeError( "Warning: ALIGNMENT NOT CORRECT (see above)" )
            print("Warning: ALIGNMENT NOT CORRECT (see above)")
        else:
            print("Alignment in time correct..")
        #
        print("Verifying phase alignment...")
        ph_shift = np.angle(snr[max_id])
        if ph_shift != 0:
            print("Warning: Phasing alignment possibly incorrect.")
            print(("dphi, dphi+pi, dphi-pi: ", ph_shift, ph_shift + np.pi,
                   ph_shift - np.pi))
            print(("dphi/pi, dphi*pi: ", ph_shift / np.pi, ph_shift * np.pi))
        #

    #
    if trim_trailing:
        h_plus1 = trim_trailing_zeros(h_plus1)
        h_cross1 = trim_trailing_zeros(h_cross1)
        hp2 = trim_trailing_zeros(hp2)
        hc2 = trim_trailing_zeros(hc2)
    if trim_leading:
        h_plus1 = trim_leading_zeros(h_plus1)
        h_cross1 = trim_leading_zeros(h_cross1)
        hp2 = trim_leading_zeros(hp2)
        hc2 = trim_leading_zeros(hc2)
    #
    return h_plus1, h_cross1, hp2, hc2
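
A hedged usage sketch (the input waveform names are assumptions): align the second pair of polarizations onto the first and trim trailing zeros from the outputs:

hp1a, hc1a, hp2a, hc2a = align_waveforms_suboptimally(
    hp1, hc1, hp2, hc2, psd='aLIGOZeroDetHighPower',
    low_frequency_cutoff=20.0, high_frequency_cutoff=1024.0,
    trim_trailing=True, verbose=False)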
Example #21
File: epower.py  Project: Yuzhe98/Yu0702
def excess_power(
    ts_data,  # Time series from magnetic field data 
    band=None,  # Channel bandwidth
    channel_name='channel-name',  # Channel name
    fmin=0,  # Lowest frequency of the filter bank.
    fmax=None,  # Highest frequency of the filter bank.
    impulse=False,  # Impulse response
    make_plot=True,  # Condition to produce plots
    max_duration=None,  # Maximum duration of the tile
    nchans=256,  # Total number of channels
    psd_estimation='median-mean',  # Average method
    psd_segment_length=60,  # Length of each segment in seconds
    psd_segment_stride=30,  # Separation between 2 consecutive segments in seconds
    station='station-name',  # Station name
    tile_fap=1e-7,  # Tile false alarm probability threshold in Gaussian noise.
    verbose=True,  # Print details
    window_fraction=0,  # Whitening window fraction
    wtype='tukey'):  # Whitening type, can be 'tukey' or 'hann'
    '''
    Perform excess-power search analysis on magnetic field data.
    This method will produce a bunch of time-frequency plots for every
    tile duration and bandwidth analysed, as well as an XML file identifying
    all the triggers found in the selected data within the user-defined
    time range.

    Parameters
    ----------
    ts_data : TimeSeries
      Time Series from magnetic field data
    psd_segment_length : float
      Length of each segment in seconds
    psd_segment_stride : float
      Separation between 2 consecutive segments in seconds
    psd_estimation : string
      Average method
    window_fraction : float
      Whitening window fraction
    tile_fap : float
      Tile false alarm probability threshold in Gaussian noise.
    nchans : int
      Total number of channels
    band : float
      Channel bandwidth
    fmin : float
      Lowest frequency of the filter bank.
    fmax : float
      Highest frequency of the filter bank

    Examples
    --------
    The program can be run as an executable by using the ``excesspower`` command
    line as follows::

      excesspower --station "mainz01" \\
                  --start-time "2017-04-15-17-1" \\
                  --end-time "2017-04-15-18" \\
                  --rep "/Users/vincent/ASTRO/data/GNOME/GNOMEDrive/gnome/serverdata/" \\
                  --resample 512 \\
                  --verbose

    '''
    # Determine sampling rate based on extracted time series
    sample_rate = ts_data.sample_rate
    # Check if tile maximum frequency is not defined
    if fmax is None or fmax > sample_rate / 2.:
        # Set the tile maximum frequency equal to the Nyquist frequency
        # (i.e. half the sampling rate)
        fmax = sample_rate / 2.0
    # Check whether or not tile bandwidth and channel are defined
    if band is None and nchans is None:
        # Exit program with error message
        exit("Either bandwidth or number of channels must be specified...")
    else:
        # Check if the tile maximum frequency is larger than its minimum frequency
        assert fmax >= fmin
        # Define spectral band of data
        data_band = fmax - fmin
        # Check whether tile bandwidth or channel is defined
        if band is not None:
            # Define number of possible filter bands
            nchans = int(data_band / band)
        elif nchans is not None:
            # Define filter bandwidth
            band = data_band / nchans
            nchans -= 1
        # Check that the number of channels is greater than one
        assert nchans > 1
    # Print segment information
    if verbose:
        print('|- Estimating PSD from segments of '
              '%.2f s, with %.2f s stride...' % (psd_segment_length,
                                                 psd_segment_stride))
    # Convert the time series to an array of floats
    data = ts_data.astype(numpy.float64)
    # Define segment length for PSD estimation in sample unit
    seg_len = int(psd_segment_length * sample_rate)
    # Define separation between consecutive segments in sample unit
    seg_stride = int(psd_segment_stride * sample_rate)
    # Minimum frequency of detectable signal in a segment
    delta_f = 1. / psd_segment_length
    # Calculate PSD length counting the zero frequency element
    fd_len = fmax / delta_f + 1
    # Calculate the overall PSD from individual PSD segments
    if impulse:
        # Produce flat data
        flat_data = numpy.ones(int(fd_len)) * 2. / fd_len
        # Create PSD frequency series
        fd_psd = types.FrequencySeries(flat_data, 1. / psd_segment_length,
                                       ts_data.start_time)
    else:
        # Create overall PSD using Welch's method
        fd_psd = psd.welch(data,
                           avg_method=psd_estimation,
                           seg_len=seg_len,
                           seg_stride=seg_stride)
    if make_plot:
        # Plot the power spectral density
        plot_spectrum(fd_psd)
    # We need this for the SWIG functions
    lal_psd = fd_psd.lal()
    # Create whitening window
    if verbose:
        print("|- Whitening window and spectral correlation...")
    if wtype == 'hann':
        window = lal.CreateHannREAL8Window(seg_len)
    elif wtype == 'tukey':
        window = lal.CreateTukeyREAL8Window(seg_len, window_fraction)
    else:
        raise ValueError("Can't handle window type %s" % wtype)
    # Create FFT plan
    fft_plan = lal.CreateForwardREAL8FFTPlan(len(window.data.data), 1)
    # Perform two point spectral correlation
    spec_corr = lal.REAL8WindowTwoPointSpectralCorrelation(window, fft_plan)
    # Determine length of individual filters
    filter_length = int(2 * band / fd_psd.delta_f) + 1
    # Initialise filter bank
    if verbose:
        print("|- Create bank of %i filters of %i Hz bandwidth..." % (
            nchans, filter_length))
    # Initialise array to store filter's frequency series and metadata
    lal_filters = []
    # Initialise array to store filter's time series
    fdb = []
    # Loop over the channels
    for i in range(nchans):
        # Define central position of the filter
        freq = fmin + band / 2 + i * band
        # Create excess power filter
        lal_filter = lalburst.CreateExcessPowerFilter(freq, band, lal_psd,
                                                      spec_corr)
        # Testing spectral correlation on filter
        #print lalburst.ExcessPowerFilterInnerProduct(lal_filter, lal_filter, spec_corr, None)
        # Append entire filter structure
        lal_filters.append(lal_filter)
        # Append filter's spectrum
        fdb.append(FrequencySeries.from_lal(lal_filter))
        #print fdb[0].frequencies
        #print fdb[0]
    if make_plot:
        # Plot filter bank
        plot_bank(fdb)
        # Convert filter bank from frequency to time domain
        if verbose:
            print("|- Convert all the filters from the frequency to the time domain...")
        tdb = []
        # Loop for each filter's spectrum
        for fdt in fdb:
            zero_padded = numpy.zeros(int((fdt.f0 / fdt.df).value) + len(fdt))
            st = int((fdt.f0 / fdt.df).value)
            zero_padded[st:st + len(fdt)] = numpy.real_if_close(fdt.value)
            n_freq = int(sample_rate / 2 / fdt.df.value) * 2
            tdt = numpy.fft.irfft(zero_padded, n_freq) * math.sqrt(sample_rate)
            tdt = numpy.roll(tdt, len(tdt) // 2)
            tdt = TimeSeries(tdt,
                             name="",
                             epoch=fdt.epoch,
                             sample_rate=sample_rate)
            tdb.append(tdt)
        # Plot time series filter
        plot_filters(tdb, fmin, band)
    # Compute whitened and unwhitened inner products of the input filters with
    # themselves and with their adjacent neighbours; these enter the hrss
    # measurement for each event below.
    white_filter_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, None) for f in lal_filters])
    unwhite_filter_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, lal_psd) for f in lal_filters])
    white_ss_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f1, f2, spec_corr, None) for f1, f2 in zip(lal_filters[:-1], lal_filters[1:])])
    unwhite_ss_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f1, f2, spec_corr, lal_psd) for f1, f2 in zip(lal_filters[:-1], lal_filters[1:])])
    # Check filter's bandwidth is equal to user defined channel bandwidth
    min_band = (len(lal_filters[0].data.data) - 1) * lal_filters[0].deltaF / 2
    assert min_band == band
    # Create an event list where all the triggers will be stored
    event_list = lsctables.New(lsctables.SnglBurstTable, [
        'start_time', 'start_time_ns', 'peak_time', 'peak_time_ns', 'duration',
        'bandwidth', 'central_freq', 'chisq_dof', 'confidence', 'snr',
        'amplitude', 'channel', 'ifo', 'process_id', 'event_id', 'search',
        'stop_time', 'stop_time_ns'
    ])
    # Create repositories to save TF and time series plots
    os.system('mkdir -p segments/time-frequency')
    os.system('mkdir -p segments/time-series')
    # Define time edges
    t_idx_min, t_idx_max = 0, seg_len
    # Loop over each segment
    while t_idx_max <= len(ts_data):
        # Define first and last timestamps of the block
        start_time = ts_data.start_time + t_idx_min / float(
            ts_data.sample_rate)
        end_time = ts_data.start_time + t_idx_max / float(ts_data.sample_rate)
        if verbose:
            print("\n|- Analyzing block %i to %i (%.2f percent)" % (
                start_time, end_time, 100 * float(t_idx_max) / len(ts_data)))
        # Debug for impulse response
        if impulse:
            for i in range(t_idx_min, t_idx_max):
                ts_data[i] = 1000. if i == (t_idx_max + t_idx_min) // 2 else 0.
        # Model a whitened time series for the block
        tmp_ts_data = types.TimeSeries(ts_data[t_idx_min:t_idx_max] *
                                       window.data.data,
                                       delta_t=1. / ts_data.sample_rate,
                                       epoch=start_time)
        # Save time series in relevant repository
        os.system('mkdir -p segments/%i-%i' % (start_time, end_time))
        if make_plot:
            # Plot time series
            plot_ts(tmp_ts_data,
                    fname='segments/time-series/%i-%i.png' %
                    (start_time, end_time))
        # Convert the time series to a frequency series
        fs_data = tmp_ts_data.to_frequencyseries()
        if verbose:
            print("|- Frequency series data has variance: %s"
                  % fs_data.data.std()**2)
        # Whitening (FIXME: Whiten the filters, not the data)
        fs_data.data /= numpy.sqrt(fd_psd) / numpy.sqrt(2 * fd_psd.delta_f)
        if verbose:
            print("|- Whitened frequency series data has variance: %s"
                  % fs_data.data.std()**2)
        if verbose:
            print("|- Create time-frequency plane for current block")
        # Return the complex snr, along with its associated normalization of the template,
        # matched filtered against the data
        #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),
        #                           fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=lal_filters[0].f0,
        #                           high_frequency_cutoff=lal_filters[0].f0+2*band)
        if verbose:
            print("|- Filtering all %d channels..." % nchans)
        # Initialise 2D zero array
        tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
        # Initialise 2D zero array for time-frequency map
        tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
        # Loop over all the channels
        for i in range(nchans):
            # Reset filter bank series
            tmp_filter_bank *= 0.0
            # Index of starting frequency
            f1 = int(lal_filters[i].f0 / fd_psd.delta_f)
            # Index of last frequency bin
            f2 = int((lal_filters[i].f0 + 2 * band) / fd_psd.delta_f) + 1
            # (FIXME: Why is there a factor of 2 here?)
            tmp_filter_bank[f1:f2] = lal_filters[i].data.data * 2
            # Define the template to filter the frequency series with
            template = types.FrequencySeries(tmp_filter_bank,
                                             delta_f=fd_psd.delta_f,
                                             copy=False)
            # Create filtered series
            filtered_series = filter.matched_filter_core(
                template,
                fs_data,
                h_norm=None,
                psd=None,
                low_frequency_cutoff=lal_filters[i].f0,
                high_frequency_cutoff=lal_filters[i].f0 + 2 * band)
            # Include filtered series in the map
            tf_map[i, :] = filtered_series[0].numpy()
        if make_plot:
            # Plot spectrogram
            plot_spectrogram(numpy.abs(tf_map).T,
                             dt=tmp_ts_data.delta_t,
                             df=band,
                             ymax=ts_data.sample_rate / 2.,
                             t0=start_time,
                             t1=end_time,
                             fname='segments/time-frequency/%i-%i.png' %
                             (start_time, end_time))
            plot_tiles_ts(numpy.abs(tf_map),
                          2,
                          1,
                          sample_rate=ts_data.sample_rate,
                          t0=start_time,
                          t1=end_time,
                          fname='segments/%i-%i/ts.png' %
                          (start_time, end_time))
            #plot_tiles_tf(numpy.abs(tf_map),2,1,ymax=ts_data.sample_rate/2,
            #              sample_rate=ts_data.sample_rate,t0=start_time,t1=end_time,
            #              fname='segments/%i-%i/tf.png'%(start_time,end_time))
        # Loop through powers of 2 up to number of channels
        for nc_sum in range(0, int(math.log(nchans, 2)))[::-1]:
            # Calculate total number of summed channels
            nc_sum = 2**nc_sum
            if verbose:
                print("\n\t|- Constructing tiles containing %d narrow band channels" % nc_sum)
            # Compute full bandwidth of virtual channel
            df = band * nc_sum
            # Compute minimal signal's duration in virtual channel
            dt = 1.0 / (2 * df)
            # Compute under sampling rate
            us_rate = int(round(dt / ts_data.delta_t))
            if verbose:
                print("\t|- Undersampling rate for this level: %f" % (
                    ts_data.sample_rate / us_rate))
                print("\t|- Calculating tiles...")
            # Clip the boundaries to remove window corruption
            clip_samples = int(psd_segment_length * window_fraction *
                               ts_data.sample_rate / 2)
            # Undersample narrow band channel's time series
            # Apply clipping condition because [0:-0] does not give the full array
            tf_map_temp = tf_map[:,clip_samples:-clip_samples:us_rate] \
                          if clip_samples > 0 else tf_map[:,::us_rate]
            # Initialise final tile time-frequency map
            tiles = numpy.zeros(((nchans + 1) // nc_sum, tf_map_temp.shape[1]))
            # Loop over tile index
            for i in range(len(tiles)):
                # Sum all inner narrow band channels
                ts_tile = numpy.absolute(tf_map_temp[nc_sum * i:nc_sum *
                                                     (i + 1)].sum(axis=0))
                # Define index of last narrow band channel for given tile
                n = (i + 1) * nc_sum - 1
                n = n - 1 if n == len(lal_filters) else n
                # Compute the whitened inner product of each input filter with itself
                mu_sq = nc_sum * lalburst.ExcessPowerFilterInnerProduct(
                    lal_filters[n], lal_filters[n], spec_corr, None)
                #kmax = nc_sum-1 if n==len(lal_filters) else nc_sum-2
                # Loop over the inner narrow band channels
                for k in range(0, nc_sum - 1):
                    # Compute whitened filter inner products between adjacent filters
                    mu_sq += 2 * lalburst.ExcessPowerFilterInnerProduct(
                        lal_filters[n - k], lal_filters[n - 1 - k], spec_corr,
                        None)
                # Normalise tile's time series
                tiles[i] = ts_tile.real**2 / mu_sq
            if verbose:
                print("\t|- TF-plane is %dx%s samples" % tiles.shape)
                print("\t|- Tile energy mean %f, var %f" % (numpy.mean(tiles),
                                                            numpy.var(tiles)))
            # Define maximum number of degrees of freedom and check it larger or equal to 2
            max_dof = 32 if max_duration is None else int(max_duration / dt)
            assert max_dof >= 2
            # Loop through multiple degrees of freedom
            for j in [2**l for l in range(0, int(math.log(max_dof, 2)))]:
                # Duration is fixed by the NDOF and bandwidth
                duration = j * dt
                if verbose:
                    print("\n\t\t|- Summing DOF = %d ..." % (2 * j))
                    print("\t\t|- Explore signal duration of %f s..." % duration)
                # Construct filter
                sum_filter = numpy.array([1, 0] * (j - 1) + [1])
                # Calculate length of filtered time series
                tlen = tiles.shape[1] - sum_filter.shape[0] + 1
                # Initialise filtered time series array
                dof_tiles = numpy.zeros((tiles.shape[0], tlen))
                # Loop over tiles
                for f in range(tiles.shape[0]):
                    # Sum and drop correlated tiles
                    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')
                if verbose:
                    print("\t\t|- Summed tile energy mean: %f" % (
                        numpy.mean(dof_tiles)))
                    print("\t\t|- Variance tile energy: %f" % (
                        numpy.var(dof_tiles)))
                if make_plot:
                    plot_spectrogram(
                        dof_tiles.T,
                        dt,
                        df,
                        ymax=ts_data.sample_rate / 2,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                    plot_tiles_ts(
                        dof_tiles,
                        2 * j,
                        df,
                        sample_rate=ts_data.sample_rate / us_rate,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof_ts.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                    plot_tiles_tf(
                        dof_tiles,
                        2 * j,
                        df,
                        ymax=ts_data.sample_rate / 2,
                        sample_rate=ts_data.sample_rate / us_rate,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof_tf.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                threshold = scipy.stats.chi2.isf(tile_fap, j)
                if verbose:
                    print("\t\t|- Threshold for this level: %f" % threshold)
                spant, spanf = dof_tiles.shape[1] * dt, dof_tiles.shape[0] * df
                if verbose:
                    print("\t\t|- Processing %.2fx%.2f time-frequency map." % (
                        spant, spanf))
                # Since we clip the data, the start time needs to be adjusted accordingly
                window_offset_epoch = fs_data.epoch + psd_segment_length * window_fraction / 2
                window_offset_epoch = LIGOTimeGPS(float(window_offset_epoch))
                for i, j in zip(*numpy.where(dof_tiles > threshold)):
                    event = event_list.RowType()
                    # The points are summed forward in time and thus a `summed point' is the
                    # sum of the previous N points. If this point is above threshold, it
                    # corresponds to a tile which spans the previous N points. However, the
                    # 0th point (due to the convolution specifier 'valid') is actually
                    # already a duration from the start time. All of this means the +
                    # duration and the - duration cancel, and the tile 'start' is, by
                    # definition, the start of the time-frequency map when j = 0
                    # FIXME: I think this needs a + dt/2 to center the tile properly
                    event.set_start(window_offset_epoch + float(j * dt))
                    event.set_stop(window_offset_epoch + float(j * dt) +
                                   duration)
                    event.set_peak(event.get_start() + duration / 2)
                    event.central_freq = lal_filters[
                        0].f0 + band / 2 + i * df + 0.5 * df
                    event.duration = duration
                    event.bandwidth = df
                    event.chisq_dof = 2 * duration * df
                    event.snr = math.sqrt(dof_tiles[i, j] / event.chisq_dof -
                                          1)
                    # FIXME: Magic number 0.62 should be determined empirically
                    event.confidence = -lal.LogChisqCCDF(
                        event.snr * 0.62, event.chisq_dof * 0.62)
                    event.amplitude = None
                    event.process_id = None
                    event.event_id = event_list.get_next_id()
                    event_list.append(event)
                for event in event_list[::-1]:
                    if event.amplitude is not None:
                        continue
                    etime_min_idx = float(event.get_start()) - float(
                        fs_data.epoch)
                    etime_min_idx = int(etime_min_idx / tmp_ts_data.delta_t)
                    etime_max_idx = float(event.get_start()) - float(
                        fs_data.epoch) + event.duration
                    etime_max_idx = int(etime_max_idx / tmp_ts_data.delta_t)
                    # (band / 2) to account for sin^2 wings from finest filters
                    flow_idx = int((event.central_freq - event.bandwidth / 2 -
                                    (df / 2) - fmin) / df)
                    fhigh_idx = int((event.central_freq + event.bandwidth / 2 +
                                     (df / 2) - fmin) / df)
                    # TODO: Check that the undersampling rate is always commensurate
                    # with the indexing: that is to say that
                    # mod(etime_min_idx, us_rate) == 0 always
                    z_j_b = tf_map[flow_idx:fhigh_idx,
                                   etime_min_idx:etime_max_idx:us_rate]
                    # FIXME: Deal with negative hrss^2 -- e.g. remove the event
                    try:
                        event.amplitude = measure_hrss(
                            z_j_b, unwhite_filter_ip[flow_idx:fhigh_idx],
                            unwhite_ss_ip[flow_idx:fhigh_idx - 1],
                            white_ss_ip[flow_idx:fhigh_idx - 1],
                            fd_psd.delta_f, tmp_ts_data.delta_t,
                            len(lal_filters[0].data.data), event.chisq_dof)
                    except ValueError:
                        event.amplitude = 0
                if verbose:
                    print "\t\t|- Total number of events: %d" % len(event_list)
        t_idx_min += int(seg_len * (1 - window_fraction))
        t_idx_max += int(seg_len * (1 - window_fraction))
    setname = "MagneticFields"
    __program__ = 'pyburst_excesspower_gnome'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc,
                                  __program__,
                                  straindict,
                                  ifos=[ifo],
                                  version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    dt_stride = psd_segment_length
    sample_rate = ts_data.sample_rate
    # Amount to overlap successive blocks so as not to lose data
    window_overlap_samples = window_fraction * sample_rate
    outseg = inseg.contract(window_fraction * dt_stride / 2)
    # With a given dt_stride, we cannot process the remainder of this data
    remainder = math.fmod(abs(outseg), dt_stride * (1 - window_fraction))
    # ...so make an accounting of it
    outseg = segment(outseg[0], outseg[1] - remainder)
    ss = append_search_summary(xmldoc,
                               proc_row,
                               ifos=(station, ),
                               inseg=inseg,
                               outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    ifostr = ifo if isinstance(ifo, str) else "".join(ifo)
    st_rnd, end_rnd = int(math.floor(inseg[0])), int(math.ceil(inseg[1]))
    dur = end_rnd - st_rnd
    fname = "%s-excesspower-%d-%d.xml.gz" % (ifostr, st_rnd, dur)
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
    plot_triggers(fname)
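
The inner degrees-of-freedom loop in the example above reduces to a convolution with a sparse comb filter: summing j tiles spaced two samples apart yields a 2*j-DOF energy per output sample. A minimal, self-contained sketch of that step on toy data (the array sizes, the chi-squared draw and the value of j are illustrative, not pipeline values):

import numpy
from scipy.signal import fftconvolve

tiles = numpy.random.chisquare(2, size=(4, 16))   # toy map: 4 channels x 16 tiles
j = 4                                             # tiles summed -> 2*j DOF
sum_filter = numpy.array([1, 0] * (j - 1) + [1])  # comb: sum every other sample
dof_tiles = numpy.zeros((tiles.shape[0],
                         tiles.shape[1] - len(sum_filter) + 1))
for f in range(tiles.shape[0]):
    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')

The 'valid' mode keeps only fully overlapping sums, which is why each output row is shorter than the input by len(sum_filter) - 1 samples.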
Example #22
0
def excess_power2(
    ts_data,  # Time series from magnetic field data
    psd_segment_length,  # Length of each segment in seconds
    psd_segment_stride,  # Separation between 2 consecutive segments in seconds
    psd_estimation,  # Average method
    window_fraction,  # Whitening window fraction
    tile_fap,  # Tile false alarm probability threshold in Gaussian noise.
    station,  # Station
    nchans=None,  # Total number of channels
    band=None,  # Channel bandwidth
    fmin=0,  # Lowest frequency of the filter bank.
    fmax=None,  # Highest frequency of the filter bank.
    max_duration=None,  # Maximum duration of the tile
    wtype='tukey'):  # Whitening type, can be 'tukey' or 'hann'
    """
    Perform excess-power search analysis on magnetic field data.
    This method produces time-frequency plots for every tile duration
    and bandwidth analysed, as well as an XML file identifying all the
    triggers found in the selected data within the user-defined time
    range.

    Parameters
    ----------
    ts_data : TimeSeries
      Time Series from magnetic field data
    psd_segment_length : float
      Length of each segment in seconds
    psd_segment_stride : float
      Separation between 2 consecutive segments in seconds
    psd_estimation : string
      Average method
    window_fraction : float
      Whitening window fraction
    tile_fap : float
      Tile false alarm probability threshold in Gaussian noise.
    nchans : int
      Total number of channels
    band : float
      Channel bandwidth
    fmin : float
      Lowest frequency of the filter bank.
    fmax : float
      Highest frequency of the filter bank
    """
    # Determine sampling rate based on extracted time series
    sample_rate = ts_data.sample_rate
    # Check if tile maximum frequency is not defined
    if fmax is None or fmax > sample_rate / 2.:
        # Set the tile maximum frequency equal to the Nyquist frequency
        # (i.e. half the sampling rate)
        fmax = sample_rate / 2.0
    # Check whether or not tile bandwidth and channel are defined
    if band is None and nchans is None:
        # Exit program with error message
        exit("Either bandwidth or number of channels must be specified...")
    else:
        # Check that the tile maximum frequency is larger than its minimum frequency
        assert fmax >= fmin
        # Define spectral band of data
        data_band = fmax - fmin
        # Check whether tile bandwidth or channel is defined
        if band is not None:
            # Define number of possible filter bands
            nchans = int(data_band / band) - 1
        elif nchans is not None:
            # Define filter bandwidth
            band = data_band / nchans
            nchans = nchans - 1
        # Check that the number of channels is greater than one
        assert nchans > 1
    # Print segment information
    print '|- Estimating PSD from segments of time',
    print '%.2f s in length, with %.2f s stride...' % (psd_segment_length,
                                                       psd_segment_stride)
    # Convert the time series to an array of floats
    data = ts_data.astype(numpy.float64)
    # Define segment length for PSD estimation in sample unit
    seg_len = int(psd_segment_length * sample_rate)
    # Define separation between consecutive segments in sample unit
    seg_stride = int(psd_segment_stride * sample_rate)
    # Calculate the overall PSD from individual PSD segments
    fd_psd = psd.welch(data,
                       avg_method=psd_estimation,
                       seg_len=seg_len,
                       seg_stride=seg_stride)
    # We need this for the SWIG functions...
    lal_psd = fd_psd.lal()
    # Plot the power spectral density
    plot_spectrum(fd_psd)
    # Create whitening window
    print "|- Whitening window and spectral correlation..."
    if wtype == 'hann':
        window = lal.CreateHannREAL8Window(seg_len)
    elif wtype == 'tukey':
        window = lal.CreateTukeyREAL8Window(seg_len, window_fraction)
    else:
        raise ValueError("Can't handle window type %s" % wtype)
    # Create FFT plan
    fft_plan = lal.CreateForwardREAL8FFTPlan(len(window.data.data), 1)
    # Perform two point spectral correlation
    spec_corr = lal.REAL8WindowTwoPointSpectralCorrelation(window, fft_plan)
    # Initialise filter bank
    print "|- Create filter..."
    filter_bank, fdb = [], []
    # Loop over each channel
    for i in range(nchans):
        channel_flow = fmin + band / 2 + i * band
        channel_width = band
        # Create excess power filter
        lal_filter = lalburst.CreateExcessPowerFilter(channel_flow,
                                                      channel_width, lal_psd,
                                                      spec_corr)
        filter_bank.append(lal_filter)
        fdb.append(Spectrum.from_lal(lal_filter))
    # Calculate the minimum bandwidth
    min_band = (len(filter_bank[0].data.data) - 1) * filter_bank[0].deltaF / 2
    # Plot filter bank
    plot_bank(fdb)
    # Convert filter bank from frequency to time domain
    print "|- Convert all the frequency domain to the time domain..."
    tdb = []
    # Loop over each filter's spectrum
    for fdt in fdb:
        zero_padded = numpy.zeros(int((fdt.f0 / fdt.df).value) + len(fdt))
        st = int((fdt.f0 / fdt.df).value)
        zero_padded[st:st + len(fdt)] = numpy.real_if_close(fdt.value)
        n_freq = int(sample_rate / 2 / fdt.df.value) * 2
        tdt = numpy.fft.irfft(zero_padded, n_freq) * math.sqrt(sample_rate)
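        # Roll by half the length so the acausal half of the impulse
        # response wraps to the front, centering the filter in time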
        tdt = numpy.roll(tdt, len(tdt) / 2)
        tdt = TimeSeries(tdt,
                         name="",
                         epoch=fdt.epoch,
                         sample_rate=sample_rate)
        tdb.append(tdt)
    # Plot time series filter
    plot_filters(tdb, fmin, band)
    # Compute the renormalization for the base filters up to a given bandwidth.
    mu_sq_dict = {}
    # Loop through powers of 2 up to number of channels
    for nc_sum in range(0, int(math.log(nchans, 2))):
        nc_sum = 2**nc_sum - 1
        print "|- Calculating renormalization for resolution level containing %d %fHz channels" % (
            nc_sum + 1, min_band)
        mu_sq = (nc_sum + 1) * numpy.array([
            lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, None)
            for f in filter_bank
        ])
        # Uncomment to get all possible frequency renormalizations
        #for n in xrange(nc_sum, nchans): # channel position index
        for n in xrange(nc_sum, nchans, nc_sum + 1):  # channel position index
            for k in xrange(0, nc_sum):  # channel sum index
                # FIXME: We've precomputed this, so use it instead
                mu_sq[n] += 2 * lalburst.ExcessPowerFilterInnerProduct(
                    filter_bank[n - k], filter_bank[n - 1 - k], spec_corr,
                    None)
        #print mu_sq[nc_sum::nc_sum+1]
        mu_sq_dict[nc_sum] = mu_sq
    # Create an event list where all the triggers will be stored
    event_list = lsctables.New(lsctables.SnglBurstTable, [
        'start_time', 'start_time_ns', 'peak_time', 'peak_time_ns', 'duration',
        'bandwidth', 'central_freq', 'chisq_dof', 'confidence', 'snr',
        'amplitude', 'channel', 'ifo', 'process_id', 'event_id', 'search',
        'stop_time', 'stop_time_ns'
    ])
    # Create directories to save TF and time-series plots
    os.system('mkdir -p segments/time-frequency')
    os.system('mkdir -p segments/time-series')
    # Define time edges
    t_idx_min, t_idx_max = 0, seg_len
    while t_idx_max <= len(ts_data):
        # Define starting and ending time of the segment in seconds
        start_time = ts_data.start_time + t_idx_min / float(
            ts_data.sample_rate)
        end_time = ts_data.start_time + t_idx_max / float(ts_data.sample_rate)
        print "\n|-- Analyzing block %i to %i (%.2f percent)" % (
            start_time, end_time, 100 * float(t_idx_max) / len(ts_data))
        # Build a whitened time series for the block
        tmp_ts_data = types.TimeSeries(ts_data[t_idx_min:t_idx_max] *
                                       window.data.data,
                                       delta_t=1. / ts_data.sample_rate,
                                       epoch=start_time)
        # Save the time series in the relevant directory
        segfolder = 'segments/%i-%i' % (start_time, end_time)
        os.system('mkdir -p ' + segfolder)
        plot_ts(tmp_ts_data,
                fname='segments/time-series/%i-%i.png' %
                (start_time, end_time))
        # Convert times series to frequency series
        fs_data = tmp_ts_data.to_frequencyseries()
        print "|-- Frequency series data has variance: %s" % fs_data.data.std(
        )**2
        # Whitening (FIXME: Whiten the filters, not the data)
        fs_data.data /= numpy.sqrt(fd_psd) / numpy.sqrt(2 * fd_psd.delta_f)
        print "|-- Whitened frequency series data has variance: %s" % fs_data.data.std(
        )**2
        print "|-- Create time-frequency plane for current block"
        # Return the complex snr, along with its associated normalization of the template,
        # matched filtered against the data
        #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),
        #                           fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=filter_bank[0].f0,
        #                           high_frequency_cutoff=filter_bank[0].f0+2*band)
        print "|-- Filtering all %d channels..." % nchans
        # Initialise 2D zero array
        tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
        # Initialise 2D zero array for time-frequency map
        tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
        # Loop over all the channels
        for i in range(nchans):
            # Reset filter bank series
            tmp_filter_bank *= 0.0
            # Index of starting frequency
            f1 = int(filter_bank[i].f0 / fd_psd.delta_f)
            # Index of ending frequency
            f2 = int((filter_bank[i].f0 + 2 * band) / fd_psd.delta_f) + 1
            # (FIXME: Why is there a factor of 2 here?)
            tmp_filter_bank[f1:f2] = filter_bank[i].data.data * 2
            # Define the template to filter the frequency series with
            template = types.FrequencySeries(tmp_filter_bank,
                                             delta_f=fd_psd.delta_f,
                                             copy=False)
            # Create filtered series
            filtered_series = filter.matched_filter_core(
                template,
                fs_data,
                h_norm=None,
                psd=None,
                low_frequency_cutoff=filter_bank[i].f0,
                high_frequency_cutoff=filter_bank[i].f0 + 2 * band)
            # Include filtered series in the map
            tf_map[i, :] = filtered_series[0].numpy()
        # Plot spectrogram
        plot_spectrogram(numpy.abs(tf_map).T,
                         tmp_ts_data.delta_t,
                         band,
                         ts_data.sample_rate,
                         start_time,
                         end_time,
                         fname='segments/time-frequency/%i-%i.png' %
                         (start_time, end_time))
        # Loop through all summed channels
        for nc_sum in range(0, int(math.log(nchans, 2)))[::-1]:
            nc_sum = 2**nc_sum - 1
            mu_sq = mu_sq_dict[nc_sum]
            # Clip the boundaries to remove window corruption
            clip_samples = int(psd_segment_length * window_fraction *
                               ts_data.sample_rate / 2)
            # Construct tiles and calculate their energies
            print "\n|--- Constructing tile with %d summed channels..." % (
                nc_sum + 1)
            # Current bandwidth of the time-frequency map tiles
            df = band * (nc_sum + 1)
            dt = 1.0 / (2 * df)
            # Size of each time-domain "step" at the undersampled rate
            us_rate = int(round(dt / ts_data.delta_t))
            print "|--- Undersampling rate for this level: %f" % (
                ts_data.sample_rate / us_rate)
            print "|--- Calculating tiles..."
            # Making independent tiles; handle clip_samples == 0 separately
            # because a [0:-0] slice would be empty rather than the full array
            tf_map_temp = tf_map[:,clip_samples:-clip_samples:us_rate] \
                          if clip_samples > 0 else tf_map[:,::us_rate]
            tiles = tf_map_temp.copy()
            # Here's the deal: we're going to keep only the valid output and
            # it's *always* going to exist in the lowest available indices
            stride = nc_sum + 1
            for i in xrange(tiles.shape[0] / stride):
                numpy.absolute(tiles[stride * i:stride * (i + 1)].sum(axis=0),
                               tiles[stride * (i + 1) - 1])
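            # Keep every (nc_sum + 1)-th row -- those now hold the completed
            # channel sums -- square the magnitudes, and normalize each row
            # by its precomputed inner product mu_sq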
            tiles = (tiles[nc_sum::nc_sum + 1].real**2
                     / mu_sq[nc_sum::nc_sum + 1].reshape(-1, 1))
            print "|--- TF-plane is %dx%s samples" % tiles.shape
            print "|--- Tile energy mean %f, var %f" % (numpy.mean(tiles),
                                                        numpy.var(tiles))
            # Define the maximum number of degrees of freedom and check that it is at least 2
            max_dof = 32 if max_duration is None else 2 * max_duration * df
            assert max_dof >= 2
            # Loop through multiple degrees of freedom
            for j in [2**l for l in xrange(0, int(math.log(max_dof, 2)))]:
                # Duration is fixed by the NDOF and bandwidth
                duration = j * dt
                print "\n|----- Explore signal duration of %f s..." % duration
                print "|----- Summing DOF = %d ..." % (2 * j)
                sum_filter = numpy.array([1, 0] * (j - 1) + [1])
                tlen = tiles.shape[1] - sum_filter.shape[0] + 1
                dof_tiles = numpy.zeros((tiles.shape[0], tlen))
                for f in range(tiles.shape[0]):
                    # Sum every other tile; adjacent tiles are correlated and skipped
                    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')
                print "|----- Summed tile energy mean: %f, var %f" % (
                    numpy.mean(dof_tiles), numpy.var(dof_tiles))
                plot_spectrogram(
                    dof_tiles.T,
                    dt,
                    df,
                    ts_data.sample_rate,
                    start_time,
                    end_time,
                    fname='segments/%i-%i/tf_%02ichans_%02idof.png' %
                    (start_time, end_time, nc_sum + 1, 2 * j))
                threshold = scipy.stats.chi2.isf(tile_fap, j)
                print "|------ Threshold for this level: %f" % threshold
                spant, spanf = dof_tiles.shape[1] * dt, dof_tiles.shape[0] * df
                print "|------ Processing %.2fx%.2f time-frequency map." % (
                    spant, spanf)
                # Since we clip the data, the start time needs to be adjusted accordingly
                window_offset_epoch = fs_data.epoch + psd_segment_length * window_fraction / 2
                window_offset_epoch = LIGOTimeGPS(float(window_offset_epoch))
                for i, j in zip(*numpy.where(dof_tiles > threshold)):
                    event = event_list.RowType()
                    # The points are summed forward in time and thus a `summed point' is the
                    # sum of the previous N points. If this point is above threshold, it
                    # corresponds to a tile which spans the previous N points. However, the
                    # 0th point (due to the convolution specifier 'valid') is actually
                    # already a duration from the start time. All of this means the +
                    # duration and the - duration cancel, and the tile 'start' is, by
                    # definition, the start of the time-frequency map when j = 0
                    # FIXME: I think this needs a + dt/2 to center the tile properly
                    event.set_start(window_offset_epoch + float(j * dt))
                    event.set_stop(window_offset_epoch + float(j * dt) +
                                   duration)
                    event.set_peak(event.get_start() + duration / 2)
                    event.central_freq = filter_bank[
                        0].f0 + band / 2 + i * df + 0.5 * df
                    event.duration = duration
                    event.bandwidth = df
                    event.chisq_dof = 2 * duration * df
                    event.snr = math.sqrt(dof_tiles[i, j] / event.chisq_dof -
                                          1)
                    # FIXME: Magic number 0.62 should be determined empirically
                    event.confidence = -lal.LogChisqCCDF(
                        event.snr * 0.62, event.chisq_dof * 0.62)
                    event.amplitude = None
                    event.process_id = None
                    event.event_id = event_list.get_next_id()
                    event_list.append(event)
                for event in event_list[::-1]:
                    if event.amplitude is not None:
                        continue
                    etime_min_idx = float(event.get_start()) - float(
                        fs_data.epoch)
                    etime_min_idx = int(etime_min_idx / tmp_ts_data.delta_t)
                    etime_max_idx = float(event.get_start()) - float(
                        fs_data.epoch) + event.duration
                    etime_max_idx = int(etime_max_idx / tmp_ts_data.delta_t)
                    # (band / 2) to account for sin^2 wings from finest filters
                    flow_idx = int((event.central_freq - event.bandwidth / 2 -
                                    (df / 2) - fmin) / df)
                    fhigh_idx = int((event.central_freq + event.bandwidth / 2 +
                                     (df / 2) - fmin) / df)
                    # TODO: Check that the undersampling rate is always commensurate
                    # with the indexing: that is to say that
                    # mod(etime_min_idx, us_rate) == 0 always
                    z_j_b = tf_map[flow_idx:fhigh_idx,
                                   etime_min_idx:etime_max_idx:us_rate]
                    event.amplitude = 0
                print "|------ Total number of events: %d" % len(event_list)
        t_idx_min += int(seg_len * (1 - window_fraction))
        t_idx_max += int(seg_len * (1 - window_fraction))
    setname = "MagneticFields"
    __program__ = 'pyburst_excesspower'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = 'H1'  #channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc,
                                  __program__,
                                  straindict,
                                  ifos=[ifo],
                                  version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    dt_stride = psd_segment_length
    sample_rate = ts_data.sample_rate
    # Amount to overlap successive blocks so as not to lose data
    window_overlap_samples = window_fraction * sample_rate
    outseg = inseg.contract(window_fraction * dt_stride / 2)
    # With a given dt_stride, we cannot process the remainder of this data
    remainder = math.fmod(abs(outseg), dt_stride * (1 - window_fraction))
    # ...so make an accounting of it
    outseg = segment(outseg[0], outseg[1] - remainder)
    ss = append_search_summary(xmldoc,
                               proc_row,
                               ifos=(station, ),
                               inseg=inseg,
                               outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    fname = 'excesspower.xml.gz'
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
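
The tile-to-trigger step in excess_power2 is a plain chi-squared tail test: a tile fires when its summed energy exceeds the inverse survival function at the requested false-alarm probability. A self-contained sketch with toy numbers (the map shape, the false-alarm probability and the DOF value are illustrative):

import numpy
import scipy.stats

tile_fap = 1e-7                  # illustrative per-tile false-alarm probability
j = 8                            # number of tiles summed at this level
threshold = scipy.stats.chi2.isf(tile_fap, j)

dof_tiles = numpy.random.chisquare(j, size=(16, 128))   # toy energy map
for i, k in zip(*numpy.where(dof_tiles > threshold)):
    print("tile (freq bin %d, time bin %d) above threshold" % (i, k))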
Example #23
0
    def values(self, sn, indices, template, psd, norm, stilde=None,
               low_frequency_cutoff=None, high_frequency_cutoff=None):
        """
        Calculate the auto-chisq at the specified indices.

        Parameters
        -----------
        sn : Array[complex]
            SNR time series of the template for which auto-chisq is being
            computed. Provided unnormalized. 
        indices : Array[int]
            List of points at which to calculate auto-chisq
        template : Pycbc template object 
            The template for which we are calculating auto-chisq
        psd : Pycbc psd object
            The PSD of the data being analysed
        norm : float
            The normalization factor to apply to sn
        stilde : Pycbc data object, needed if using reverse-template
            The data being analysed. Only needed if using reverse-template,
            otherwise ignored
        low_frequency_cutoff : float
            The lower frequency to consider in matched-filters
        high_frequency_cutoff : float
            The upper frequency to consider in matched-filters
        """
        if self.do and (len(indices) > 0):
            htilde = make_frequency_series(template)

            # Check if we need to recompute the autocorrelation
            key = (id(template), id(psd))
            if key != self._autocor_id:
                logging.info("Calculating autocorrelation")

                if not self.reverse_template:
                    Pt, _Ptilde, P_norm = matched_filter_core(htilde,
                              htilde, psd=psd,
                              low_frequency_cutoff=low_frequency_cutoff,
                              high_frequency_cutoff=high_frequency_cutoff)
                    Pt = Pt * (1./ Pt[0])
                    self._autocor = Array(Pt, copy=True)
                else:
                    Pt, _Ptilde, P_norm = matched_filter_core(htilde.conj(),
                              htilde, psd=psd,
                              low_frequency_cutoff=low_frequency_cutoff,
                              high_frequency_cutoff=high_frequency_cutoff)

                    # T-reversed template has same norm as forward template
                    # so we can normalize using that
                    # FIXME: Here sigmasq has to be cast to a float or the
                    #        code is really slow ... why??
                    norm_fac = P_norm / float(((template.sigmasq(psd))**0.5))
                    Pt *= norm_fac
                    self._autocor = Array(Pt, copy=True)
                self._autocor_id = key
            
            logging.info("...Calculating autochisquare")
            sn = sn*norm
            if self.reverse_template:
                assert(stilde is not None)
                asn, acor, ahnrm = matched_filter_core(htilde.conj(), stilde,
                                 low_frequency_cutoff=low_frequency_cutoff,
                                 high_frequency_cutoff=high_frequency_cutoff,
                                 h_norm=template.sigmasq(psd))
                correlation_snr = asn * ahnrm
            else:
                correlation_snr = sn

            achi_list = np.array([])
            index_list = np.array(indices)
            dof, achi_list, _ = autochisq_from_precomputed(sn, correlation_snr,
                               self._autocor, index_list, stride=self.stride,
                               num_points=self.num_points,
                               oneside=self.one_sided, twophase=self.two_phase,
                               maxvalued=self.take_maximum_value)
            self.dof = dof
            return achi_list
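
The forward-template branch above reduces to filtering the template against itself and rescaling so the zero-lag value is one. A minimal sketch of that step in isolation, assuming htilde and psd are the frequency-domain template and PSD from the method above (the 20 Hz cutoff is illustrative):

from pycbc.filter import matched_filter_core
from pycbc.types import Array

Pt, _Ptilde, _norm = matched_filter_core(htilde, htilde, psd=psd,
                                         low_frequency_cutoff=20.0)
Pt = Pt * (1. / Pt[0])          # unit autocorrelation at zero lag
autocor = Array(Pt, copy=True)  # cached and reused at the trigger indices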
Example #24
0
    # Generate a zeros frequency series the same length as the psd
    filter = FrequencySeries(numpy.zeros(len(psd)), 1, dtype=numpy.complex128)

    # Generate the waveform filter with given params
    filter = get_waveform_filter(filter,
                                 approximant=params.apx,
                                 mass1=m1,
                                 mass2=m2,
                                 sp1z=params.sp1z,
                                 sp2z=params.sp2z,
                                 polarization=params.pol,
                                 delta_f=psd.delta_f,
                                 f_lower=params.flow)

    # Calculate the SNR Time Series
    snr, _, norm = matched_filter_core(filter, conditioned, psd=psd, low_frequency_cutoff=20)

    snr = snr * norm

    # Trim SNR Time Series to account for edge effects
    snr = snr.crop(4 + 4, 4)

    snrs[i] = snr

    # Get data of the peak
    peak = abs(snr).numpy().argmax()
    snrp = snr[peak]
    time = snr.sample_times[peak]
    phase = numpy.angle(snrp)

    snrname = cwd + '/' + outfile[i]
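
The fragment above applies the matched-filter normalization by hand (snr * norm). For comparison, a sketch of the same step through PyCBC's higher-level matched_filter wrapper, which normalizes internally; filter, conditioned and psd are assumed to be the objects from the fragment, and the cutoff and crop values mirror it:

from pycbc.filter import matched_filter

# `filter` (the waveform), `conditioned` and `psd` reuse the fragment's names
snr = matched_filter(filter, conditioned, psd=psd, low_frequency_cutoff=20)
snr = snr.crop(4 + 4, 4)        # same 8 s / 4 s edge trim as above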
Example #25
0
    def values(self, sn, indices, template, psd, norm, stilde=None,
               low_frequency_cutoff=None, high_frequency_cutoff=None):
        """
        Calculate the auto-chisq at the specified indices.

        Parameters
        -----------
        sn : Array[complex]
            SNR time series of the template for which auto-chisq is being
            computed. Provided unnormalized. 
        indices : Array[int]
            List of points at which to calculate auto-chisq
        template : Pycbc template object 
            The template for which we are calculating auto-chisq
        psd : Pycbc psd object
            The PSD of the data being analysed
        norm : float
            The normalization factor to apply to sn
        stilde : Pycbc data object, needed if using reverse-template
            The data being analysed. Only needed if using reverse-template,
            otherwise ignored
        low_frequency_cutoff : float
            The lower frequency to consider in matched-filters
        high_frequency_cutoff : float
            The upper frequency to consider in matched-filters
        """
        if self.do and (len(indices) > 0):
            htilde = make_frequency_series(template)

            # Check if we need to recompute the autocorrelation
            key = (id(template), id(psd))
            if key != self._autocor_id:
                logging.info("Calculating autocorrelation")

                if not self.reverse_template:
                    Pt, _Ptilde, P_norm = matched_filter_core(htilde,
                              htilde, psd=psd,
                              low_frequency_cutoff=low_frequency_cutoff,
                              high_frequency_cutoff=high_frequency_cutoff)
                    Pt = Pt * (1./ Pt[0])
                    self._autocor = Array(Pt, copy=True)
                else:
                    Pt, _Ptilde, P_norm = matched_filter_core(htilde,
                              htilde.conj(), psd=psd,
                              low_frequency_cutoff=low_frequency_cutoff,
                              high_frequency_cutoff=high_frequency_cutoff)

                    # T-reversed template has same norm as forward template
                    # so we can normalize using that
                    # FIXME: Here sigmasq has to be cast to a float or the
                    #        code is really slow ... why??
                    norm_fac = P_norm / float(((template.sigmasq(psd))**0.5))
                    Pt *= norm_fac
                    self._autocor = Array(Pt, copy=True)
                self._autocor_id = key
            
            logging.info("...Calculating autochisquare")
            sn = sn*norm
            if self.reverse_template:
                assert(stilde is not None)
                asn, acor, ahnrm = matched_filter_core(htilde.conj(), stilde,
                                 low_frequency_cutoff=low_frequency_cutoff,
                                 high_frequency_cutoff=high_frequency_cutoff,
                                 h_norm=template.sigmasq(psd))
                correlation_snr = asn * ahnrm
            else:
                correlation_snr = sn

            achi_list = np.array([])
            index_list = np.array(indices)
            dof, achi_list, _ = autochisq_from_precomputed(sn, correlation_snr,
                               self._autocor, index_list, stride=self.stride,
                               num_points=self.num_points,
                               oneside=self.one_sided, twophase=self.two_phase,
                               maxvalued=self.take_maximum_value)
            self.dof = dof
            return achi_list
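
For context, a hypothetical driver for the values method above. The class name SingleDetAutoChisq and its positional constructor arguments are assumptions inferred from the attributes the method reads (self.stride, self.num_points, self.reverse_template), not confirmed API:

from pycbc.vetoes import SingleDetAutoChisq   # assumed import path

# Assumed positional constructor: stride and number of points, matching
# the self.stride / self.num_points attributes read by values()
chisq = SingleDetAutoChisq(3, 20)
achi = chisq.values(sn, indices, template, psd, norm,
                    stilde=stilde,
                    low_frequency_cutoff=30.0,      # illustrative cutoffs
                    high_frequency_cutoff=1024.0)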
Example #26
0
def align_waveforms_optimally(hplus1,
                              hcross1,
                              hplus2,
                              hcross2,
                              psd='aLIGOZeroDetHighPower',
                              low_frequency_cutoff=None,
                              high_frequency_cutoff=None,
                              tsign=1,
                              phsign=-1,
                              verify=True,
                              phase_tolerance=1e-3,
                              overlap_tolerance=1e-3,
                              trim_leading=False,
                              trim_trailing=False,
                              verbose=False):
    """
    Align waveforms such that their noise-weighted inner product is optimal,
    i.e. no further phase or time shift is required to maximize it.

    The appropriate time and phase shifts are determined iteratively and applied
    to the second set of (hplus, hcross) vectors.
    """
    #############################################################################
    # First copy over data into local memory, ensure lengths of time and
    # frequency domain vectors are consistent, and compute the maximized overlap
    #
    # 1) Cast into time-series
    h_plus1 = TimeSeries(hplus1,
                         epoch=hplus1._epoch,
                         delta_t=hplus1.delta_t,
                         dtype=hplus1.dtype,
                         copy=True)
    h_cross1 = TimeSeries(hcross1,
                          epoch=hplus1._epoch,
                          delta_t=hplus1.delta_t,
                          dtype=hplus1.dtype,
                          copy=True)
    h_plus2 = TimeSeries(hplus2,
                         epoch=hplus2._epoch,
                         delta_t=hplus2.delta_t,
                         dtype=hplus2.dtype,
                         copy=True)
    h_cross2 = TimeSeries(hcross2,
                          epoch=hplus2._epoch,
                          delta_t=hplus2.delta_t,
                          dtype=hplus2.dtype,
                          copy=True)
    #
    # 2) Ensure both input hplus vectors are equal in length
    if len(hplus2) > len(hplus1):
        h_plus1.append_zeros(len(hplus2) - len(hplus1))
        h_cross1.append_zeros(len(hplus2) - len(hplus1))
    elif len(hplus2) < len(hplus1):
        h_plus2.append_zeros(len(hplus1) - len(hplus2))
        h_cross2.append_zeros(len(hplus1) - len(hplus2))
    #
    # 3) Set the upper frequency cutoff to Nyquist if not set by User
    if high_frequency_cutoff is None:
        high_frequency_cutoff = 1. / h_plus1.delta_t / 2.
    #
    # 4) Compute LIGO noise psd
    if psd is None:
        raise IOError("Need compatible psd [or name] as input!")
    elif isinstance(psd, str):
        htilde = make_frequency_series(h_plus1)
        psd_name = psd
        psd = from_string(psd_name, len(htilde), htilde.delta_f,
                          low_frequency_cutoff)
    ##
    # 5) Calculate Overlap (maximized) before alignment
    m = match(h_plus1,
              h_plus2,
              psd=psd,
              low_frequency_cutoff=low_frequency_cutoff,
              high_frequency_cutoff=high_frequency_cutoff)
    optimal_overlap = m[0]  # FIXME
    if verbose:
        print(("Overlap BEFORE ALIGNMENT:",
               overlap_cplx(h_plus1,
                            h_plus2,
                            psd=psd,
                            low_frequency_cutoff=low_frequency_cutoff,
                            high_frequency_cutoff=high_frequency_cutoff,
                            normalized=True)))
        print(("Match BEFORE ALIGNMENT:", m))
    #############################################################################
    # Iterate to obtain the correct phase and time shifts, using which we
    # align the two waveforms such that their unmaximized and maximized overlaps
    # agree.

    #
    # 1) Initialize phase/time offset counters
    t_shift_counter = 0
    ph_shift_counter = 0
    #
    # 2) Initialize initial garbage values to enter the while loop
    idx = 0
    ph_shift = t_shift = 1e9
    olap = 0 + 0j
    #
    # 3) Iteration begins
    # >>>>>>
    while np.abs(ph_shift) > phase_tolerance or \
            np.abs(t_shift) > h_plus1.delta_t or \
            np.abs(np.abs(olap.real) - optimal_overlap) > overlap_tolerance:
        if idx == 0:
            hp2, hc2 = h_plus2, h_cross2
        #
        # 1) Determine the phase and time shifts for optimal match
        #    by comparing hplus1/hcross1 with hp2/hc2 which is phase/time shifted
        #    in previous iteration
        snr, corr, snr_norm = matched_filter_core(h_plus1, hp2, psd,
                                                  low_frequency_cutoff,
                                                  high_frequency_cutoff, None)
        max_snr, max_id = snr.abs_max_loc()

        if max_id != 0:
            t_shift = snr.delta_t * (len(snr) - max_id)
        else:
            t_shift = snr.delta_t * max_id

        ph_shift = np.angle(snr[max_id])

        #
        # 2) Add them to running time/phase offset counter
        t_shift_counter += t_shift
        ph_shift_counter += ph_shift
        #
        if verbose:
            print((" >> Iteration %d\n" % (idx + 1)))
            print(("max_id = %d, id_shift = %d" %
                   (max_id, int(t_shift / snr.delta_t))))
            print(("t_shift = %f,\n ph_shift = %f" % (t_shift, ph_shift)))
        #
        ####
        # 3) Shift the second hp/hc pair (ORIGINAL) by cumulative phase/time offset
        hp2, hc2 = shift_waveform_phase_time(h_plus2,
                                             h_cross2,
                                             tsign * t_shift_counter,
                                             phsign * ph_shift_counter,
                                             verbose=verbose)
        #
        ###
        # 4) As time shifting can change array lengths, equalize again, compute psd
        ##
        if len(h_plus1) > len(hp2):
            hp2.append_zeros(len(h_plus1) - len(hp2))
            htilde = make_frequency_series(h_plus1)
            psd = from_string(psd_name, len(htilde), htilde.delta_f,
                              low_frequency_cutoff)
        elif len(h_plus1) < len(hp2):
            h_plus1.append_zeros(len(hp2) - len(h_plus1))
            htilde = make_frequency_series(h_plus1)
            psd = from_string(psd_name, len(htilde), htilde.delta_f,
                              low_frequency_cutoff)
        #
        # 5) Compute UNMAXIMIZED overlap.
        olap = overlap_cplx(h_plus1,
                            hp2,
                            psd=psd,
                            low_frequency_cutoff=low_frequency_cutoff,
                            high_frequency_cutoff=high_frequency_cutoff,
                            normalized=True)
        if verbose:
            print(("Overlap AFTER ALIGNMENT = ", olap))
            print(("Optimal Overlap = ", optimal_overlap))
        #
        idx += 1
        if verbose:
            print("\n")
    # >>>>>>
    # 3) Iteration ended.

    #############################################################################
    # Verify the alignment
    ###
    if verify:
        #
        print("Verifying time alignment...")
        #
        # 1) Determine the phase and time shifts for optimal match
        snr, corr, snr_norm = matched_filter_core(h_plus1, hp2, psd,
                                                  low_frequency_cutoff,
                                                  high_frequency_cutoff, None)
        max_snr, max_id = snr.abs_max_loc()
        if verbose:
            print(
                ("Post-Alignment Index of MAX SNR (should be 0 or 1 or %d): %d"
                 % (len(snr) - 1, max_id)))
            print(("Length of whole SNR time-series: ", len(snr)))
        #
        # 2) Test if current time shift is within tolerance
        if max_id != 0 and max_id != 1 and \
                max_id != (len(snr)-1) and max_id != (len(snr)-2):
            raise RuntimeError("Warning: ALIGNMENT NOT CORRECT (see above)")
        else:
            print("Alignment in time correct..")
        #
        # 3) Test if current phase shift is within tolerance
        print("Verifying phase alignment...")
        ph_shift = np.angle(snr[max_id])
        if np.abs(ph_shift) > phase_tolerance:
            if verbose:
                print(("dphi, dphi+pi, dphi-pi: ", ph_shift, ph_shift + np.pi,
                       ph_shift - np.pi))
                print(
                    ("dphi/pi, dphi*pi: ", ph_shift / np.pi, ph_shift * np.pi))
            raise RuntimeError(
                "Warning: Phasing alignment possibly incorrect.")
        else:
            if verbose:
                print(("Post-Alignmend Phase shift (should be < %.2e): %.2e" %
                       (phase_tolerance, np.abs(ph_shift))))
            print(("Alignment in phasing correct.. (within tol %.2e)" %
                   phase_tolerance))
        #

    #############################################################################
    # TRIM the output arrays and return
    if trim_trailing:
        hp2 = trim_trailing_zeros(hp2)
        hc2 = trim_trailing_zeros(hc2)
    if trim_leading:
        hp2 = trim_leading_zeros(hp2)
        hc2 = trim_leading_zeros(hc2)
    #
    return hplus1, hcross1, hp2, hc2
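
The pre-alignment diagnostic in align_waveforms_optimally compares a maximized match against an unmaximized complex overlap. A minimal standalone sketch of that comparison, assuming hp1 and hp2 are equal-length PyCBC TimeSeries (the 20 Hz cutoff is illustrative):

from pycbc.filter import match, overlap_cplx
from pycbc.psd import aLIGOZeroDetHighPower

flow = 20.0                                  # illustrative low-frequency cutoff
flen = len(hp1) // 2 + 1
psd = aLIGOZeroDetHighPower(flen, 1.0 / hp1.duration, flow)

m, _ = match(hp1, hp2, psd=psd, low_frequency_cutoff=flow)   # maximized
o = overlap_cplx(hp1, hp2, psd=psd, low_frequency_cutoff=flow,
                 normalized=True)                            # unmaximized

When the waveforms are aligned, abs(o) should approach m; the iteration in the function above drives exactly that difference below overlap_tolerance.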