Example #1
    def lal(self):
        """Produces a LAL time series object equivalent to self.

        Returns
        -------
        lal_data : {lal.*TimeSeries}
            LAL time series object containing the same data as self.
            The actual type depends on the dtype of the sample data.  If the
            epoch of self is None, the epoch of the returned LAL object will
            be LIGOTimeGPS(0,0); otherwise, it will be the same as that of
            self.

        Raises
        ------
        TypeError
            If time series is stored in GPU memory.
        """
        lal_data = None
        ep = self._epoch

        if self._data.dtype == _numpy.float32:
            lal_data = _lal.CreateREAL4TimeSeries("", ep, 0, self.delta_t,
                                                  _lal.SecondUnit, len(self))
        elif self._data.dtype == _numpy.float64:
            lal_data = _lal.CreateREAL8TimeSeries("", ep, 0, self.delta_t,
                                                  _lal.SecondUnit, len(self))
        elif self._data.dtype == _numpy.complex64:
            lal_data = _lal.CreateCOMPLEX8TimeSeries("", ep, 0, self.delta_t,
                                                     _lal.SecondUnit, len(self))
        elif self._data.dtype == _numpy.complex128:
            lal_data = _lal.CreateCOMPLEX16TimeSeries("", ep, 0, self.delta_t,
                                                      _lal.SecondUnit, len(self))

        lal_data.data.data[:] = self.numpy()

        return lal_data
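A minimal usage sketch for the method above, assuming it is pycbc.types.TimeSeries.lal() and that pycbc and lal are installed; the array contents and sample rate below are illustrative only:

import numpy
from pycbc.types import TimeSeries

# Illustrative only: a float64 TimeSeries converts to a lal.REAL8TimeSeries.
ts = TimeSeries(numpy.zeros(4096, dtype=numpy.float64), delta_t=1.0 / 4096)
lal_ts = ts.lal()
assert lal_ts.deltaT == ts.delta_t
assert len(lal_ts.data.data) == len(ts)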
Example #2
    def lal(self):
        """ Returns a LAL Object that contains this data """

        lal_data = None
        if type(self._data) is not _numpy.ndarray:
            raise TypeError("Cannot return lal type from the GPU")
        elif self._data.dtype == _numpy.float32:
            lal_data = _lal.CreateREAL4TimeSeries("", self._epoch, 0,
                                                  self.delta_t,
                                                  _lal.lalSecondUnit,
                                                  len(self))
        elif self._data.dtype == _numpy.float64:
            lal_data = _lal.CreateREAL8TimeSeries("", self._epoch, 0,
                                                  self.delta_t,
                                                  _lal.lalSecondUnit,
                                                  len(self))
        elif self._data.dtype == _numpy.complex64:
            lal_data = _lal.CreateCOMPLEX8TimeSeries("", self._epoch, 0,
                                                     self.delta_t,
                                                     _lal.lalSecondUnit,
                                                     len(self))
        elif self._data.dtype == _numpy.complex128:
            lal_data = _lal.CreateCOMPLEX16TimeSeries("", self._epoch, 0,
                                                      self.delta_t,
                                                      _lal.lalSecondUnit,
                                                      len(self))

        lal_data.data.data[:] = self._data

        return lal_data
Example #3
    def snr_time_series(self):
        try:
            name = self._snr_name
        except ValueError:
            # C interface raises ValueError if the internal snr
            # pointer is NULL
            return None
        series = lal.CreateCOMPLEX8TimeSeries(
            name,
            lal.LIGOTimeGPS(self._snr_epoch_gpsSeconds,
                            self._snr_epoch_gpsNanoSeconds), self._snr_f0,
            self._snr_deltaT, lal.Unit(self._snr_sampleUnits),
            self._snr_data_length)
        # we want to be able to keep the table row object in memory
        # for an extended period of time so we need to be able to
        # release the memory used by the SNR time series when we no
        # longer need it, and so we copy the data here instead of
        # holding a reference to the original memory.  if we
        # allowed references to the original memory to leak out
        # into Python land we could never know if it's safe to free
        # it
        series.data.data[:] = self._snr_data
        return series
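The copy-versus-reference point made in the comment above reduces to NumPy slice-assignment semantics; a small self-contained sketch (array values are illustrative):

import numpy as np

src = np.arange(4, dtype=np.complex64)   # stands in for self._snr_data
dst = np.zeros(4, dtype=np.complex64)    # stands in for series.data.data
dst[:] = src                             # element-wise copy into dst's own buffer
src[0] = 99                              # later changes to the source...
assert dst[0] == 0                       # ...do not affect the copy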
Example #4
    def _make_series(self, array, epoch, row_number):
        """For internal use only."""
        para = {
            "name":
            "%s_%d_%d" % (self.instrument, self.bank_number, row_number),
            "epoch": epoch,
            "deltaT": self.deltaT,
            "f0": 0,
            "sampleUnits": lal.DimensionlessUnit,
            "length": len(array)
        }
        if array.dtype == numpy.float32:
            tseries = lal.CreateREAL4TimeSeries(**para)
        elif array.dtype == numpy.float64:
            tseries = lal.CreateREAL8TimeSeries(**para)
        elif array.dtype == numpy.complex64:
            tseries = lal.CreateCOMPLEX8TimeSeries(**para)
        elif array.dtype == numpy.complex128:
            tseries = lal.CreateCOMPLEX16TimeSeries(**para)
        else:
            raise ValueError("unsupported type : %s " % array.dtype)

        tseries.data.data = array
        return tseries
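A standalone sketch of the same dtype-to-constructor dispatch, assuming only that the lal Python bindings are importable; make_series and its default arguments are hypothetical, not part of the class above:

import lal
import numpy

# Mirrors the if/elif chain in _make_series() above.
_CONSTRUCTORS = {
    numpy.dtype(numpy.float32): lal.CreateREAL4TimeSeries,
    numpy.dtype(numpy.float64): lal.CreateREAL8TimeSeries,
    numpy.dtype(numpy.complex64): lal.CreateCOMPLEX8TimeSeries,
    numpy.dtype(numpy.complex128): lal.CreateCOMPLEX16TimeSeries,
}

def make_series(array, name='example', epoch=0, deltaT=1.0):
    try:
        create = _CONSTRUCTORS[array.dtype]
    except KeyError:
        raise ValueError('unsupported dtype: %s' % array.dtype)
    series = create(name, epoch, 0, deltaT, lal.DimensionlessUnit, len(array))
    series.data.data = array
    return series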
Example #5
def condition(event,
              waveform='o2-uberbank',
              f_low=30.0,
              enable_snr_series=True,
              f_high_truncate=0.95):

    if len(event.singles) == 0:
        raise ValueError('Cannot localize an event with zero detectors.')

    singles = event.singles
    if not enable_snr_series:
        singles = [single for single in singles if single.snr is not None]

    ifos = [single.detector for single in singles]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if single.snr is None else single.snr
        for single in singles
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors]) / lal.C_SI

    # Power spectra for each detector.
    psds = [single.psd for single in singles]
    psds = [
        filter.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(waveform, f_min=f_low, **event.template_args)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [filter.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= (np.sum(locations * weights.reshape(-1, 1), axis=0) /
                  np.sum(weights))

    if enable_snr_series:
        snr_series = [single.snr_series for single in singles]
        if all(s is None for s in snr_series):
            snr_series = None
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations), axis=1))) + 0.005

    if snr_series is None:
        log.warning("No SNR time series found, so we are creating a "
                    "zero-noise SNR time series from the whitened template's "
                    "autocorrelation sequence. The sky localization "
                    "uncertainty may be underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, single in zip(acors, singles):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = single.time - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            series.data.data = single.snr * filter.exp_i(single.phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series with '
                         'mixed sample rates')

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series of '
                         'mixed lengths')

    # Sanity-check that the middle sample of each SNR time series matches
    # the corresponding sngl_inspiral record to the nearest sample (plus the
    # smallest representable LIGOTimeGPS difference of 1 nanosecond).
    for ifo, single, series in zip(ifos, singles, snr_series):
        shift = np.abs(0.5 * (nsamples - 1) * series.deltaT +
                       float(series.epoch - single.time))
        if shift >= deltaT + 1e-8:
            raise ValueError('BAYESTAR expects the SNR time series to be '
                             'centered on the single-detector trigger times, '
                             'but {} was off by {} s'.format(ifo, shift))

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    mean_toa = np.average(toas, weights=weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    return epoch, sample_rate, toas, snr_series, responses, locations, horizons
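The odd-length and trigger-centering requirements enforced above can be illustrated with a toy SNR series built around an arbitrary trigger time; this is only a sketch, and the GPS time, sample rate, and window are illustrative:

import numpy as np
import lal

trigger_time = lal.LIGOTimeGPS(1187008882, 443000000)
sample_rate = 4096.0
# 2 * max_abs_n - 1 samples, odd by construction (here max_abs_t = 15 ms).
nsamples = 2 * int(np.ceil(0.015 * sample_rate)) - 1
series = lal.CreateCOMPLEX8TimeSeries('toy SNR', 0, 0, 1 / sample_rate,
                                      lal.DimensionlessUnit, nsamples)
# Place the middle sample exactly on the trigger time, as the checks expect.
series.epoch = trigger_time - 0.5 * (nsamples - 1) / sample_rate
series.data.data = np.zeros(nsamples, dtype=np.complex64)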
Example #6
def localize(event,
             waveform='o2-uberbank',
             f_low=30.0,
             min_distance=None,
             max_distance=None,
             prior_distance_power=None,
             cosmology=False,
             method='toa_phoa_snr',
             nside=-1,
             chain_dump=None,
             enable_snr_series=True,
             f_high_truncate=0.95):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """
    frame = inspect.currentframe()
    argstr = inspect.formatargvalues(*inspect.getargvalues(frame))
    start_time = lal.GPSTimeNow()

    singles = event.singles
    if not enable_snr_series:
        singles = [single for single in singles if single.snr is not None]

    ifos = [single.detector for single in singles]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if single.snr is None else single.snr
        for single in singles
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    psds = [single.psd for single in singles]
    psds = [
        timing.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(waveform, f_min=f_low, **event.template_args)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= np.sum(locations * weights.reshape(-1, 1),
                        axis=0) / np.sum(weights)

    if cosmology:
        log.warn('Enabling cosmological prior. This feature is UNREVIEWED.')

    if enable_snr_series:
        log.warn('Enabling input of SNR time series. '
                 'This feature is UNREVIEWED.')
        snr_series = [single.snr_series for single in singles]
        if all(s is None for s in snr_series):
            snr_series = None
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations / lal.C_SI),
                                      axis=1))) + 0.005

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, single in zip(acors, singles):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = single.time - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            series.data.data = single.snr * filter.exp_i(single.phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series with '
                         'mixed sample rates')

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series of '
                         'mixed lengths')

    # Sanity-check that the middle sample of each SNR time series matches
    # the corresponding sngl_inspiral record. Relax the valid interval slightly
    # from +/- 0.5 deltaT to +/- 0.6 deltaT to allow for floating-point
    # roundoff error.
    for single, series in zip(singles, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - single.time)) >= 0.6 * deltaT:
            raise ValueError('BAYESTAR expects the SNR time series to be '
                             'centered on the single-detector trigger times')

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    # FIXME: np.average does not yet support masked arrays.
    # Replace with np.average when numpy 1.13.0 is available.
    mean_toa = np.sum(toas * weights) / np.sum(weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the
    # prior is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ('Prior is a power law r^k with k={}, '
             'undefined at min_distance=0').format(prior_distance_power))

    # Time and run sky localization.
    log.debug('starting computationally-intensive section')
    if method == 'toa_phoa_snr':
        skymap, log_bci, log_bsn = _sky_map.toa_phoa_snr(
            min_distance, max_distance, prior_distance_power, cosmology, gmst,
            sample_rate, toas, snr_series, responses, locations, horizons)
        skymap = Table(skymap)
        skymap.meta['log_bci'] = log_bci
        skymap.meta['log_bsn'] = log_bsn
    elif method == 'toa_phoa_snr_mcmc':
        skymap = localize_emcee(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            chain_dump=chain_dump)
    else:
        raise ValueError('Unrecognized method: %s' % method)

    # Convert distance moments to parameters
    distmean = skymap.columns.pop('DISTMEAN')
    diststd = skymap.columns.pop('DISTSTD')
    skymap['DISTMU'], skymap['DISTSIGMA'], skymap['DISTNORM'] = \
        distance.moments_to_parameters(distmean, diststd)

    # Add marginal distance moments
    good = np.isfinite(distmean) & np.isfinite(diststd)
    prob = (moc.uniq2pixarea(skymap['UNIQ']) * skymap['PROBDENSITY'])[good]
    distmean = distmean[good]
    diststd = diststd[good]
    rbar = (prob * distmean).sum()
    r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()
    skymap.meta['distmean'] = rbar
    skymap.meta['diststd'] = np.sqrt(r2bar - np.square(rbar))

    log.debug('finished computationally-intensive section')
    end_time = lal.GPSTimeNow()

    # Fill in metadata and return.
    program, _ = os.path.splitext(os.path.basename(sys.argv[0]))
    skymap.meta['creator'] = 'BAYESTAR'
    skymap.meta['origin'] = 'LIGO/Virgo'
    skymap.meta['vcs_info'] = vcs_info
    skymap.meta['gps_time'] = float(epoch)
    skymap.meta['runtime'] = float(end_time - start_time)
    skymap.meta['instruments'] = {single.detector for single in singles}
    skymap.meta['gps_creation_time'] = end_time
    skymap.meta['history'] = [
        '', 'Generated by calling the following Python function:',
        '{}.{}{}'.format(__name__, frame.f_code.co_name, argstr), '',
        'This was the command line that started the program:',
        ' '.join([program] + sys.argv[1:])
    ]

    return skymap
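A short hedged sketch of the distance post-processing step above, assuming the distance module referenced in the code is ligo.skymap.distance; the per-pixel moments below are illustrative:

import numpy as np
from ligo.skymap import distance

# Per-pixel posterior mean and standard deviation of distance, in Mpc.
distmean = np.array([100.0, 150.0])
diststd = np.array([30.0, 45.0])
# Converts (mean, std) into the (DISTMU, DISTSIGMA, DISTNORM) ansatz
# parameters that are stored in the sky map columns.
distmu, distsigma, distnorm = distance.moments_to_parameters(distmean, diststd)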
Example #7
def ligolw_sky_map(sngl_inspirals,
                   waveform,
                   f_low,
                   min_distance=None,
                   max_distance=None,
                   prior_distance_power=None,
                   method="toa_phoa_snr",
                   psds=None,
                   nside=-1,
                   chain_dump=None,
                   phase_convention='antifindchirp',
                   snr_series=None,
                   enable_snr_series=False):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """

    # Ensure that sngl_inspiral is either a single template or a list of
    # identical templates
    for key in 'mass1 mass2 spin1x spin1y spin1z spin2x spin2y spin2z'.split():
        if hasattr(sngl_inspirals[0], key):
            value = getattr(sngl_inspirals[0], key)
            if any(value != getattr(_, key) for _ in sngl_inspirals):
                raise ValueError(
                    '{0} field is not the same for all detectors'.format(key))

    ifos = [sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if sngl_inspiral.snr is None else sngl_inspiral.snr
        for sngl_inspiral in sngl_inspirals
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    if psds is None:
        psds = [timing.get_noise_psd_func(ifo) for ifo in ifos]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(sngl_inspirals[0], waveform, f_min=f_low)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= np.sum(locations * weights.reshape(-1, 1),
                        axis=0) / np.sum(weights)

    if enable_snr_series:
        log.warn(
            'Enabling input of SNR time series. This feature is UNREVIEWED.')
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations / lal.C_SI),
                                      axis=1))) + 0.005

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, sngl in zip(acors, sngl_inspirals):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = sngl.end - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            if phase_convention.lower() == 'antifindchirp':
                # The matched filter phase convention does NOT affect the
                # template autocorrelation sequence; however it DOES affect
                # the maximum-likelihood phase estimate AND the SNR time series.
                # So if we are going to apply the anti-findchirp phase
                # correction later, we'll have to apply a complex conjugate to
                # the autocorrelation sequence to cancel it here.
                acor = np.conj(acor)
            series.data.data = sngl.snr * filter.exp_i(sngl.coa_phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series with mixed sample rates'
        )

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series of mixed lengths')

    # Sanity-check that the middle sample of each SNR time series matches
    # the corresponding sngl_inspiral record.
    for sngl_inspiral, series in zip(sngl_inspirals, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - sngl_inspiral.end)) >= 0.5 * deltaT:
            raise ValueError(
                'BAYESTAR expects the SNR time series to be centered on the sngl_inspiral end times'
            )

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Fudge factor for excess estimation error in gstlal_inspiral.
    fudge = 0.83
    snr_series *= fudge

    # If using 'findchirp' phase convention rather than gstlal/mbta,
    # then flip signs of phases.
    if phase_convention.lower() == 'antifindchirp':
        log.warn('Using anti-FINDCHIRP phase convention; inverting phases. '
                 'This is currently the default and it is appropriate for '
                 'gstlal and MBTA but not pycbc as of observing run 1 ("O1"). '
                 'The default setting is likely to change in the future.')
        snr_series = np.conj(snr_series)

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    # FIXME: np.average does not yet support masked arrays.
    # Replace with np.average when numpy 1.13.0 is available.
    mean_toa = np.sum(toas * weights) / np.sum(weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the prior
    # is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ("Prior is a power law r^k with k={}, " +
             "undefined at min_distance=0").format(prior_distance_power))

    # Rescale distances to horizon distance of most sensitive detector.
    max_horizon = np.max(horizons)
    horizons /= max_horizon
    min_distance /= max_horizon
    max_distance /= max_horizon

    # Time and run sky localization.
    log.debug('starting computationally-intensive section')
    start_time = lal.GPSTimeNow()
    if method == "toa_phoa_snr":
        skymap = Table(
            _sky_map.toa_phoa_snr(min_distance, max_distance,
                                  prior_distance_power, gmst, sample_rate,
                                  toas, snr_series, responses, locations,
                                  horizons))
    elif method == "toa_phoa_snr_mcmc":
        skymap = emcee_sky_map(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            chain_dump=chain_dump,
            max_horizon=max_horizon * fudge)
    else:
        raise ValueError("Unrecognized method: %s" % method)

    # Convert distance moments to parameters
    distmean = skymap.columns.pop('DISTMEAN')
    diststd = skymap.columns.pop('DISTSTD')
    skymap['DISTMU'], skymap['DISTSIGMA'], skymap['DISTNORM'] = \
        distance.moments_to_parameters(distmean, diststd)

    # Add marginal distance moments
    good = np.isfinite(distmean) & np.isfinite(diststd)
    prob = (moc.uniq2pixarea(skymap['UNIQ']) * skymap['PROBDENSITY'])[good]
    distmean = distmean[good]
    diststd = diststd[good]
    rbar = (prob * distmean).sum()
    r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()
    skymap.meta['distmean'] = rbar
    skymap.meta['diststd'] = np.sqrt(r2bar - np.square(rbar))

    # Rescale
    rescale = max_horizon * fudge
    skymap['DISTMU'] *= rescale
    skymap['DISTSIGMA'] *= rescale
    skymap.meta['distmean'] *= rescale
    skymap.meta['diststd'] *= rescale
    skymap['DISTNORM'] /= np.square(rescale)

    end_time = lal.GPSTimeNow()
    log.debug('finished computationally-intensive section')

    # Fill in metadata and return.
    skymap.meta['creator'] = 'BAYESTAR'
    skymap.meta['origin'] = 'LIGO/Virgo'
    skymap.meta['gps_time'] = float(epoch)
    skymap.meta['runtime'] = float(end_time - start_time)
    skymap.meta['instruments'] = {
        sngl_inspiral.ifo
        for sngl_inspiral in sngl_inspirals
    }
    skymap.meta['gps_creation_time'] = end_time

    return skymap
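The anti-FINDCHIRP phase flip applied above amounts to complex conjugation of the SNR samples, which negates the phase while leaving the magnitude unchanged; a small numeric check (values illustrative):

import numpy as np

z = 8.5 * np.exp(1j * 0.3)   # |SNR| = 8.5, phase = +0.3 rad
assert np.isclose(np.angle(np.conj(z)), -0.3)
assert np.isclose(np.abs(np.conj(z)), 8.5)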
Example #8
def ligolw_sky_map(sngl_inspirals,
                   waveform,
                   f_low,
                   min_distance=None,
                   max_distance=None,
                   prior_distance_power=None,
                   method="toa_phoa_snr",
                   psds=None,
                   nside=-1,
                   chain_dump=None,
                   phase_convention='antifindchirp',
                   snr_series=None,
                   enable_snr_series=False):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """

    # Ensure that sngl_inspiral is either a single template or a list of
    # identical templates
    for key in 'mass1 mass2 spin1x spin1y spin1z spin2x spin2y spin2z'.split():
        if hasattr(sngl_inspirals[0], key):
            value = getattr(sngl_inspirals[0], key)
            if any(value != getattr(_, key) for _ in sngl_inspirals):
                raise ValueError(
                    '{0} field is not the same for all detectors'.format(key))

    ifos = [sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals]

    # Extract SNRs from table.
    snrs = np.asarray([sngl_inspiral.snr for sngl_inspiral in sngl_inspirals])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    if psds is None:
        psds = [timing.get_noise_psd_func(ifo) for ifo in ifos]

    H = filter.sngl_inspiral_psd(sngl_inspirals[0], waveform, f_min=f_low)
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = [
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ]

    # Center detector array.
    locations -= np.average(locations, weights=weights, axis=0)

    if enable_snr_series:
        log.warn(
            'Enabling input of SNR time series. This feature is UNREVIEWED.')
    else:
        snr_series = None

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        # Maximum barycentered arrival time error:
        # |distance from array barycenter to furthest detector| / c + 5 ms.
        # For LHO+LLO, this is 15.0 ms.
        # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
        max_abs_t = np.max(
            np.sqrt(np.sum(np.square(locations / lal.C_SI), axis=1))) + 0.005

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, sngl in zip(acors, sngl_inspirals):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = sngl.end - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            if phase_convention.lower() == 'antifindchirp':
                # The matched filter phase convention does NOT affect the
                # template autocorrelation sequence; however it DOES affect
                # the maximum-likelihood phase estimate AND the SNR time series.
                # So if we are going to apply the anti-findchirp phase
                # correction later, we'll have to apply a complex conjugate to
                # the autocorrelation sequence to cancel it here.
                acor = np.conj(acor)
            series.data.data = sngl.snr * filter.exp_i(sngl.coa_phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series with mixed sample rates'
        )

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series of mixed lengths')

    # Sanity-check that the middle sample of each SNR time series matches
    # the corresponding sngl_inspiral record.
    for sngl_inspiral, series in zip(sngl_inspirals, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - sngl_inspiral.end)) >= 0.5 * deltaT:
            raise ValueError(
                'BAYESTAR expects the SNR time series to be centered on the sngl_inspiral end times'
            )

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Fudge factor for excess estimation error in gstlal_inspiral.
    fudge = 0.83
    snr_series *= fudge

    # If using 'findchirp' phase convention rather than gstlal/mbta,
    # then flip signs of phases.
    if phase_convention.lower() == 'antifindchirp':
        log.warn('Using anti-FINDCHIRP phase convention; inverting phases. '
                 'This is currently the default and it is appropriate for '
                 'gstlal and MBTA but not pycbc as of observing run 1 ("O1"). '
                 'The default setting is likely to change in the future.')
        snr_series = np.conj(snr_series)

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    mean_toa = np.average(toas, weights=weights, axis=0)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the prior
    # is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ("Prior is a power law r^k with k={}, " +
             "undefined at min_distance=0").format(prior_distance_power))

    # Rescale distances to horizon distance of most sensitive detector.
    max_horizon = np.max(horizons)
    horizons /= max_horizon
    min_distance /= max_horizon
    max_distance /= max_horizon

    # Use KDE for density estimation?
    if method.endswith('_kde'):
        method = method[:-4]
        kde = True
    else:
        kde = False

    # Time and run sky localization.
    start_time = time.time()
    if method == "toa_phoa_snr":
        prob = _sky_map.toa_phoa_snr(min_distance, max_distance,
                                     prior_distance_power, gmst, sample_rate,
                                     toas, snr_series, responses, locations,
                                     horizons, nside).T
    elif method == "toa_phoa_snr_mcmc":
        prob = emcee_sky_map(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            kde=kde,
            chain_dump=chain_dump,
            max_horizon=max_horizon)
    else:
        raise ValueError("Unrecognized method: %s" % method)
    prob[1] *= max_horizon * fudge
    prob[2] *= max_horizon * fudge
    prob[3] /= np.square(max_horizon * fudge)
    end_time = time.time()

    # Find elapsed run time.
    elapsed_time = end_time - start_time

    # Done!
    return prob, epoch, elapsed_time
Example #9
def simulate_snr(ra,
                 dec,
                 psi,
                 inc,
                 distance,
                 epoch,
                 gmst,
                 H,
                 S,
                 response,
                 location,
                 measurement_error='zero-noise'):
    from scipy.interpolate import interp1d

    from ..bayestar import filter
    from ..bayestar.interpolation import interpolate_max

    duration = 0.1

    # Calculate whitened template autocorrelation sequence.
    HS = filter.signal_psd_series(H, S)
    n = len(HS.data.data)
    acor, sample_rate = filter.autocorrelation(HS, duration)

    # Calculate time, amplitude, and phase.
    u = np.cos(inc)
    u2 = np.square(u)
    signal_model = filter.SignalModel(HS)
    horizon = signal_model.get_horizon_distance()
    Fplus, Fcross = lal.ComputeDetAMResponse(response, ra, dec, psi, gmst)
    toa = lal.TimeDelayFromEarthCenter(location, ra, dec, epoch)
    z = (0.5 * (1 + u2) * Fplus + 1j * u * Fcross) * horizon / distance

    # Calculate complex autocorrelation sequence.
    snr_series = z * np.concatenate((acor[:0:-1].conj(), acor))

    # If requested, add noise.
    if measurement_error == 'gaussian-noise':
        sigmasq = 4 * np.sum(HS.deltaF * np.abs(HS.data.data))
        amp = 4 * n * HS.deltaF**0.5 * np.sqrt(HS.data.data / sigmasq)
        N = lal.CreateCOMPLEX16FrequencySeries('', HS.epoch, HS.f0, HS.deltaF,
                                               HS.sampleUnits, n)
        N.data.data = amp * (np.random.randn(n) + 1j * np.random.randn(n))
        noise_term, sample_rate_2 = filter.autocorrelation(N,
                                                           2 * duration -
                                                           1 / sample_rate,
                                                           normalize=False)
        assert sample_rate == sample_rate_2
        snr_series += noise_term

    # Shift SNR series to the nearest sample.
    int_samples, frac_samples = divmod(
        (1e-9 * epoch.gpsNanoSeconds + toa) * sample_rate, 1)
    if frac_samples > 0.5:
        int_samples += 1
        frac_samples -= 1
    epoch = lal.LIGOTimeGPS(epoch.gpsSeconds, 0)
    n = len(acor) - 1
    mprime = np.arange(-n, n + 1)
    m = mprime + frac_samples
    re, im = (interp1d(m, x, kind='cubic', bounds_error=False,
                       fill_value=0)(mprime)
              for x in (snr_series.real, snr_series.imag))
    snr_series = re + 1j * im

    # Find the trigger values.
    i_nearest = np.argmax(np.abs(
        snr_series[n - n // 2:n + n // 2 + 1])) + n - n // 2
    i_interp, z_interp = interpolate_max(i_nearest,
                                         snr_series,
                                         n // 2,
                                         method='lanczos')
    toa = epoch + (int_samples + i_interp - n) / sample_rate
    snr = np.abs(z_interp)
    phase = np.angle(z_interp)

    # Shift and truncate the SNR time series.
    epoch += (int_samples + i_nearest - n - n // 2) / sample_rate
    snr_series = snr_series[(i_nearest - n // 2):(i_nearest + n // 2 + 1)]
    tseries = lal.CreateCOMPLEX8TimeSeries('snr', epoch, 0, 1 / sample_rate,
                                           lal.DimensionlessUnit,
                                           len(snr_series))
    tseries.data.data = snr_series
    return horizon, snr, phase, toa, tseries
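The nearest-sample split used in simulate_snr above relies on divmod() returning a fractional part in [0, 1), which is then re-centered to (-0.5, 0.5]; a small numeric check with an illustrative sample rate and arrival time:

import numpy as np

sample_rate = 4096.0
t = 0.123456   # arrival time in seconds
int_samples, frac_samples = divmod(t * sample_rate, 1)
if frac_samples > 0.5:
    int_samples += 1
    frac_samples -= 1
# The integer and fractional parts still reconstruct the original time.
assert np.isclose((int_samples + frac_samples) / sample_rate, t)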