Code example #1
File: postprocess.py  Project: llondon6/lalsuite-mmrd
def get_detector_pair_axis(ifo1, ifo2, gmst):
    """Find the sky position where the line between two detectors pierces the
    celestial sphere.

    Parameters
    ----------

    ifo1 : str or `~lal.Detector` or `~np.ndarray`
        The first detector; either the name of the detector (e.g. `'H1'`), a
        `lal.Detector` object (e.g., as returned by
        `lalsimulation.DetectorPrefixToLALDetector('H1')`), or the geocentric
        Cartesian position of the detector in meters.

    ifo2 : str or `~lal.Detector` or `~np.ndarray`
        The second detector; same as described above.

    gmst : float
        The Greenwich mean sidereal time in radians, as returned by
        `lal.GreenwichMeanSiderealTime`.

    Returns
    -------

    pole_ra : float
        The right ascension in radians at which a ray from `ifo1` to `ifo2`
        would pierce the celestial sphere.

    pole_dec : float
        The declination in radians at which a ray from `ifo1` to `ifo2` would
        pierce the celestial sphere.

    light_travel_time : float
        The light travel time from `ifo1` to `ifo2` in seconds.
    """

    # Get location of detectors if ifo1, ifo2 are LAL detector structs
    try:
        ifo1 = lalsimulation.DetectorPrefixToLALDetector(ifo1)
    except TypeError:
        pass
    try:
        ifo1 = ifo1.location
    except AttributeError:
        pass
    try:
        ifo2 = lalsimulation.DetectorPrefixToLALDetector(ifo2)
    except TypeError:
        pass
    try:
        ifo2 = ifo2.location
    except AttributeError:
        pass

    n = ifo2 - ifo1
    light_travel_time = np.sqrt(np.sum(np.square(n))) / lal.C_SI
    (theta, ), (phi, ) = hp.vec2ang(n)
    pole_ra = (gmst + phi) % (2 * np.pi)
    pole_dec = 0.5 * np.pi - theta
    return pole_ra, pole_dec, light_travel_time
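A minimal usage sketch for the function above (assuming the snippet's module-level imports of numpy as np, healpy as hp, lal and lalsimulation, and using GW150914's GPS time purely as an illustrative value):

import lal

gmst = lal.GreenwichMeanSiderealTime(lal.LIGOTimeGPS(1126259462))
pole_ra, pole_dec, dt = get_detector_pair_axis('H1', 'L1', gmst)
print(pole_ra, pole_dec, dt)  # the H1-L1 light travel time is roughly 0.010 s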
Code example #2
File: detectors.py  Project: canocy/pyburst
    def __init__(self, detector):
        """
        detector  -- label string of the detector
        descriptor -- LAL descriptor
        location -- geographic location of the detector
        response -- response matrix

        """
        self.name = detector
        self.descriptor = lal.CachedDetectors[DETECTOR_SITES[detector]]
        self.location = lalsimulation.DetectorPrefixToLALDetector(
            detector).location
        self.response = lalsimulation.DetectorPrefixToLALDetector(
            detector).response
Code example #3
File: detector.py  Project: bhooshan-gadre/pycbc-dev
    def __init__(self, detector_name, reference_time=1126259462.0):
        """ Create class representing a gravitational-wave detector

        Parameters
        ----------
        detector_name: str
            The two character detector string, i.e. H1, L1, V1, K1, I1
        reference_time: float
            Default is time of GW150914. In this case, the earth's rotation
            will be estimated from a reference time. If 'None', we will
            calculate the time for each gps time requested explicitly
            using a slower but higher precision method.

        """
        self.name = str(detector_name)
        self.frDetector = lalsimulation.DetectorPrefixToLALDetector(self.name)
        self.response = self.frDetector.response
        self.location = self.frDetector.location
        self.latitude = self.frDetector.frDetector.vertexLatitudeRadians
        self.longitude = self.frDetector.frDetector.vertexLongitudeRadians

        self.reference_time = reference_time
        if reference_time is not None:
            self.sday = float(sday.si.scale)
            self.gmst_reference = gmst_accurate(reference_time)
Code example #4
    def __init__(self, detector_name):
        self.name = str(detector_name)
        self.frDetector = lalsimulation.DetectorPrefixToLALDetector(self.name)
        self.response = self.frDetector.response
        self.location = self.frDetector.location
        self.latitude = self.frDetector.frDetector.vertexLatitudeRadians
        self.longitude = self.frDetector.frDetector.vertexLongitudeRadians
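For reference, the attributes that the constructors above read off the struct returned by lalsimulation.DetectorPrefixToLALDetector can be inspected directly; a short sketch (the 'H1' prefix is an arbitrary choice):

import lalsimulation

det = lalsimulation.DetectorPrefixToLALDetector('H1')
print(det.location)                           # geocentric Cartesian position in metres (3-vector)
print(det.response)                           # 3x3 detector response tensor
print(det.frDetector.vertexLatitudeRadians)   # vertex latitude in radians
print(det.frDetector.vertexLongitudeRadians)  # vertex longitude in radians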
Code example #5
def make_bbh(hp, hc, fs, ra, dec, psi, det):
    """
    turns hplus and hcross into a detector output
    applies antenna response and
    applies correct time delays to each detector
    """

    # make basic time vector
    tvec = np.arange(len(hp)) / float(fs)

    # compute antenna response and apply
    Fp, Fc, _, _ = antenna.response(0.0, ra, dec, 0, psi, 'radians', det)
    ht = hp * Fp + hc * Fc  # overwrite the timeseries vector to reuse it

    # compute time delays relative to Earth centre
    frDetector = lalsimulation.DetectorPrefixToLALDetector(det)
    tdelay = lal.TimeDelayFromEarthCenter(frDetector.location, ra, dec, 0.0)
    print('{}: computed {} Earth centre time delay = {}'.format(
        time.asctime(), det, tdelay))
    # interpolate to get time shifted signal
    ht_tck = interpolate.splrep(tvec, ht, s=0)
    hp_tck = interpolate.splrep(tvec, hp, s=0)
    hc_tck = interpolate.splrep(tvec, hc, s=0)
    tnew = tvec + tdelay
    new_ht = interpolate.splev(tnew, ht_tck, der=0, ext=1)
    new_hp = interpolate.splev(tnew, hp_tck, der=0, ext=1)
    new_hc = interpolate.splev(tnew, hc_tck, der=0, ext=1)

    return new_ht, new_hp, new_hc
Code example #6
File: gw_template_maker.py  Project: jmcginn/GenNet
def make_bbh(hp, hc, fs, ra, dec, psi, det):
    """ Turns hplus and hcross into a detector output
    applies antenna response and
    applies correct time delays to each detector

    Parameters
    ----------
    hp:
        h-plus version of GW waveform
    hc:
        h-cross version of GW waveform
    fs:
        sampling frequency
    ra:
        right ascension
    dec:
        declination
    psi:
        polarization angle        
    det:
        detector

    Returns
    -------
    ht:
        combined h-plus and h-cross version of waveform
    hp:
        h-plus version of GW waveform 
    hc:
        h-cross version of GW waveform
    """
    # make basic time vector
    tvec = np.arange(len(hp)) / float(fs)

    # compute antenna response and apply
    Fp, Fc, _, _ = antenna.response(float(event_time), ra, dec, 0, psi,
                                    'radians', det)
    ht = hp * Fp + hc * Fc  # overwrite the timeseries vector to reuse it

    # compute time delays relative to Earth centre
    frDetector = lalsimulation.DetectorPrefixToLALDetector(det)
    tdelay = lal.TimeDelayFromEarthCenter(frDetector.location, ra, dec,
                                          float(event_time))
    if verb:
        print('{}: computed {} Earth centre time delay = {}'.format(
            time.asctime(), det, tdelay))

    # interpolate to get time shifted signal
    ht_tck = interpolate.splrep(tvec, ht, s=0)
    hp_tck = interpolate.splrep(tvec, hp, s=0)
    hc_tck = interpolate.splrep(tvec, hc, s=0)
    if gw_tmp: tnew = tvec - tdelay
    else:
        tnew = tvec - tdelay  # + (np.random.uniform(low=-0.037370920181274414,high=0.0055866241455078125))
    new_ht = interpolate.splev(tnew, ht_tck, der=0, ext=1)
    new_hp = interpolate.splev(tnew, hp_tck, der=0, ext=1)
    new_hc = interpolate.splev(tnew, hc_tck, der=0, ext=1)

    return ht, hp, hc
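The antenna.response helper used by the two make_bbh variants comes from the surrounding projects; the same antenna pattern and time delay can also be obtained directly from LAL. A rough sketch, with the sky position, polarisation angle and GPS time as placeholder values:

import lal
import lalsimulation

gps = lal.LIGOTimeGPS(1126259462)
gmst = lal.GreenwichMeanSiderealTime(gps)
det = lalsimulation.DetectorPrefixToLALDetector('H1')
Fp, Fc = lal.ComputeDetAMResponse(det.response, 1.95, -1.27, 0.82, gmst)  # ra, dec, psi
tdelay = lal.TimeDelayFromEarthCenter(det.location, 1.95, -1.27, gps)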
Code example #7
def inject(hplus, hcross, ifo, psd):
    # Generate colored noise
    x = filter.colored_noise(epoch, data_duration, sample_rate, psd)

    # Project injection for this detector.
    detector = lalsimulation.DetectorPrefixToLALDetector(ifo)
    lalsimulation.SimInjectDetectorStrainREAL8TimeSeries(
        x, hplus, hcross, ra, dec, psi, detector, None)

    # Done!
    return x
Code example #8
def compute_arrival_time_at_detector(det, RA, DEC, tref):
    """
    Function to compute the time of arrival at a detector
    from the time of arrival at the geocenter.

    'det' is a detector prefix string (e.g. 'H1')
    'RA' and 'DEC' are right ascension and declination (in radians)
    'tref' is the reference time at the geocenter. It can be either a float
    (in which case the return is a float) or a GPSTime object (in which case
    it returns a GPSTime).
    """
    detector = lalsim.DetectorPrefixToLALDetector(det)
    # if tref is a float or a GPSTime object,
    # it should be automagically converted in the appropriate way
    return tref + lal.TimeDelayFromEarthCenter(detector.location, RA, DEC, tref)
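A sketch of a call, with the sky position and geocentre time chosen as placeholder values:

t_H1 = compute_arrival_time_at_detector('H1', 1.95, -1.27, 1126259462.0)
t_L1 = compute_arrival_time_at_detector('L1', 1.95, -1.27, 1126259462.0)
print(t_H1 - t_L1)  # inter-site delay, bounded by the ~10 ms H1-L1 light travel time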
Code example #9
def _get_detector_location(ifo):
    if isinstance(ifo, six.string_types):
        try:
            import lalsimulation
        except ImportError:
            raise RuntimeError('Looking up detectors by name '
                               'requires the lalsimulation package.')
        ifo = lalsimulation.DetectorPrefixToLALDetector(ifo)
    try:
        ifo = ifo.location
    except AttributeError:
        pass
    return ifo
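This helper accepts the same three input types described in the first example's docstring; a sketch of the equivalent calls (assuming lalsimulation and numpy are importable):

import lalsimulation
import numpy as np

loc1 = _get_detector_location('H1')                                              # by prefix
loc2 = _get_detector_location(lalsimulation.DetectorPrefixToLALDetector('H1'))   # by LAL struct
loc3 = _get_detector_location(np.asarray(loc1))                                  # by raw position
assert np.allclose(loc1, loc2) and np.allclose(loc2, loc3)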
Code example #10
def generate_for_detector(source, ifos, sample_rate, epoch, distance,
                          total_mass, ra, dec, psi):
    '''
    Generate an injection for a given waveform.
    Parameters
    ----------
    source : ``minke.Source`` object
       The source which should generate the waveform.
    ifos : list
       A list of interferometer initialisms, e.g. ['L1', 'H1']
    sample_rate : int
       The sample rate in hertz for the generated signal.
    epoch : str
       The epoch (start time) for the injection.
       Note that this should be provided as a string to prevent overflows.
    distance : float
       The distance for the injection, in megaparsecs.
    total_mass : float
       The total mass for the injected signal.
    ra : float
        The right ascension of the source, in radians.
    dec : float
        The declination of the source, in radians.
    psi : float
        The polarisation angle of the source, in radians. '''

    nr_waveform = source.datafile
    data = {}
    data['data'] = {}
    data['times'] = {}
    data['meta'] = {}
    data['epoch'] = epoch
    data['meta']['ra'] = ra
    data['meta']['dec'] = dec
    data['meta']['psi'] = psi
    data['meta']['distance'] = distance
    data['meta']['total_mass'] = total_mass
    data['meta']['sample rate'] = sample_rate
    data['meta']['waveform'] = nr_waveform

    for ifo in ifos:
        det = lalsimulation.DetectorPrefixToLALDetector(ifo)
        hp, hx = source._generate(half=True, epoch=epoch, rate=sample_rate)[:2]
        h_tot = lalsimulation.SimDetectorStrainREAL8TimeSeries(
            hp, hx, ra, dec, psi, det)
        data['data'][ifo] = h_tot.data.data.tolist()
        data['times'][ifo] = np.linspace(0,
                                         len(h_tot.data.data) * h_tot.deltaT,
                                         len(h_tot.data.data)).tolist()

    return data
Code example #11
def complex_antenna_factor(det, RA, DEC, psi, tref):
    """
    Function to compute the complex-valued antenna pattern function:
    F+ + i Fx

    'det' is a detector prefix string (e.g. 'H1')
    'RA' and 'DEC' are right ascension and declination (in radians)
    'psi' is the polarization angle
    'tref' is the reference GPS time
    """
    detector = lalsim.DetectorPrefixToLALDetector(det)
    Fp, Fc = lal.ComputeDetAMResponse(detector.response, RA, DEC, psi, lal.GreenwichMeanSiderealTime(tref))

    return Fp + 1j * Fc
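A sketch of a call, with placeholder sky location, polarisation angle and GPS time; the squared magnitude of the returned value is F+^2 + Fx^2:

F = complex_antenna_factor('H1', 1.95, -1.27, 0.82, 1126259462.0)
print(F.real, F.imag)  # F+ and Fx
print(abs(F) ** 2)     # F+^2 + Fx^2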
Code example #12
    def __pD_zH0(self, H0):
        """
        Detection probability over a range of redshifts and H0s,
        returned as an interpolated function.

        Parameters
        ----------
        H0 : float
            value of Hubble constant in kms-1Mpc-1

        Returns
        -------
        interpolated probabilities of detection over an array of
        luminosity distances, for a specific value of H0
        """
        lal_detectors = [lalsim.DetectorPrefixToLALDetector(name)
                                for name in self.detectors]
        
        network_rhosq = np.zeros((self.Nsamps, 1))
        prob = np.zeros(len(self.z_array))
        i=0
        bar = progressbar.ProgressBar()
        for z in bar(self.z_array):
            dl = self.cosmo.dl_zH0(z, H0)
            factor=1+z
            for n in range(self.Nsamps):
                if self.full_waveform is True: 
                    hp,hc = self.simulate_waveform(factor*self.m1[n], factor*self.m2[n], dl, self.incs[n], self.phis[n])
                    rhosqs = [self.snr_squared_waveform(hp,hc,self.RAs[n],self.Decs[n],self.psis[n], 0., det)
                              for det in self.detectors]

                else:
                    rhosqs = [self.__snr_squared(self.RAs[n], self.Decs[n],
                              self.m1[n], self.m2[n], self.incs[n], self.psis[n],
                              det, 0.0, self.z_array[i], H0)
                              for det in self.detectors]
                network_rhosq[n] = np.sum(rhosqs)

            survival = ncx2.sf(self.snr_threshold**2, 2*len(self.detectors), network_rhosq)  
            prob[i] = np.sum(survival, 0)/self.Nsamps
            i+=1
            
        return prob
Code example #13
def vector_complex_antenna_factor(rsp, RA, DEC, psi, tref):
    # Everything is now an array except rsp, which is
    # the same as it used to be.

    detector = lalsim.DetectorPrefixToLALDetector(rsp)
    rsp = detector.response

    RA = np.array(RA, copy=False, ndmin=1)
    DEC = np.array(DEC, copy=False, ndmin=1)
    psi = np.array(psi, copy=False, ndmin=1)
    tref = np.array(tref, copy=False, ndmin=1)

    N = len(RA)
    # Hour angle

    gha = np.array(tref - RA, dtype=float)

    cosgha = np.cos(gha)
    singha = np.sin(gha)
    cosdec = np.cos(DEC)
    sindec = np.sin(DEC)
    cospsi = np.cos(psi)
    sinpsi = np.sin(psi)

    X = np.zeros((3, N))
    Y = np.zeros((3, N))

    X[0, :] = -cospsi * singha - sinpsi * cosgha * sindec
    X[1, :] = -cospsi * cosgha + sinpsi * singha * sindec
    X[2, :] = sinpsi * cosdec

    Y[0, :] = sinpsi * singha - cospsi * cosgha * sindec
    Y[1, :] = sinpsi * cosgha + cospsi * singha * sindec
    Y[2, :] = cospsi * cosdec
    Fp, Fc = np.zeros(N), np.zeros(N)
    Fp = np.einsum('ji,jk,ki->i', X, rsp, X) - np.einsum(
        'ji,jk,ki->i', Y, rsp, Y)
    Fc = np.einsum('ji,jk,ki->i', Y, rsp, X) + np.einsum(
        'ji,jk,ki->i', X, rsp, Y)

    return Fp + 1j * Fc
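As written, the tref argument is used directly as a sidereal time (gha = tref - RA), so the caller apparently needs to pass GMST values rather than GPS times. A vectorised usage sketch with placeholder values (assuming lal and numpy as np are importable):

import lal
import numpy as np

gmst = lal.GreenwichMeanSiderealTime(lal.LIGOTimeGPS(1126259462))
ras = np.random.uniform(0.0, 2.0 * np.pi, 1000)
decs = np.arcsin(np.random.uniform(-1.0, 1.0, 1000))
psis = np.random.uniform(0.0, np.pi, 1000)
F = vector_complex_antenna_factor('H1', ras, decs, psis, np.full(1000, gmst))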
Code example #14
def vector_compute_arrival_time_at_detector(det, RA, DEC, tref, tref_geo=None):
    """
    Transfer a geocentered reference time 'tref' to the detector-based arrival time as a function of sky position.
    If the tref_geo argument is specified, then tref_geo is used to calculate an hour angle as provided by
    `XLALGreenwichMeanSiderealTime`. This can significantly speed up the function with a sacrifice (likely minimal)
    of accuracy.
    """
    RA = np.array(RA, copy=False, ndmin=1)
    DEC = np.array(DEC, copy=False, ndmin=1)
    tref = np.array(tref, copy=False, ndmin=1)

    # Calculate hour angle
    if tref_geo is None:
        time = np.array([lal.GreenwichMeanSiderealTime(t) for t in tref],
                        dtype=float)
    else:
        # FIXME: Could be called once and moved outside
        time = lal.GreenwichMeanSiderealTime(tref_geo)
    hr_angle = np.array(time - RA, dtype=float)

    DEC = np.array(DEC, dtype=float)

    # compute spherical coordinate position of the detector
    # based on hour angle and declination
    cos_hr = np.cos(hr_angle)
    sin_hr = np.sin(hr_angle)

    cos_dec = np.cos(DEC)
    sin_dec = np.sin(DEC)
    # compute source vector
    source_xyz = np.array([cos_dec * cos_hr, cos_dec * -sin_hr, sin_dec])
    # get the detector vector
    # must be careful - the C function is designed to compute the
    # light time delay between two detectors as \vec{det2} - \vec{det1}.
    # But timeDelay.c gets the earth center time delay by passing
    # {0., 0., 0.} as the 2nd arg. So det_xyz needs an extra - sign.
    # FIXME: Could be called once and moved outside
    det_xyz = -lalsim.DetectorPrefixToLALDetector(det).location

    return tref + np.dot(np.transpose(det_xyz), source_xyz) / lal.C_SI
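A sketch of a vectorised call that uses a single geocentre time for the whole grid, so the sidereal time is computed only once via the tref_geo shortcut (all values are placeholders):

import numpy as np

t_geo = 1126259462.0
ras = np.linspace(0.0, 2.0 * np.pi, 100)
decs = np.zeros_like(ras)
t_H1 = vector_compute_arrival_time_at_detector('H1', ras, decs,
                                                np.full_like(ras, t_geo),
                                                tref_geo=t_geo)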
Code example #15
    def __Fcross(self, detector, RA, Dec, psi, gmst):
        """
        Computes the 'cross' antenna pattern

        Parameters
        ----------
        detector : str
            name of detector in network (eg 'H1', 'L1')
        RA,Dec : float
            sky location of the event in radians
        psi : float
            source polarisation in radians
        gmst : float
            Greenwich Mean Sidereal Time in seconds

        Returns
        -------
        float
            F_x antenna response
        """
        detector = lalsim.DetectorPrefixToLALDetector(detector)
        return lal.ComputeDetAMResponse(detector.response, RA,
                                        Dec, psi, gmst)[1]
Code example #16
File: detector.py  Project: rahuldhurkunde/pycbc
    def __init__(self, detector_name, reference_time=1126259462.0):
        """ Create class representing a gravitational-wave detector
        Parameters
        ----------
        detector_name: str
            The two-character detector string, i.e. H1, L1, V1, K1, I1
        reference_time: float
            Default is time of GW150914. In this case, the earth's rotation
            will be estimated from a reference time. If 'None', we will
            calculate the time for each gps time requested explicitly
            using a slower but higher precision method.
        """
        self.name = str(detector_name)

        if detector_name in [pfx for pfx, name in get_available_detectors()]:
            import lalsimulation as lalsim
            self._lal = lalsim.DetectorPrefixToLALDetector(self.name)
            self.response = self._lal.response
            self.location = self._lal.location
        elif detector_name in _custom_ground_detectors:
            self.info = _custom_ground_detectors[detector_name]
            self.response = self.info['response']
            self.location = self.info['location']
        else:
            raise ValueError("Unkown detector {}".format(detector_name))

        loc = coordinates.EarthLocation(self.location[0],
                                        self.location[1],
                                        self.location[2],
                                        unit=meter)
        self.latitude = loc.lat.rad
        self.longitude = loc.lon.rad

        self.reference_time = reference_time
        self.sday = None
        self.gmst_reference = None
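The astropy conversion used above can be exercised on its own; a small sketch (assuming astropy is installed, with 'V1' as an arbitrary prefix):

import lalsimulation
from astropy import coordinates
from astropy.units import meter

loc = lalsimulation.DetectorPrefixToLALDetector('V1').location
earth_loc = coordinates.EarthLocation(loc[0], loc[1], loc[2], unit=meter)
print(earth_loc.lat.rad, earth_loc.lon.rad)  # geodetic latitude and longitude in radians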
Code example #17
    def test_antenna_pattern_vs_lal(self):
        gmst = lal.GreenwichMeanSiderealTime(self.gpstime)
        f_bilby = np.zeros((self.trial, 6))
        f_lal = np.zeros((self.trial, 6))

        for n, ifo_name in enumerate(self.ifo_names):
            response = lalsimulation.DetectorPrefixToLALDetector(
                self.lal_prefixes[ifo_name]).response
            ifo = self.ifos[n]
            for i in range(self.trial):
                ra = 2. * np.pi * np.random.uniform()
                dec = np.pi * np.random.uniform() - np.pi / 2.
                psi = np.pi * np.random.uniform()
                f_lal[i] = lal.ComputeDetAMResponseExtraModes(
                    response, ra, dec, psi, gmst)
                for m, pol in enumerate(self.polarizations):
                    f_bilby[i,
                            m] = ifo.antenna_response(ra, dec, self.gpstime,
                                                      psi, pol)

            std = np.std(f_bilby - f_lal, axis=0)
            for m, pol in enumerate(self.polarizations):
                with self.subTest(':'.join((ifo_name, pol))):
                    self.assertAlmostEqual(std[m], 0.0, places=7)
Code example #18
data_dict_T ={}
fig_list = {}
indx = 0
t_ref = wfP.P.tref   # This SHOULD NOT BE USED if you want to time-align your signals
if opts.t_ref:
    t_ref = float(opts.t_ref)
else:
    print(" Warning: no reference time, it will be very difficult to time-align your signals ")
for ifo in ['H1', 'L1', 'V1']:
    indx += 1
    fig_list[ifo] = indx
    plt.figure(indx)
    wfP.P.detector = ifo
    data_dict_T[ifo] = wfP.real_hoft(no_memory=opts.no_memory,hybrid_use=opts.hybrid_use,hybrid_method=opts.hybrid_method)
    tvals = data_dict_T[ifo].deltaT*np.arange(data_dict_T[ifo].data.length) + float(data_dict_T[ifo].epoch)
    det = lalsim.DetectorPrefixToLALDetector(ifo)
    print(ifo, " T_peak =", P.tref + lal.TimeDelayFromEarthCenter(det.location, P.phi, P.theta, P.tref) - t_ref)
    if opts.verbose:
        plt.plot(tvals - wfP.P.tref, data_dict_T[ifo].data.data)

    np.savetxt(ifo+"_nr_"+group+"_"+str(param)+"_event_"+str(opts.event)+".dat", np.array([tvals -t_ref, data_dict_T[ifo].data.data]).T)

if opts.verbose and not opts.save_plots:
    plt.show()
if opts.save_plots:
    for ifo in  fig_list:
        plt.figure(fig_list[ifo])
        plt.savefig("response-"+str(ifo)+group+"_"+str(param)+fig_extension)
        

Code example #19
File: __init__.py  Project: cui-wenfeng/ligo.skymap
def condition(event,
              waveform='o2-uberbank',
              f_low=30.0,
              enable_snr_series=True,
              f_high_truncate=0.95):

    if len(event.singles) == 0:
        raise ValueError('Cannot localize an event with zero detectors.')

    singles = event.singles
    if not enable_snr_series:
        singles = [single for single in singles if single.snr is not None]

    ifos = [single.detector for single in singles]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if single.snr is None else single.snr
        for single in singles
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors]) / lal.C_SI

    # Power spectra for each detector.
    psds = [single.psd for single in singles]
    psds = [
        filter.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(waveform, f_min=f_low, **event.template_args)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [filter.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= (np.sum(locations * weights.reshape(-1, 1), axis=0) /
                  np.sum(weights))

    if enable_snr_series:
        snr_series = [single.snr_series for single in singles]
        if all(s is None for s in snr_series):
            snr_series = None
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations), axis=1))) + 0.005

    if snr_series is None:
        log.warning("No SNR time series found, so we are creating a "
                    "zero-noise SNR time series from the whitened template's "
                    "autocorrelation sequence. The sky localization "
                    "uncertainty may be underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, single in zip(acors, singles):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = single.time - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            series.data.data = single.snr * filter.exp_i(single.phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series with '
                         'mixed sample rates')

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series of '
                         'mixed lengths')

    # Perform sanity checks that the middle sample of the SNR time series matches
    # the sngl_inspiral records to the nearest sample (plus the smallest
    # representable LIGOTimeGPS difference of 1 nanosecond).
    for ifo, single, series in zip(ifos, singles, snr_series):
        shift = np.abs(0.5 * (nsamples - 1) * series.deltaT +
                       float(series.epoch - single.time))
        if shift >= deltaT + 1e-8:
            raise ValueError('BAYESTAR expects the SNR time series to be '
                             'centered on the single-detector trigger times, '
                             'but {} was off by {} s'.format(ifo, shift))

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    mean_toa = np.average(toas, weights=weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    return epoch, sample_rate, toas, snr_series, responses, locations, horizons
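The 15.0 ms figure quoted for LHO+LLO in the max_abs_t comment appears to be the full ~10 ms H1-L1 light travel time plus the 5 ms padding; the baseline itself is easy to check (a sketch, assuming lal, lalsimulation and numpy are importable):

import numpy as np
import lal
import lalsimulation

h1 = lalsimulation.DetectorPrefixToLALDetector('H1').location
l1 = lalsimulation.DetectorPrefixToLALDetector('L1').location
print(np.linalg.norm(h1 - l1) / lal.C_SI)  # roughly 0.010 s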
Code example #20
# Create a CoincMap table.
coinc_map_table = lsctables.New(lsctables.CoincMapTable)
out_xmldoc.childNodes[0].appendChild(coinc_map_table)

# Create a CoincEvent table.
coinc_table = lsctables.New(lsctables.CoincTable)
out_xmldoc.childNodes[0].appendChild(coinc_table)

# Create a CoincInspiral table.
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
out_xmldoc.childNodes[0].appendChild(coinc_inspiral_table)

# Precompute values that are common to all simulations.
detectors = [
    lalsimulation.DetectorPrefixToLALDetector(ifo) for ifo in opts.detector
]
responses = [det.response for det in detectors]
locations = [det.location for det in detectors]

for sim_inspiral in progress.iterate(sim_inspiral_table):

    # Unpack some values from the row in the table.
    m1 = sim_inspiral.mass1
    m2 = sim_inspiral.mass2
    f_low = sim_inspiral.f_lower if opts.f_low is None else opts.f_low
    DL = sim_inspiral.distance
    ra = sim_inspiral.longitude
    dec = sim_inspiral.latitude
    inc = sim_inspiral.inclination
    phi = sim_inspiral.coa_phase
Code example #21
File: detector.py  Project: ueno-phys/pycbc
    def __init__(self, detector_name):
        self.name = str(detector_name)
        self.frDetector = lalsimulation.DetectorPrefixToLALDetector(self.name)
        self.response = self.frDetector.response
        self.location = self.frDetector.location
Code example #22
def get_WF_and_snr(inj, PSD, sample_rate, instrument="H1", plot_dir=None):
    "Given an injection row, it computes the WF and the SNR"
    #https://git.ligo.org/lscsoft/gstlal/-/blob/precession_hm-0.2/gstlal-inspiral/bin/gstlal_inspiral_injection_snr
    #PSD should be a lal PSD obj
    assert instrument in ["H1", "L1", "V1"]

    injtime = inj.time_geocent

    sample_rate = 16384.0

    approximant = lalsimulation.GetApproximantFromString(str(inj.waveform))
    f_min = inj.f_lower

    h_plus, h_cross = lalsimulation.SimInspiralTD(m1=inj.mass1 * lal.MSUN_SI,
                                                  m2=inj.mass2 * lal.MSUN_SI,
                                                  S1x=inj.spin1x,
                                                  S1y=inj.spin1y,
                                                  S1z=inj.spin1z,
                                                  S2x=inj.spin2x,
                                                  S2y=inj.spin2y,
                                                  S2z=inj.spin2z,
                                                  distance=inj.distance * 1e6 *
                                                  lal.PC_SI,
                                                  inclination=inj.inclination,
                                                  phiRef=inj.coa_phase,
                                                  longAscNodes=0.0,
                                                  eccentricity=0.0,
                                                  meanPerAno=0.0,
                                                  deltaT=1.0 / sample_rate,
                                                  f_min=f_min,
                                                  f_ref=0.0,
                                                  LALparams=None,
                                                  approximant=approximant)

    h_plus.epoch += injtime
    h_cross.epoch += injtime

    # Compute strain in the chosen detector.
    h = lalsimulation.SimDetectorStrainREAL8TimeSeries(
        h_plus, h_cross, inj.longitude, inj.latitude, inj.polarization,
        lalsimulation.DetectorPrefixToLALDetector(instrument))
    #Compute the SNR
    if PSD is not None:
        snr = lalsimulation.MeasureSNR(h, PSD, options.flow, options.fmax)
    else:
        snr = 0.

    if isinstance(plot_dir, str):
        plt.figure()
        plt.title(
            "(m1, m2, s1 (x,y,z), s2 (x,y,z), d_L) =\n {0:.2f} {1:.2f} {2:.2f} {3:.2f} {4:.2f} {5:.2f} {6:.2f} {7:.2f} {8:.2f} "
            .format(inj.mass1, inj.mass2, inj.spin1x, inj.spin1y, inj.spin1z,
                    inj.spin2x, inj.spin2y, inj.spin2z, inj.distance))
        plt.plot(
            np.linspace(0,
                        len(h_plus.data.data) / sample_rate,
                        len(h_plus.data.data)), h_plus.data.data)
        plt.savefig(plot_dir + '/inj_{}.png'.format(injtime))
        #plt.show()
        plt.close('all')

    return (h.data.data, snr)
Code example #23
def localize(event,
             waveform='o2-uberbank',
             f_low=30.0,
             min_distance=None,
             max_distance=None,
             prior_distance_power=None,
             cosmology=False,
             method='toa_phoa_snr',
             nside=-1,
             chain_dump=None,
             enable_snr_series=True,
             f_high_truncate=0.95):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """
    frame = inspect.currentframe()
    argstr = inspect.formatargvalues(*inspect.getargvalues(frame))
    start_time = lal.GPSTimeNow()

    singles = event.singles
    if not enable_snr_series:
        singles = [single for single in singles if single.snr is not None]

    ifos = [single.detector for single in singles]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if single.snr is None else single.snr
        for single in singles
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    psds = [single.psd for single in singles]
    psds = [
        timing.InterpolatedPSD(filter.abscissa(psd),
                               psd.data.data,
                               f_high_truncate=f_high_truncate) for psd in psds
    ]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(waveform, f_min=f_low, **event.template_args)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= np.sum(locations * weights.reshape(-1, 1),
                        axis=0) / np.sum(weights)

    if cosmology:
        log.warn('Enabling cosmological prior. ' 'This feature is UNREVIEWED.')

    if enable_snr_series:
        log.warn('Enabling input of SNR time series. '
                 'This feature is UNREVIEWED.')
        snr_series = [single.snr_series for single in singles]
        if all(s is None for s in snr_series):
            snr_series = None
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations / lal.C_SI),
                                      axis=1))) + 0.005

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, single in zip(acors, singles):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = single.time - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            series.data.data = single.snr * filter.exp_i(single.phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series with '
                         'mixed sample rates')

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError('BAYESTAR does not yet support SNR time series of '
                         'mixed lengths')

    # Perform sanity checks that the middle sample of the SNR time series matches
    # the sngl_inspiral records. Relax valid interval slightly from
    # +/- 0.5 deltaT to +/- 0.6 deltaT for floating point roundoff error.
    for single, series in zip(singles, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - single.time)) >= 0.6 * deltaT:
            raise ValueError('BAYESTAR expects the SNR time series to be '
                             'centered on the single-detector trigger times')

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    # FIXME: np.average does not yet support masked arrays.
    # Replace with np.average when numpy 1.13.0 is available.
    mean_toa = np.sum(toas * weights) / np.sum(weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the
    # prior is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ('Prior is a power law r^k with k={}, '
             'undefined at min_distance=0').format(prior_distance_power))

    # Time and run sky localization.
    log.debug('starting computationally-intensive section')
    if method == 'toa_phoa_snr':
        skymap, log_bci, log_bsn = _sky_map.toa_phoa_snr(
            min_distance, max_distance, prior_distance_power, cosmology, gmst,
            sample_rate, toas, snr_series, responses, locations, horizons)
        skymap = Table(skymap)
        skymap.meta['log_bci'] = log_bci
        skymap.meta['log_bsn'] = log_bsn
    elif method == 'toa_phoa_snr_mcmc':
        skymap = localize_emcee(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            chain_dump=chain_dump)
    else:
        raise ValueError('Unrecognized method: %s' % method)

    # Convert distance moments to parameters
    distmean = skymap.columns.pop('DISTMEAN')
    diststd = skymap.columns.pop('DISTSTD')
    skymap['DISTMU'], skymap['DISTSIGMA'], skymap['DISTNORM'] = \
        distance.moments_to_parameters(distmean, diststd)

    # Add marginal distance moments
    good = np.isfinite(distmean) & np.isfinite(diststd)
    prob = (moc.uniq2pixarea(skymap['UNIQ']) * skymap['PROBDENSITY'])[good]
    distmean = distmean[good]
    diststd = diststd[good]
    rbar = (prob * distmean).sum()
    r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()
    skymap.meta['distmean'] = rbar
    skymap.meta['diststd'] = np.sqrt(r2bar - np.square(rbar))

    log.debug('finished computationally-intensive section')
    end_time = lal.GPSTimeNow()

    # Fill in metadata and return.
    program, _ = os.path.splitext(os.path.basename(sys.argv[0]))
    skymap.meta['creator'] = 'BAYESTAR'
    skymap.meta['origin'] = 'LIGO/Virgo'
    skymap.meta['vcs_info'] = vcs_info
    skymap.meta['gps_time'] = float(epoch)
    skymap.meta['runtime'] = float(end_time - start_time)
    skymap.meta['instruments'] = {single.detector for single in singles}
    skymap.meta['gps_creation_time'] = end_time
    skymap.meta['history'] = [
        '', 'Generated by calling the following Python function:',
        '{}.{}{}'.format(__name__, frame.f_code.co_name, argstr), '',
        'This was the command line that started the program:',
        ' '.join([program] + sys.argv[1:])
    ]

    return skymap
Code example #24
def ligolw_sky_map(sngl_inspirals,
                   waveform,
                   f_low,
                   min_distance=None,
                   max_distance=None,
                   prior_distance_power=None,
                   method="toa_phoa_snr",
                   psds=None,
                   nside=-1,
                   chain_dump=None,
                   phase_convention='antifindchirp',
                   snr_series=None,
                   enable_snr_series=False):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """

    # Ensure that sngl_inspiral is either a single template or a list of
    # identical templates
    for key in 'mass1 mass2 spin1x spin1y spin1z spin2x spin2y spin2z'.split():
        if hasattr(sngl_inspirals[0], key):
            value = getattr(sngl_inspirals[0], key)
            if any(value != getattr(_, key) for _ in sngl_inspirals):
                raise ValueError(
                    '{0} field is not the same for all detectors'.format(key))

    ifos = [sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals]

    # Extract SNRs from table.
    snrs = np.ma.asarray([
        np.ma.masked if sngl_inspiral.snr is None else sngl_inspiral.snr
        for sngl_inspiral in sngl_inspirals
    ])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    if psds is None:
        psds = [timing.get_noise_psd_func(ifo) for ifo in ifos]

    log.debug('calculating templates')
    H = filter.sngl_inspiral_psd(sngl_inspirals[0], waveform, f_min=f_low)

    log.debug('calculating noise PSDs')
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    log.debug('calculating Fisher matrix elements')
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = np.ma.asarray([
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ])

    # Center detector array.
    locations -= np.sum(locations * weights.reshape(-1, 1),
                        axis=0) / np.sum(weights)

    if enable_snr_series:
        log.warn(
            'Enabling input of SNR time series. This feature is UNREVIEWED.')
    else:
        snr_series = None

    # Maximum barycentered arrival time error:
    # |distance from array barycenter to furthest detector| / c + 5 ms.
    # For LHO+LLO, this is 15.0 ms.
    # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
    max_abs_t = np.max(np.sqrt(np.sum(np.square(locations / lal.C_SI),
                                      axis=1))) + 0.005

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, sngl in zip(acors, sngl_inspirals):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = sngl.end - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            if phase_convention.lower() == 'antifindchirp':
                # The matched filter phase convention does NOT affect the
                # template autocorrelation sequence; however it DOES affect
                # the maximum-likelihood phase estimate AND the SNR time series.
                # So if we are going to apply the anti-findchirp phase
                # correction later, we'll have to apply a complex conjugate to
                # the autocorrelation sequence to cancel it here.
                acor = np.conj(acor)
            series.data.data = sngl.snr * filter.exp_i(sngl.coa_phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series with mixed sample rates'
        )

    # Ensure that all of the SNR time series have odd lengths.
    if any(len(series.data.data) % 2 == 0 for series in snr_series):
        raise ValueError('SNR time series must have odd lengths')

    # Trim time series to the desired length.
    max_abs_n = int(np.ceil(max_abs_t * sample_rate))
    desired_length = 2 * max_abs_n - 1
    for i, series in enumerate(snr_series):
        length = len(series.data.data)
        if length > desired_length:
            snr_series[i] = lal.CutCOMPLEX8TimeSeries(
                series, length // 2 + 1 - max_abs_n, desired_length)

    # FIXME: for now, the Python wrapper expects all of the SNR time series to
    # also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series of mixed lengths')

    # Perform sanity checks that the middle sample of the SNR time series matches
    # the sngl_inspiral records.
    for sngl_inspiral, series in zip(sngl_inspirals, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - sngl_inspiral.end)) >= 0.5 * deltaT:
            raise ValueError(
                'BAYESTAR expects the SNR time series to be centered on the sngl_inspiral end times'
            )

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Fudge factor for excess estimation error in gstlal_inspiral.
    fudge = 0.83
    snr_series *= fudge

    # If using 'findchirp' phase convention rather than gstlal/mbta,
    # then flip signs of phases.
    if phase_convention.lower() == 'antifindchirp':
        log.warn('Using anti-FINDCHIRP phase convention; inverting phases. '
                 'This is currently the default and it is appropriate for '
                 'gstlal and MBTA but not pycbc as of observing run 1 ("O1"). '
                 'The default setting is likely to change in the future.')
        snr_series = np.conj(snr_series)

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    # FIXME: np.average does not yet support masked arrays.
    # Replace with np.average when numpy 1.13.0 is available.
    mean_toa = np.sum(toas * weights) / np.sum(weights)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the prior
    # is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ("Prior is a power law r^k with k={}, " +
             "undefined at min_distance=0").format(prior_distance_power))

    # Rescale distances to horizon distance of most sensitive detector.
    max_horizon = np.max(horizons)
    horizons /= max_horizon
    min_distance /= max_horizon
    max_distance /= max_horizon

    # Time and run sky localization.
    log.debug('starting computationally-intensive section')
    start_time = lal.GPSTimeNow()
    if method == "toa_phoa_snr":
        skymap = Table(
            _sky_map.toa_phoa_snr(min_distance, max_distance,
                                  prior_distance_power, gmst, sample_rate,
                                  toas, snr_series, responses, locations,
                                  horizons))
    elif method == "toa_phoa_snr_mcmc":
        skymap = emcee_sky_map(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            chain_dump=chain_dump,
            max_horizon=max_horizon * fudge)
    else:
        raise ValueError("Unrecognized method: %s" % method)

    # Convert distance moments to parameters
    distmean = skymap.columns.pop('DISTMEAN')
    diststd = skymap.columns.pop('DISTSTD')
    skymap['DISTMU'], skymap['DISTSIGMA'], skymap['DISTNORM'] = \
        distance.moments_to_parameters(distmean, diststd)

    # Add marginal distance moments
    good = np.isfinite(distmean) & np.isfinite(diststd)
    prob = (moc.uniq2pixarea(skymap['UNIQ']) * skymap['PROBDENSITY'])[good]
    distmean = distmean[good]
    diststd = diststd[good]
    rbar = (prob * distmean).sum()
    r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()
    skymap.meta['distmean'] = rbar
    skymap.meta['diststd'] = np.sqrt(r2bar - np.square(rbar))

    # Rescale
    rescale = max_horizon * fudge
    skymap['DISTMU'] *= rescale
    skymap['DISTSIGMA'] *= rescale
    skymap.meta['distmean'] *= rescale
    skymap.meta['diststd'] *= rescale
    skymap['DISTNORM'] /= np.square(rescale)

    end_time = lal.GPSTimeNow()
    log.debug('finished computationally-intensive section')

    # Fill in metadata and return.
    skymap.meta['creator'] = 'BAYESTAR'
    skymap.meta['origin'] = 'LIGO/Virgo'
    skymap.meta['gps_time'] = float(epoch)
    skymap.meta['runtime'] = float(end_time - start_time)
    skymap.meta['instruments'] = {
        sngl_inspiral.ifo
        for sngl_inspiral in sngl_inspirals
    }
    skymap.meta['gps_creation_time'] = end_time

    return skymap
Code example #25
def P_instruments_given_signal(horizon_distances,
                               n_samples=500000,
                               min_instruments=2):
    """
	Precomputed P(ifos | {horizon distances}, signal).
	Returns a dictionary containing probabilities of each instrument producing a trigger
	given the instruments' horizon distances.
	"""
    if n_samples < 1:
        raise ValueError("n_samples=%d must be >= 1" % n_samples)
    if min_instruments < 1:
        raise ValueError("min_instruments=%d must be >= 1" % min_instruments)
    # get instrument names
    names = tuple(horizon_distances.keys())
    # get the horizon distances in the same order
    DH = numpy.array(tuple(horizon_distances.values()))
    # get detector responses in the same order
    resps = [
        lalsimulation.DetectorPrefixToLALDetector(str(inst)).response
        for inst in names
    ]

    # initialize output.
    result = dict.fromkeys(
        (frozenset(instruments) for n in range(min_instruments,
                                               len(names) + 1)
         for instruments in itertools.combinations(names, n)), 0.0)
    if not result:
        raise ValueError(
            "not enough instruments in horizon_distances to satisfy min_instruments"
        )

    # check for no-op
    if (DH != 0.).sum() < min_instruments:
        # not enough instruments are on to form a coinc with
        # the minimum required instruments. this is not
        # considered an error condition, returns p=0 for all
        # probabilities. NOTE result is not normalizable
        return result

    # we select random uniformly-distributed right ascensions
    # so there's no point in also choosing random GMSTs and any
    # value is as good as any other
    gmst = 0.0

    # in the loop, we'll need a sequence of integers to enumerate
    # instruments. construct it here to avoid doing it repeatedly in
    # the loop
    indices = tuple(range(len(names)))

    # run the sampler for the requested # of iterations. save some
    # symbols to avoid doing module attribute look-ups in the
    # loop
    acos = math.acos
    random_uniform = random.uniform
    twopi = 2. * math.pi
    pi_2 = math.pi / 2.
    xlal_am_resp = lal.ComputeDetAMResponse

    for i in range(n_samples):
        # select a random sky location and polarization angle.
        # the signal is assumed to be linearly polarized with
        # h_cross = 0, so we need only F+ (its absolute value)
        ra = random_uniform(0., twopi)
        dec = pi_2 - acos(random_uniform(-1., 1.))
        psi = random_uniform(0., twopi)
        fplus = tuple(
            abs(xlal_am_resp(resp, ra, dec, psi, gmst)[0]) for resp in resps)

        # SNR * D / 8 for each instrument (the horizon distance is
        # defined for an SNR of 8; the factor of 8 is omitted here
        # for performance because only ratios are needed below)
        snr_times_D_over_8 = DH * fplus

        # the volume visible to each instrument given the
        # requirement that a source be above the SNR threshold is
        #
        # V = [constant] * (8 * snr_times_D_over_8 / snr_thresh)**3
        #
        # but in the end we'll only need ratios of these volumes, so
        # we can omit the proportionality constant and the factor of
        # (8 / snr_thresh)**3.
        # NOTE: this assumes all detectors share the same SNR threshold
        V_at_snr_threshold = snr_times_D_over_8**3.

        # order[0] is the index of the instrument that can see
        # sources the farthest, order[1] is the index of the
        # instrument that can see sources the next farthest, ...
        order = sorted(indices,
                       key=V_at_snr_threshold.__getitem__,
                       reverse=True)
        ordered_names = tuple(names[i] for i in order)

        # instrument combination and volume of space (up to
        # irrelevant proportionality constant) visible to that
        # combination given the requirement that a source be above
        # the SNR threshold in that combination. sequence of
        # instrument combinations is left as a generator expression
        # for lazy evaluation
        instruments = (frozenset(ordered_names[:n])
                       for n in range(min_instruments, len(order) + 1))
        V = tuple(V_at_snr_threshold[i] for i in order[min_instruments - 1:])

        # for each instrument combination, the probability that a
        # source visible to at least the minimum required number of
        # instruments is visible to that combination (this is where
        # the proportionality constant and the factor of
        # (8 / snr_threshold)**3 drop out of the calculation)
        P = tuple(x / V[0] for x in V)

        # accumulate result. p - pnext is the probability that a
        # source (that is visible to at least the minimum required
        # number of instruments) is visible to that combination of
        # instruments and not any other combination of instruments
        for key, p, pnext in zip(instruments, P, P[1:] + (0., )):
            result[key] += p - pnext

    # normalize
    for key in result:
        result[key] /= n_samples

    #
    # make sure it's normalized. allow an all-0 result in the event
    # that too few instruments are available to ever form coincs.
    #

    total = sum(sorted(result.values()))
    assert abs(
        total - 1.
    ) < 1e-13 or total == 0., "result insufficiently well normalized: %s, sum = %g" % (
        result, total)
    if total != 0:
        for key in result:
            result[key] /= total

    #
    # done
    #

    return result
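
A usage sketch for the function above, assuming lalsimulation and the module-level imports the excerpt relies on (math, random, itertools, numpy) are available; the horizon distances below are illustrative values in Mpc, not numbers from the original source:

horizons = {"H1": 120.0, "L1": 110.0, "V1": 65.0}   # hypothetical horizon distances
probs = P_instruments_given_signal(horizons, n_samples=100000, min_instruments=2)
for combo in sorted(probs, key=sorted):
    # each key is a frozenset of instrument names, e.g. {"H1", "L1"}
    print("+".join(sorted(combo)), probs[combo])
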
Code example #26
    def likelihood_function(right_ascension, declination, phi_orb, inclination,
                            psi, distance):
        '''
	# FIXME  - this is pretty bad
	max_tpb = 1024 # Max number of threads per block for GPU: FIXME should be an --option
	ntimes = len(rholms['H1'][(2,0)]) # FIXME this should be dynamic 
	nmodes = len(rholms['H1'].keys()) # FIXME this should be dynamic too 
	nclmns  = numpy.int32( ntimes + max_tpb - (ntimes % max_tpb) ) # Number of cols to pad w/ 0s 
	nsamps = len(right_ascension)
	'''

        ###
        import math
        right_ascension = numpy.linspace(0.1, math.pi, nsamps) + 1.0
        declination = numpy.linspace(0.1, math.pi, nsamps) + 2.0
        tref = numpy.linspace(0.1, math.pi, nsamps) + 3.0
        phi_orb = numpy.linspace(0.1, math.pi, nsamps) + 4.0
        inclination = numpy.linspace(0.1, math.pi, nsamps) + 5.0
        psi = numpy.linspace(0.1, math.pi, nsamps) + 6.0
        #distance = numpy.linspace(0.1, math.pi, nsamps) + 7.0
        distance = numpy.ones(nsamps).astype(numpy.float64)
        ###
        '''
	right_ascension = right_ascension.astype(numpy.float64)
	declination = declination.astype(numpy.float64)
	tref = numpy.array([fiducial_epoch for item in right_ascension]).astype(numpy.float64)
	phi_orb = phi_orb.astype(numpy.float64)
	inclination = inclination.astype(numpy.float64)
	psi = psi.astype(numpy.float64)
	distance = distance.astype(numpy.float64)*1.e6*lal.PC_SI

	# Pass a block of zeros onto the GPU to hold results from each detector
	lnL_block = numpy.zeros((nsamps * nmodes, nclmns)).astype(numpy.float64)
	lnL_block_gpu = gpuarray.to_gpu(lnL_block)
	'''

        for det in rholms.keys():
            # Convert the crossterms to matrices
            CTU = numpy.zeros(len(cross_termsU[det].keys()),
                              dtype=numpy.complex128)
            CTV = numpy.zeros(len(cross_termsV[det].keys()),
                              dtype=numpy.complex128)
            sort_terms_keys = sorted(cross_termsU[det],
                                     key=lambda tup: (tup[0][1], tup[1][1]))
            for i in range(0, len(cross_termsU[det].keys())):
                CTU[i] = cross_termsU[det][sort_terms_keys[i]]
                CTV[i] = cross_termsV[det][sort_terms_keys[i]]
            side = numpy.sqrt(len(cross_termsU[det].keys()))
            CTU = CTU.reshape((side, side))
            CTV = CTV.reshape((side, side))

            det_tns = numpy.array(
                lalsim.DetectorPrefixToLALDetector(det).response).astype(
                    numpy.float64)
            lnL_block_gpu += factored_likelihood.factored_log_likelihood_time_marginalized_gpu(
                mod, right_ascension, declination, tref, phi_orb, inclination,
                psi, distance, det_tns, rholms[det], CTU, CTV)

        print("Marginalizing over Time... \n")
        network_lnL_marg_gpu = factored_likelihood.marginalize_all_lnL(
            mod, lnL_block_gpu, nmodes, nsamps, ntimes, nclmns,
            tvals[1] - tvals[0])
        print("Done marginalizing over time. \n")

        # OLD STUFF
        # use EXTREMELY many bits
        lnL = numpy.zeros(right_ascension.shape, dtype=numpy.float128)
        # PRB: can we move this loop inside the factored_likelihood? It might help.
        i = 0
        # choose an array at the target sampling rate. P is inherited globally
        for ph, th, phr, ic, ps, di in zip(right_ascension, declination,
                                           phi_orb, inclination, psi,
                                           distance):
            P.phi = ph  # right ascension
            P.theta = th  # declination
            P.tref = tref[i]  #fiducial_epoch  # see 'tvals', above
            P.phiref = phr  # ref. orbital phase
            P.incl = ic  # inclination
            P.psi = ps  # polarization angle
            P.dist = di * 1.e6 * lal.PC_SI  # luminosity distance
            lnL[i] = factored_likelihood.factored_log_likelihood_time_marginalized(
                tvals,
                P,
                rholms_intp,
                rholms,
                cross_termsU,
                cross_termsV,
                det_epochs,
                opts.l_max,
                interpolate=opts.interpolate_time)
            i += 1
        # NOTE: a debugger breakpoint left over from development is
        # disabled here:
        # import pdb; pdb.set_trace()
        return numpy.exp(lnL)
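
The cross-term packing step above (sort the mode-pair keys, then reshape the flat array into a square matrix) can be exercised in isolation. The dictionary below is hypothetical and simply stands in for cross_termsU[det]:

import numpy

cross_terms = {   # hypothetical ((l, m), (l, m)) -> complex cross terms
    ((2, -2), (2, -2)): 1.0 + 0.0j,
    ((2, -2), (2, 2)): 0.1 + 0.2j,
    ((2, 2), (2, -2)): 0.1 - 0.2j,
    ((2, 2), (2, 2)): 1.0 + 0.0j,
}
keys = sorted(cross_terms, key=lambda tup: (tup[0][1], tup[1][1]))
side = int(numpy.sqrt(len(keys)))
CT = numpy.array([cross_terms[k] for k in keys],
                 dtype=numpy.complex128).reshape((side, side))
print(CT)
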
Code example #27
def SNRPDF(instruments,
           horizon_distances,
           snr_cutoff,
           n_samples=200000,
           bins=rate.ATanLogarithmicBins(3.6, 1e3, 150)):
    """
	Precomputed SNR PDF for each detector
	Returns a BinnedArray containing
	P(snr_{inst1}, snr_{inst2}, ... | signal seen in exactly
		{inst1, inst2, ...} in a network of instruments
		with a given set of horizon distances)
	
	i.e., the joint probability density of observing a set of
	SNRs conditional on them being the result of signal that
	has been recovered in a given subset of the instruments in
	a network of instruments with a given set of horizon
	distances.

	The axes of the PDF correspond to the instruments in
	alphabetical order.  The binning used for all axes is set
	with the bins parameter.

	The n_samples parameter sets the number of iterations for
	the internal Monte Carlo sampling loop.
	"""
    if n_samples < 1:
        raise ValueError("n_samples=%d must be >= 1" % n_samples)

    # get instrument names
    instruments = sorted(instruments)
    if len(instruments) < 1:
        raise ValueError(instruments)
    # get the horizon distances in the same order
    DH_times_8 = 8. * numpy.array(
        [horizon_distances[inst] for inst in instruments])
    # get detector responses in the same order
    resps = [
        lalsimulation.DetectorPrefixToLALDetector(str(inst)).response
        for inst in instruments
    ]

    # get horizon distances and responses of remaining
    # instruments (order doesn't matter as long as they're in
    # the same order)
    DH_times_8_other = 8. * numpy.array([
        dist
        for inst, dist in horizon_distances.items() if inst not in instruments
    ])
    resps_other = tuple(
        lalsimulation.DetectorPrefixToLALDetector(str(inst)).response
        for inst in horizon_distances if inst not in instruments)

    # initialize the PDF array, and pre-construct the sequence of
    # snr, d(snr) tuples. since the last SNR bin probably has
    # infinite size, we remove it from the sequence
    # (meaning the PDF will be left 0 in that bin)
    pdf = rate.BinnedArray(rate.NDBins([bins] * len(instruments)))
    snr_sequence = rate.ATanLogarithmicBins(3.6, 1e3, 500)
    snr_snrlo_snrhi_sequence = numpy.array(
        list(zip(snr_sequence.centres(), snr_sequence.lower(),
                 snr_sequence.upper()))[:-1])

    # compute the SNR at which to begin iterations over bins
    assert type(snr_cutoff) is float
    snr_min = snr_cutoff - 3.0
    assert snr_min > 0.0

    # we select random uniformly-distributed right ascensions,
    # so there's no point in also choosing random GMSTs; any
    # value is as good as any other
    gmst = 0.0

    # run the sampler for the requested number of iterations.
    # save some symbols locally to avoid module attribute
    # look-ups in the loop
    acos = math.acos
    random_uniform = random.uniform
    twopi = 2. * math.pi
    pi_2 = math.pi / 2.
    xlal_am_resp = lal.ComputeDetAMResponse
    # FIXME:  scipy.stats.rice.rvs broken on reference OS.
    # switch to it when we can rely on a new-enough scipy
    #rice_rvs = stats.rice.rvs	# broken on reference OS
    # the .reshape is needed in the event that x is a 1x1
    # array:  numpy returns a scalar from sqrt(), but we must
    # have something that we can iterate over
    rice_rvs = lambda x: numpy.sqrt(stats.ncx2.rvs(2., x**2.)).reshape(x.shape)

    for i in range(n_samples):
        # select a random sky location and polarization angle.
        # the signal is assumed to be linearly polarized with
        # h_cross = 0, so we need only F+ (its absolute value).
        ra = random_uniform(0., twopi)
        dec = pi_2 - acos(random_uniform(-1., 1.))
        psi = random_uniform(0., twopi)
        fplus = tuple(
            abs(xlal_am_resp(resp, ra, dec, psi, gmst)[0]) for resp in resps)

        # SNR * D for each instrument.  DH_times_8 already carries
        # the factor of 8 from the horizon-distance convention
        # (horizon distance is defined for an SNR of 8)
        snr_times_D = DH_times_8 * fplus

        # snr * D in the instrument whose SNR grows fastest
        # with decreasing D
        max_snr_times_D = snr_times_D.max()

        # snr_times_D.min() / snr_min = the furthest a
        # source can be and still be above snr_min in all
        # instruments involved.  max_snr_times_D / that
        # distance = the SNR that distance corresponds to
        # in the instrument whose SNR grows fastest with
        # decreasing distance --- the SNR the source has in
        # the most sensitive instrument when visible to all
        # instruments in the combo
        try:
            start_index = snr_sequence[max_snr_times_D /
                                       (snr_times_D.min() / snr_min)]
        except ZeroDivisionError:
            # one of the instruments that must be able
            # to see the event is blind to it
            continue

        # min_D_other is minimum distance at which source
        # becomes visible in an instrument that isn't
        # involved.  max_snr_times_D / min_D_other gives
        # the SNR in the most sensitive instrument at which
        # the source becomes visible to one of the
        # instruments not allowed to participate
        if len(DH_times_8_other):
            min_D_other = (DH_times_8_other * fplus).min() / snr_cutoff
            try:
                end_index = snr_sequence[max_snr_times_D / min_D_other] + 1
            except ZeroDivisionError:
                # all instruments that must not see
                # it are blind to it
                end_index = None
        else:
            # there are no other instruments
            end_index = None

        # if start_index >= end_index, then for the source to be
        # close enough to be visible in all the instruments that must
        # see it, it would already be visible to one or more
        # instruments that must not.  we don't need to check for
        # this; the for loop that comes next will simply have no
        # iterations.

        # iterate over the nominal SNRs (= noise-free SNR
        # in the most sensitive instrument) at which we
        # will add weight to the PDF.  from the SNR in
        # most sensitive instrument, the distance to the
        # source is:
        #
        #	D = max_snr_times_D / snr
        #
        # and the (noise-free) SNRs in all instruments are:
        #
        #	snr_times_D / D
        #
        # scipy's Rice-distributed RV code is used to
        # add the effect of background noise, converting
        # the noise-free SNRs into simulated observed SNRs
        #
        # number of sources between Dlo and Dhi:
        #
        #     d count \propto D**2 |dD|
        #       count \propto Dhi**3 - Dlo**3
        D_Dhi_Dlo_sequence = max_snr_times_D / snr_snrlo_snrhi_sequence[
            start_index:end_index]
        for snr, weight in zip(
                rice_rvs(snr_times_D /
                         numpy.reshape(D_Dhi_Dlo_sequence[:, 0],
                                       (len(D_Dhi_Dlo_sequence), 1))),
                D_Dhi_Dlo_sequence[:, 1]**3. - D_Dhi_Dlo_sequence[:, 2]**3.):
            pdf[tuple(snr)] += weight

    # check for divide-by-zeros that weren't caught.  also
    # finds nans if they are there
    assert numpy.isfinite(pdf.array).all()

    # convolve samples with gaussian kernel
    rate.filter_array(pdf.array,
                      rate.gaussian_window(*(1.875, ) * len(pdf.array.shape)))
    # protect against round-off in the FFT convolution leading to
    # negative values in the PDF
    numpy.clip(pdf.array, 0., PosInf, pdf.array)
    # zero counts in bins that are below the trigger threshold.
    # have to convert SNRs to indexes ourselves and adjust so
    # that we don't zero the bin in which the SNR threshold
    # falls
    range_all = slice(None, None)
    range_low = slice(None, pdf.bins[0][snr_cutoff])
    for i in range(len(instruments)):
        slices = [range_all] * len(instruments)
        slices[i] = range_low
        pdf.array[tuple(slices)] = 0.
    # convert bin counts to normalized PDF
    pdf.to_pdf()
    # one last sanity check
    assert numpy.isfinite(pdf.array).all()
    # done
    return pdf
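
The rice_rvs workaround above exploits the fact that the square root of a noncentral chi-squared variate with 2 degrees of freedom and noncentrality x**2 is Rice-distributed with shape parameter x. A quick self-contained check (assuming scipy is installed; the SNR values are illustrative):

import numpy
from scipy import stats

x = numpy.array([3.0, 6.0, 10.0])   # illustrative noise-free SNRs
n = 100000
# workaround used in SNRPDF above
workaround = numpy.sqrt(stats.ncx2.rvs(2., x**2., size=(n, len(x))))
# direct Rice sampling
direct = stats.rice.rvs(x, size=(n, len(x)))
print(workaround.mean(axis=0))      # should agree with the line below to ~1%
print(direct.mean(axis=0))
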
Code example #28
File: sky_map.py Project: llondon6/lalsuite-mmrd
def ligolw_sky_map(sngl_inspirals,
                   waveform,
                   f_low,
                   min_distance=None,
                   max_distance=None,
                   prior_distance_power=None,
                   method="toa_phoa_snr",
                   psds=None,
                   nside=-1,
                   chain_dump=None,
                   phase_convention='antifindchirp',
                   snr_series=None,
                   enable_snr_series=False):
    """Convenience function to produce a sky map from LIGO-LW rows. Note that
    min_distance and max_distance should be in Mpc.

    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
    """

    # Ensure that sngl_inspiral is either a single template or a list of
    # identical templates
    for key in 'mass1 mass2 spin1x spin1y spin1z spin2x spin2y spin2z'.split():
        if hasattr(sngl_inspirals[0], key):
            value = getattr(sngl_inspirals[0], key)
            if any(value != getattr(_, key) for _ in sngl_inspirals):
                raise ValueError(
                    '{0} field is not the same for all detectors'.format(key))

    ifos = [sngl_inspiral.ifo for sngl_inspiral in sngl_inspirals]

    # Extract SNRs from table.
    snrs = np.asarray([sngl_inspiral.snr for sngl_inspiral in sngl_inspirals])

    # Look up physical parameters for detector.
    detectors = [
        lalsimulation.DetectorPrefixToLALDetector(str(ifo)) for ifo in ifos
    ]
    responses = np.asarray([det.response for det in detectors])
    locations = np.asarray([det.location for det in detectors])

    # Power spectra for each detector.
    if psds is None:
        psds = [timing.get_noise_psd_func(ifo) for ifo in ifos]

    H = filter.sngl_inspiral_psd(sngl_inspirals[0], waveform, f_min=f_low)
    HS = [filter.signal_psd_series(H, S) for S in psds]

    # Signal models for each detector.
    signal_models = [timing.SignalModel(_) for _ in HS]

    # Get SNR=1 horizon distances for each detector.
    horizons = np.asarray([
        signal_model.get_horizon_distance() for signal_model in signal_models
    ])

    weights = [
        1 / np.square(signal_model.get_crb_toa_uncert(snr))
        for signal_model, snr in zip(signal_models, snrs)
    ]

    # Center detector array.
    locations -= np.average(locations, weights=weights, axis=0)

    if enable_snr_series:
        log.warn(
            'Enabling input of SNR time series. This feature is UNREVIEWED.')
    else:
        snr_series = None

    if snr_series is None:
        log.warn(
            "No SNR time series found, so we are creating a zero-noise "
            "SNR time series from the whitened template's autocorrelation "
            "sequence. The sky localization uncertainty may be "
            "underestimated.")

        # Maximum barycentered arrival time error:
        # |distance from array barycenter to furthest detector| / c + 5 ms.
        # For LHO+LLO, this is 15.0 ms.
        # For an arbitrary terrestrial detector network, the maximum is 26.3 ms.
        max_abs_t = np.max(
            np.sqrt(np.sum(np.square(locations / lal.C_SI), axis=1))) + 0.005

        acors, sample_rates = zip(
            *[filter.autocorrelation(_, max_abs_t) for _ in HS])
        sample_rate = sample_rates[0]
        deltaT = 1 / sample_rate
        nsamples = len(acors[0])
        assert all(sample_rate == _ for _ in sample_rates)
        assert all(nsamples == len(_) for _ in acors)
        nsamples = nsamples * 2 - 1

        snr_series = []
        for acor, sngl in zip(acors, sngl_inspirals):
            series = lal.CreateCOMPLEX8TimeSeries('fake SNR', 0, 0, deltaT,
                                                  lal.StrainUnit, nsamples)
            series.epoch = sngl.end - 0.5 * (nsamples - 1) * deltaT
            acor = np.concatenate((np.conj(acor[:0:-1]), acor))
            if phase_convention.lower() == 'antifindchirp':
                # The matched filter phase convention does NOT affect the
                # template autocorrelation sequence; however it DOES affect
                # the maximum-likelihood phase estimate AND the SNR time series.
                # So if we are going to apply the anti-findchirp phase
                # correction later, we'll have to apply a complex conjugate to
                # the autocorrelation sequence to cancel it here.
                acor = np.conj(acor)
            series.data.data = sngl.snr * filter.exp_i(sngl.coa_phase) * acor
            snr_series.append(series)

    # Ensure that all of the SNR time series have the same sample rate.
    # FIXME: for now, the Python wrapper expects all of the SNR time series
    # to also be the same length.
    deltaT = snr_series[0].deltaT
    sample_rate = 1 / deltaT
    if any(deltaT != series.deltaT for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series with mixed sample rates'
        )

    # FIXME: for now, the Python wrapper expects all of the SNR time series
    # to also be the same length.
    nsamples = len(snr_series[0].data.data)
    if any(nsamples != len(series.data.data) for series in snr_series):
        raise ValueError(
            'BAYESTAR does not yet support SNR time series of mixed lengths')

    # Perform sanity checks that the middle sample of each SNR time series
    # matches the corresponding sngl_inspiral record.
    for sngl_inspiral, series in zip(sngl_inspirals, snr_series):
        if np.abs(0.5 * (nsamples - 1) * series.deltaT +
                  float(series.epoch - sngl_inspiral.end)) >= 0.5 * deltaT:
            raise ValueError(
                'BAYESTAR expects the SNR time series to be centered on the sngl_inspiral end times'
            )

    # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming
    # that the trigger happened in the middle.
    toas_ns = [
        series.epoch.ns() + 1e9 * 0.5 *
        (len(series.data.data) - 1) * series.deltaT for series in snr_series
    ]

    # Collect all of the SNR series in one array.
    snr_series = np.vstack([series.data.data for series in snr_series])

    # Fudge factor for excess estimation error in gstlal_inspiral.
    fudge = 0.83
    snr_series *= fudge

    # If using 'findchirp' phase convention rather than gstlal/mbta,
    # then flip signs of phases.
    if phase_convention.lower() == 'antifindchirp':
        log.warn('Using anti-FINDCHIRP phase convention; inverting phases. '
                 'This is currently the default and it is appropriate for '
                 'gstlal and MBTA but not pycbc as of observing run 1 ("O1"). '
                 'The default setting is likely to change in the future.')
        snr_series = np.conj(snr_series)

    # Center times of arrival and compute GMST at mean arrival time.
    # Pre-center in integer nanoseconds to preserve precision of
    # initial datatype.
    epoch = sum(toas_ns) // len(toas_ns)
    toas = 1e-9 * (np.asarray(toas_ns) - epoch)
    mean_toa = np.average(toas, weights=weights, axis=0)
    toas -= mean_toa
    epoch += int(np.round(1e9 * mean_toa))
    epoch = lal.LIGOTimeGPS(0, int(epoch))
    gmst = lal.GreenwichMeanSiderealTime(epoch)

    # Translate SNR time series back to time of first sample.
    toas -= 0.5 * (nsamples - 1) * deltaT

    # If minimum distance is not specified, then default to 0 Mpc.
    if min_distance is None:
        min_distance = 0

    # If maximum distance is not specified, then default to the SNR=4
    # horizon distance of the most sensitive detector.
    if max_distance is None:
        max_distance = max(horizons) / 4

    # If prior_distance_power is not specified, then default to 2
    # (p(r) ~ r^2, uniform in volume).
    if prior_distance_power is None:
        prior_distance_power = 2

    # Raise an exception if 0 Mpc is the minimum effective distance and the prior
    # is of the form r**k for k<0
    if min_distance == 0 and prior_distance_power < 0:
        raise ValueError(
            ("Prior is a power law r^k with k={}, " +
             "undefined at min_distance=0").format(prior_distance_power))

    # Rescale distances to horizon distance of most sensitive detector.
    max_horizon = np.max(horizons)
    horizons /= max_horizon
    min_distance /= max_horizon
    max_distance /= max_horizon

    # Use KDE for density estimation?
    if method.endswith('_kde'):
        method = method[:-4]
        kde = True
    else:
        kde = False

    # Time and run sky localization.
    start_time = time.time()
    if method == "toa_phoa_snr":
        prob = _sky_map.toa_phoa_snr(min_distance, max_distance,
                                     prior_distance_power, gmst, sample_rate,
                                     toas, snr_series, responses, locations,
                                     horizons, nside).T
    elif method == "toa_phoa_snr_mcmc":
        prob = emcee_sky_map(
            logl=_sky_map.log_likelihood_toa_phoa_snr,
            loglargs=(gmst, sample_rate, toas, snr_series, responses,
                      locations, horizons),
            logp=toa_phoa_snr_log_prior,
            logpargs=(min_distance, max_distance, prior_distance_power,
                      max_abs_t),
            xmin=[0, -1, min_distance, -1, 0, 0],
            xmax=[2 * np.pi, 1, max_distance, 1, 2 * np.pi, 2 * max_abs_t],
            nside=nside,
            kde=kde,
            chain_dump=chain_dump,
            max_horizon=max_horizon)
    else:
        raise ValueError("Unrecognized method: %s" % method)
    prob[1] *= max_horizon * fudge
    prob[2] *= max_horizon * fudge
    prob[3] /= np.square(max_horizon * fudge)
    end_time = time.time()

    # Find elapsed run time.
    elapsed_time = end_time - start_time

    # Done!
    return prob, epoch, elapsed_time
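
The "maximum barycentered arrival time error" used above depends only on detector geometry, so it can be reproduced on its own. The sketch below is an approximation: it uses an unweighted barycenter and an illustrative detector list instead of the timing-weighted average in the function above:

import numpy as np
import lal
import lalsimulation

ifos = ['H1', 'L1', 'V1']   # illustrative network
locations = np.asarray([
    lalsimulation.DetectorPrefixToLALDetector(ifo).location for ifo in ifos])
locations = locations - locations.mean(axis=0)   # unweighted array barycenter
max_abs_t = np.max(
    np.sqrt(np.sum(np.square(locations / lal.C_SI), axis=1))) + 0.005
print(max_abs_t)   # seconds
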
Code example #29
    def real_hoft(self, Fp=None, Fc=None):
        """
        Returns the real-valued h(t) that would be produced in a single instrument.
        Translates epoch as needed.
        Based on 'hoft' in lalsimutils.py
        """
        # Create the complex time series
        htC = self.complex_hoft(
            force_T=1. / self.P.deltaF, deltaT=self.P.deltaT
        )  # note P.tref is NOT used in the low-level code
        TDlen = htC.data.length
        if rosDebug:
            print("Size sanity check ", TDlen,
                  1 / (self.P.deltaF * self.P.deltaT))
            print(" Raw complex magnitude , ", np.max(htC.data.data))

        # Create working buffers to extract data from it -- wasteful.
        hp = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0., self.P.deltaT,
                                       lalsimutils.lsu_DimensionlessUnit,
                                       TDlen)
        hc = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0., self.P.deltaT,
                                       lalsimutils.lsu_DimensionlessUnit,
                                       TDlen)
        hT = lal.CreateREAL8TimeSeries("h(t)", htC.epoch, 0., self.P.deltaT,
                                       lalsimutils.lsu_DimensionlessUnit,
                                       TDlen)
        # Copy data components over
        hp.data.data = np.real(htC.data.data)
        hc.data.data = np.imag(htC.data.data)
        # transform as in lalsimutils.hoft
        if Fp is not None and Fc is not None:
            hp.data.data *= Fp
            hc.data.data *= Fc
            hp = lal.AddREAL8TimeSeries(hp, hc)
            hoft = hp
        elif self.P.radec is False:
            fp = lalsimutils.Fplus(self.P.theta, self.P.phi, self.P.psi)
            fc = lalsimutils.Fcross(self.P.theta, self.P.phi, self.P.psi)
            hp.data.data *= fp
            hc.data.data *= fc
            hp = lal.AddREAL8TimeSeries(hp, hc)
            hoft = hp
        else:
            # Note epoch must be applied FIRST, to make sure the correct event time is being used to construct the modulation functions
            hp.epoch = hp.epoch + self.P.tref
            hc.epoch = hc.epoch + self.P.tref
            if rosDebug:
                print(" Real h(t) before detector weighting, ",
                      np.max(hp.data.data), np.max(hc.data.data))
            hoft = lalsim.SimDetectorStrainREAL8TimeSeries(
                hp,
                hc,  # beware, this MAY alter the series length??
                self.P.phi,
                self.P.theta,
                self.P.psi,
                lalsim.DetectorPrefixToLALDetector(str(self.P.detector)))
            hoft = lal.CutREAL8TimeSeries(
                hoft, 0, hp.data.length)  # force same length as before??
            if rosDebug:
                print("Size before and after detector weighting ",
                      hp.data.length, hoft.data.length)
        if rosDebug:
            print(" Real h_{IFO}(t) generated, pre-taper : max strain =",
                  np.max(hoft.data.data))
        if self.P.taper != lalsimutils.lsu_TAPER_NONE:  # Taper if requested
            lalsim.SimInspiralREAL8WaveTaper(hoft.data, self.P.taper)
        if self.P.deltaF is not None:
            TDlen = int(1. / self.P.deltaF * 1. / self.P.deltaT)
            print("Size sanity check 2 ",
                  int(1. / self.P.deltaF * 1. / self.P.deltaT),
                  hoft.data.length)
            assert TDlen >= hoft.data.length
            npts = hoft.data.length
            hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlen)
            # Zero out the last few data elements -- NOT always reliable for all architectures; SHOULD NOT BE NECESSARY
            hoft.data.data[npts:TDlen] = 0

        if rosDebug:
            print(" Real h_{IFO}(t) generated : max strain =",
                  np.max(hoft.data.data))
        return hoft
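
For orientation, the kind of F+/Fx antenna-pattern weighting applied by the branches above (explicitly via Fplus/Fcross, or inside the detector projection) can be obtained directly from lal.ComputeDetAMResponse; the sky position, polarization angle, and GPS time below are illustrative only:

import lal
import lalsimulation

det = lalsimulation.DetectorPrefixToLALDetector('H1')
ra, dec, psi = 1.0, -0.5, 0.3   # radians, illustrative values
gmst = lal.GreenwichMeanSiderealTime(lal.LIGOTimeGPS(1126259462))
fplus, fcross = lal.ComputeDetAMResponse(det.response, ra, dec, psi, gmst)
print(fplus, fcross)
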
Code example #30
out_xmldoc.childNodes[0].appendChild(coinc_def_table)
coinc_def = ligolw_thinca.InspiralCoincDef
coinc_def_id = coinc_def_table.get_next_id()
coinc_def.coinc_def_id = coinc_def_id
coinc_def_table.append(coinc_def)

# Create a CoincMap table.
coinc_map_table = lsctables.New(lsctables.CoincMapTable)
out_xmldoc.childNodes[0].appendChild(coinc_map_table)

# Create a CoincEvent table.
coinc_table = lsctables.New(lsctables.CoincTable)
out_xmldoc.childNodes[0].appendChild(coinc_table)

# Precompute values that are common to all simulations.
detectors = [lalsimulation.DetectorPrefixToLALDetector(ifo)
             for ifo in opts.detector]
responses = [det.response for det in detectors]
locations = [det.location for det in detectors]

for sim_inspiral in progress.iterate(sim_inspiral_table):

    # Unpack some values from the row in the table.
    m1 = sim_inspiral.mass1
    m2 = sim_inspiral.mass2
    f_low = sim_inspiral.f_lower if opts.f_low is None else opts.f_low
    DL = sim_inspiral.distance
    ra = sim_inspiral.longitude
    dec = sim_inspiral.latitude
    inc = sim_inspiral.inclination
    phi = sim_inspiral.coa_phase