Example #1
    def test_resample_common(self):

        # Test with resample=False
        ts_short1 = ts.TimeSeries([1, 2, 3, 4, 5], [11, 12, 13, 14, 15])
        ts_short2 = ts.TimeSeries([0, 2, 3, 5], [20, 22, 23, 25])
        ts_short3 = ts.TimeSeries([0, 6, 7, 8], [20, 26, 27, 28])

        new_ts_short1, new_ts_short2 = series.sample_common(
            [ts_short1, ts_short2]
        )

        self.assertTrue(np.allclose(new_ts_short1.t, [2, 3, 5]))
        self.assertTrue(np.allclose(new_ts_short2.t, [2, 3, 5]))
        self.assertTrue(np.allclose(new_ts_short1.y, [12, 13, 15]))
        self.assertTrue(np.allclose(new_ts_short2.y, [22, 23, 25]))

        # Test no common point
        with self.assertRaises(ValueError):
            series.sample_common([ts_short1, ts_short2, ts_short3])

        times1 = np.linspace(0, 2 * np.pi, 5000)
        times2 = np.linspace(np.pi, 3 * np.pi, 5000)
        times3 = np.linspace(np.pi, 2 * np.pi, 5000)
        sins1 = np.sin(times1)
        sins2 = np.sin(times2)
        sins3 = np.sin(times3)

        ts1 = ts.TimeSeries(times1, sins1)
        ts2 = ts.TimeSeries(times2, sins2)

        new_ts1, new_ts2 = series.sample_common([ts1, ts2], resample=True)

        self.assertTrue(np.allclose(new_ts1.y, sins3))

        # Test with piecewise_constant = True

        new_ts1_c, new_ts2_c = series.sample_common(
            [ts1, ts2], piecewise_constant=True, resample=True
        )

        # The accuracy is not as great
        self.assertTrue(np.allclose(new_ts1_c.y, sins3, atol=1e-3))

        # Test a case in which there's no resampling
        newer_ts1, newer_ts2 = series.sample_common(
            [new_ts1, new_ts2], resample=True
        )
        self.assertTrue(np.allclose(newer_ts1.y, sins3))

        # Case with different lengths
        times1_longer = np.append(-1, np.linspace(0, 2 * np.pi, 5000))
        sins1_longer = np.sin(times1_longer)
        ts1_longer = ts.TimeSeries(times1_longer, sins1_longer)

        ts1_res, ts2_res = series.sample_common(
            [ts2, ts1_longer], resample=True
        )
        self.assertTrue(np.allclose(ts1_res.y, sins3))
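
The test above exercises ``sample_common`` through the test-suite aliases ``ts`` and ``series``. As a quick standalone reference, here is a minimal sketch of the same calls; the import paths are an assumption based on the kuibit package layout (``kuibit.timeseries``, ``kuibit.series``) and may need to be adapted to wherever TimeSeries and sample_common live in your installation.

# Minimal standalone sketch of sample_common; the kuibit-style import paths
# below are an assumption and may differ in your setup.
from kuibit import series
from kuibit import timeseries as ts

# Two series that only share the times 2, 3, and 5
ts_a = ts.TimeSeries([1, 2, 3, 4, 5], [11, 12, 13, 14, 15])
ts_b = ts.TimeSeries([0, 2, 3, 5], [20, 22, 23, 25])

# Default (resample=False): keep only the timestamps present in every series
common_a, common_b = series.sample_common([ts_a, ts_b])
print(common_a.t)  # common times: 2, 3, 5

# resample=True: interpolate every series onto a grid spanning the overlap of
# the domains (here, the interval [1, 5])
res_a, res_b = series.sample_common([ts_a, ts_b], resample=True)
print(res_a.t[0], res_a.t[-1])  # endpoints of the overlap: 1.0 5.0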
Example #2
def compute_horizons_separation(horizon1, horizon2, resample=True):
    """Compute the coordinate separation between the centroids of two horizons.

    The information from the apparent horizons is used (contained in the
    BHDiagnostics files).

    :param horizon1: First horizon.
    :type horizon1: :py:class:`~.OneHorizon`
    :param horizon2: Second horizon.
    :type horizon2: :py:class:`~.OneHorizon`

    :returns: Coordinate distance between the two centroids, sampled over the
              times at which both horizons are available.
    :rtype: :py:class:`~.TimeSeries`

    """

    # We use sample_common to make sure that everything is defined on the same
    # interval.
    (
        cen1_x,
        cen1_y,
        cen1_z,
        cen2_x,
        cen2_y,
        cen2_z,
    ) = sample_common(
        (
            horizon1.ah.centroid_x,
            horizon1.ah.centroid_y,
            horizon1.ah.centroid_z,
            horizon2.ah.centroid_x,
            horizon2.ah.centroid_y,
            horizon2.ah.centroid_z,
        ),
        resample=resample,
    )

    return np.sqrt((cen1_x - cen2_x)**2 + (cen1_y - cen2_y)**2 +
                   (cen1_z - cen2_z)**2)
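
``compute_horizons_separation`` is essentially ``sample_common`` followed by a Euclidean distance. As a hedged sketch, the same pattern can be reproduced on synthetic centroid tracks, with no horizon files needed; the kuibit-style imports are again an assumption.

# Sketch of the same computation on synthetic centroid TimeSeries; the
# kuibit-style imports are an assumption, as in the sketch after Example #1.
import numpy as np

from kuibit import series
from kuibit import timeseries as ts

# Two centroids moving on the unit circle, sampled on different time grids
t1 = np.linspace(0, 10, 101)
t2 = np.linspace(1, 11, 101)

cen1 = [
    ts.TimeSeries(t1, np.cos(t1)),
    ts.TimeSeries(t1, np.sin(t1)),
    ts.TimeSeries(t1, np.zeros_like(t1)),
]
cen2 = [
    ts.TimeSeries(t2, -np.cos(t2)),
    ts.TimeSeries(t2, -np.sin(t2)),
    ts.TimeSeries(t2, np.zeros_like(t2)),
]

# Same pattern as compute_horizons_separation: bring all the components to a
# common time grid, then take the Euclidean distance between the centroids
c1x, c1y, c1z, c2x, c2y, c2z = series.sample_common(cen1 + cen2, resample=True)

separation = np.sqrt(
    (c1x - c2x) ** 2 + (c1y - c2y) ** 2 + (c1z - c2z) ** 2
)
print(separation.y.max())  # ~2: the two centroids are antipodal on the unit circle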
Example #3
    def inner_product(
        self,
        other,
        fmin=0,
        fmax=np.inf,
        noises=None,
        same_domain=False,
    ):
        r"""Compute the (network) inner product with another :py:class:`~.FrequencySeries`.

        This is defined as:

        :math:`(h_1, h_2) = 4 \Re \int_{f_{\mathrm{min}}}^{f_{\mathrm{max}}} \frac{h_1 h_2^*}{S_n}`

        where ``S_n`` is the noise curve, and ``h_1``, ``h_2`` the series.

        In case multiple noise curves are supplied, compute

        :math:`(h_1, h_2) = \sum_{\mathrm{detectors}}
        4 \Re \int_{f_{\mathrm{min}}}^{f_{\mathrm{max}}} \frac{h_1 h_2^*}{S_n}`

        This is the network inner product. To compute this quantity, you have
        to provide a list of noises.

        We assume that the :py:class:`~.FrequencySeries` are zero outside of
        their interval of definition, so if ``fmax`` (``fmin``) is larger
        (smaller) than the largest (smallest) frequency available, it is
        effectively set to the largest (smallest) frequency available.

        Since Fourier transforms typically explode near ``fmin = 0``, the
        result of the integration is highly sensitive to the behavior in that
        region.

        If ``same_domain`` is True, it is assumed that all the
        :py:class:`~.FrequencySeries` involved are defined over the same
        frequencies. Turning this on speeds up the computation, but it will
        produce incorrect results if the assumption is violated. If it is
        False, the domains of definition of the series are checked and, if
        they do not all coincide, the series are resampled onto a common one.

        :param other: Second frequency series in the inner product.
        :type other: :py:class:`.FrequencySeries`
        :param fmin: Remove frequencies below this value.
        :type fmin: float
        :param fmax: Remove frequencies above this value.
        :type fmax: float
        :param noises: Noise curve(s) used to weight the integrand. If None,
                       no weight is applied.
        :type noises: :py:class:`.FrequencySeries`, list
                      of :py:class:`.FrequencySeries`, or None
        :param same_domain: Whether to assume that the :py:class:`~.FrequencySeries`
                            are defined over the same frequencies. If you can
                            guarantee this, the computation will be faster.
        :type same_domain: bool

        :returns: Inner product between ``self`` and ``other``.
        :rtype: float

        """
        if not isinstance(other, type(self)):
            raise TypeError("The other object is not a FrequencySeries")

        if ((not isinstance(noises, type(self)))
                and (not isinstance(noises, list)) and (noises is not None)):
            raise TypeError("Noise is not (a list of) FrequencySeries or None")

        if fmin >= fmax:
            raise ValueError("fmin has to be smaller than fmax")

        if fmin < 0:
            raise ValueError("fmin has to be non-negative")

        if noises is None:
            # If noises is None, the weight is one everywhere, so we prepare a
            # FrequencySeries that has the same frequencies as self.
            # Everything will later be resampled to a common set of frequencies.
            noises = FrequencySeries(self.f, np.ones_like(self.fft))

        # "res" = "resampled"
        to_be_res_list = [self, other]
        # If noises is a list, add all of its elements to to_be_res_list
        if isinstance(noises, list):
            to_be_res_list.extend(noises)
        else:
            # noises is not a list, just append it
            to_be_res_list.append(noises)

        if not same_domain:
            # Noise curves typically have Lorentzian features, so it is better
            # to use a zeroth-order spline: we enable piecewise_constant
            [res_self, res_other,
             *res_noises] = sample_common(to_be_res_list,
                                          resample=True,
                                          piecewise_constant=True)
        else:
            [res_self, res_other, *res_noises] = to_be_res_list

        for series in [res_self, res_other, *res_noises]:
            series.negative_frequencies_remove()
            series.band_pass(fmin=fmin, fmax=fmax)

        # Sum all the integrands
        integrand = FrequencySeries(res_self.f, np.zeros_like(res_self.fft))

        # At this point all the series are defined on the same frequencies, so
        # the arithmetic below combines them frequency by frequency.
        for res_noise in res_noises:
            integrand += res_self * res_other.conjugate() / res_noise

        # 4 Re \int
        # To align with PyCBC, we do a rectangular integration here instead of
        # a trapezoidal one
        return 4 * np.sum(integrand.fft.real) * integrand.df
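
A minimal usage sketch for ``inner_product`` follows. The ``FrequencySeries(f, fft)`` constructor is the one used in the method body above; the import path ``kuibit.frequencyseries`` is an assumption about the package layout, and the flat noise curve is purely illustrative.

# Minimal usage sketch for inner_product; the import path is an assumption.
import numpy as np

from kuibit.frequencyseries import FrequencySeries

freqs = np.linspace(20.0, 1024.0, 2000)

# A toy "signal": a smooth bump in frequency space
signal = FrequencySeries(freqs, np.exp(-(((freqs - 200.0) / 50.0) ** 2)))
# A flat (white) noise curve defined on the same frequencies
flat_noise = FrequencySeries(freqs, np.ones_like(freqs))

# (h, h) weighted by the noise curve; with a single noise, sqrt((h, h)) is the
# usual optimal signal-to-noise ratio
hh = signal.inner_product(signal, fmin=20, fmax=1024, noises=flat_noise)
print(np.sqrt(hh))

# With a list of noise curves the detector contributions are summed (network
# inner product); two identical detectors double the single-detector value
network = signal.inner_product(
    signal, fmin=20, fmax=1024, noises=[flat_noise, flat_noise]
)
print(network / hh)  # ~2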