Example #1
def test_space_translation():
    """Compare code-transformed waveform to analytically transformed waveform"""
    print("")
    ell_max = 8
    for s in range(-2, 2+1):
        for ell in range(abs(s), ell_max+1):
            print("\tWorking on spin s =", s, ", ell =", ell)
            for m in range(-ell, ell+1):
                for space_translation in [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]:
                    w_m1 = (samples.single_mode_proportional_to_time(s=s, ell=ell, m=m)
                            .transform(space_translation=space_translation))
                    w_m2 = samples.single_mode_proportional_to_time_supertranslated(s=s, ell=ell, m=m,
                                                                                    space_translation=space_translation)
                    i1A = np.argmin(abs(w_m1.t-(w_m1.t[0]+2*np.linalg.norm(space_translation))))
                    i1B = np.argmin(abs(w_m1.t-(w_m1.t[-1]-2*np.linalg.norm(space_translation))))
                    i2A = np.argmin(abs(w_m2.t-w_m1.t[i1A]))
                    i2B = np.argmin(abs(w_m2.t-w_m1.t[i1B]))
                    assert np.allclose(w_m1.t[i1A:i1B+1], w_m2.t[i2A:i2B+1], rtol=0.0, atol=1e-16), \
                        (w_m1.t[i1A], w_m2.t[i2A], w_m1.t[i1B], w_m2.t[i2B],
                         w_m1.t[i1A:i1B+1].shape, w_m2.t[i2A:i2B+1].shape)
                    data1 = w_m1.data[i1A:i1B+1]
                    data2 = w_m2.data[i2A:i2B+1]
                    assert np.allclose(data1, data2, rtol=0.0, atol=5e-14), \
                        ([s, ell, m],
                         space_translation,
                         [abs(data1-data2).max(),
                          data1.ravel()[np.argmax(abs(data1-data2))],
                          data2.ravel()[np.argmax(abs(data1-data2))]],
                         [np.unravel_index(np.argmax(abs(data1-data2)), data1.shape)[0],
                          list(sf.LM_range(abs(s), ell_max)[np.unravel_index(np.argmax(abs(data1-data2)),
                                                                             data1.shape)[1]])])
Example #2
def test_LM_index(ell_max):
    for ell_min in range(ell_max + 1):
        LM = sf.LM_range(ell_min, ell_max)
        for ell in range(ell_min, ell_max + 1):
            for m in range(-ell, ell + 1):
                assert np.array_equal(np.array([ell, m]),
                                      LM[sf.LM_index(ell, m, ell_min)])
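
As the test above suggests, `sf.LM_range(ell_min, ell_max)` lists (ell, m) pairs with ell increasing and, within each ell, m running from -ell to ell, while `sf.LM_index(ell, m, ell_min)` returns the position of a given pair in that flat layout.  A minimal sketch; the closed-form index in the last assertion is inferred from that ordering rather than taken from this page:

import numpy as np
import spherical_functions as sf

ell_min, ell_max = 2, 4
LM = sf.LM_range(ell_min, ell_max)   # shape (n_modes, 2): [[2, -2], [2, -1], ..., [4, 4]]
i = sf.LM_index(3, -1, ell_min)      # flat index of the (ell=3, m=-1) mode
assert np.array_equal(LM[i], [3, -1])
# Under the ordering above, the index should also equal ell*(ell+1) - ell_min**2 + m
assert i == 3 * (3 + 1) - ell_min**2 + (-1)
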
Example #3
    def __init__(self, *args, **kwargs):
        """Initializer for WaveformModes object

        This initializer is primarily a wrapper around the WaveformBase initializer.  See the docstring of
        WaveformBase for more details.  The only difference in calling is that this takes two additional keyword
        parameters:

        Keyword parameters
        ------------------
        ell_min : int, defaults to 0
        ell_max : int, defaults to -1

        """
        if len(args) == 1 and isinstance(args[0], type(self)):
            other = args[0]
            # Do not directly access __ell_min, __ell_max, or __LM outside of this initializer function; use ell_min,
            #  ell_max, or LM instead
            self.__ell_min = other.__ell_min
            self.__ell_max = other.__ell_max
            self.__LM = np.copy(other.__LM)
        else:
            # Do not directly access __ell_min, __ell_max, or __LM outside of this initializer function; use ell_min,
            # ell_max, or LM instead
            self.__ell_min = kwargs.pop("ell_min", 0)
            self.__ell_max = kwargs.pop("ell_max", -1)
            self.__LM = sf.LM_range(self.__ell_min, self.__ell_max)
        super().__init__(*args, **kwargs)
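
A minimal construction sketch using the keyword parameters that appear elsewhere on this page; the zero-filled data array is only a placeholder:

import numpy as np
import spherical_functions as sf
import scri

ell_min, ell_max = 2, 4
t = np.linspace(0.0, 10.0, num=101)
data = np.zeros((t.size, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
w = scri.WaveformModes(
    t=t,
    data=data,
    ell_min=ell_min,
    ell_max=ell_max,
    frameType=scri.Inertial,
    dataType=scri.h,
    r_is_scaled_out=True,
    m_is_scaled_out=True,
)
assert np.array_equal(w.LM, sf.LM_range(ell_min, ell_max))
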
Example #4
def test_LM_range(ell_max):
    for l_max in range(ell_max + 1):
        for l_min in range(l_max + 1):
            assert np.array_equal(
                sf.LM_range(l_min, l_max),
                np.array([[ell, m] for ell in range(l_min, l_max + 1)
                          for m in range(-ell, ell + 1)]))
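
The length of that range is also available directly as `sf.LM_total_size`; a quick consistency sketch (the closed form is just the sum of 2*ell + 1 over the included ell values):

import spherical_functions as sf

ell_min, ell_max = 2, 8
assert len(sf.LM_range(ell_min, ell_max)) == sf.LM_total_size(ell_min, ell_max)
assert sf.LM_total_size(ell_min, ell_max) == (ell_max + 1)**2 - ell_min**2
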
Example #5
    def ell_max(self, new_ell_max):
        self.__ell_max = new_ell_max
        self.__LM = sf.LM_range(self.ell_min, self.ell_max)
        if self.n_modes != self.__LM.shape[0]:
            warning = "\nWaveform's data.shape={0} does not agree with ".format(self.data.shape) \
                      + "(ell_min,ell_max)=({0},{1}).\n".format(self.ell_min, self.ell_max) \
                      + "Hopefully you are about to reset `data`.  To suppress this warning,\n" \
                      + "reset `data` before resetting ell_min and/or ell_max."
            warnings.warn(warning)
Example #6
def test_supertranslation_inverses():
    w1 = samples.random_waveform_proportional_to_time(rotating=False)
    ell_max = 4
    for ellpp, mpp in sf.LM_range(0, ell_max):
        supertranslation = np.zeros((sf.LM_total_size(0, ell_max), ),
                                    dtype=complex)
        if mpp == 0:
            supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0
        elif mpp < 0:
            supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0
            supertranslation[sf.LM_index(ellpp, -mpp, 0)] = (-1.0)**mpp
        elif mpp > 0:
            supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0j
            supertranslation[sf.LM_index(ellpp, -mpp, 0)] = (-1.0)**mpp * -1.0j
        max_displacement = abs(
            spinsfast.salm2map(supertranslation, 0, ell_max, 4 * ell_max + 1,
                               4 * ell_max + 1)).max()
        w2 = copy.deepcopy(w1)
        w2 = w2.transform(supertranslation=supertranslation)
        w2 = w2.transform(supertranslation=-supertranslation)

        i1A = np.argmin(abs(w1.t - (w1.t[0] + 3 * max_displacement)))
        i1B = np.argmin(abs(w1.t - (w1.t[-1] - 3 * max_displacement)))
        i2A = np.argmin(abs(w2.t - w1.t[i1A]))
        i2B = np.argmin(abs(w2.t - w1.t[i1B]))
        try:
            assert np.allclose(w1.t[i1A:i1B + 1],
                               w2.t[i2A:i2B + 1],
                               rtol=0.0,
                               atol=1e-15), (
                                   w1.t[i1A],
                                   w2.t[i2A],
                                   w1.t[i1B],
                                   w2.t[i2B],
                                   w1.t[i1A:i1B + 1].shape,
                                   w2.t[i2A:i2B + 1].shape,
                               )
        except ValueError:
            print("Indices:\n\t", i1A, i1B, i2A, i2B)
            print("Times:\n\t", w1.t[i1A], w1.t[i1B], w2.t[i2A], w2.t[i2B])
            raise
        data1 = w1.data[i1A:i1B + 1]
        data2 = w2.data[i2A:i2B + 1]
        try:
            assert np.allclose(data1, data2, rtol=5e-10, atol=5e-14), [
                abs(data1 - data2).max(),
                data1.ravel()[np.argmax(abs(data1 - data2))],
                data2.ravel()[np.argmax(abs(data1 - data2))],
                np.unravel_index(np.argmax(abs(data1 - data2)), data1.shape),
            ]
            # list(sf.LM_range(0, ell_max)[np.unravel_index(np.argmax(abs(data1-data2)),
            #                                               data1.shape)[1]])])
        except:
            print("Indices:\n\t", i1A, i1B, i2A, i2B)
            print("Times:\n\t", w1.t[i1A], w1.t[i1B], w2.t[i2A], w2.t[i2B])
            raise
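
The branches on the sign of mpp above pack the modes so that a(ell, -m) = (-1)**m * conj(a(ell, m)), which makes the supertranslation a real function on the sphere.  A small sketch checking that property for a single (ellpp, mpp) pair, reusing the `spinsfast.salm2map` call pattern from the test:

import numpy as np
import spinsfast
import spherical_functions as sf

ell_max = 4
ellpp, mpp = 3, 2
supertranslation = np.zeros((sf.LM_total_size(0, ell_max),), dtype=complex)
supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0j
supertranslation[sf.LM_index(ellpp, -mpp, 0)] = (-1.0)**mpp * -1.0j
values = spinsfast.salm2map(supertranslation, 0, ell_max, 4 * ell_max + 1, 4 * ell_max + 1)
# For modes satisfying the reality condition, the map should be real up to round-off
assert np.max(np.abs(values.imag)) < 1e-13
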
Example #7
    def ell_min(self, new_ell_min):
        self.__ell_min = new_ell_min
        self.__LM = sf.LM_range(self.ell_min, self.ell_max)
        if self.n_modes != self.__LM.shape[0]:
            warning = (
                f"\nWaveform's data.shape={self.data.shape} does not agree with "
                + f"(ell_min,ell_max)=({self.ell_min},{self.ell_max}).\n"
                + "Hopefully you are about to reset `data`.  To suppress this warning,\n"
                + "reset `data` before resetting ell_min and/or ell_max.")
            warnings.warn(warning)
Example #8
    def ells(self, new_ells):
        """Setting both at once can be necessary when changing the shape of `data`"""
        self.__ell_min = new_ells[0]
        self.__ell_max = new_ells[1]
        self.__LM = sf.LM_range(self.ell_min, self.ell_max)
        if self.n_modes != self.__LM.shape[0]:
            warning = "\nWaveform's data.shape={0} does not agree with ".format(self.data.shape) \
                      + "(ell_min,ell_max)=({0},{1}).\n".format(self.ell_min, self.ell_max) \
                      + "Hopefully you are about to reset `data`.  To avoid this warning,\n" \
                      + "reset `data` before resetting ell_min and/or ell_max."
            warnings.warn(warning)
Example #9
def test_hyper_translation():
    """Compare code-transformed waveform to analytically transformed waveform"""
    print("")
    ell_max = 4
    for s in range(-2, 2+1):
        for ell in range(abs(s), ell_max+1):
            for m in range(-ell, ell+1):
                print("\tWorking on spin s =", s, ", ell =", ell, ", m =", m)
                for ellpp, mpp in sf.LM_range(2, ell_max):
                    supertranslation = np.zeros((sf.LM_total_size(0, ell_max),), dtype=complex)
                    if mpp == 0:
                        supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0
                    elif mpp < 0:
                        supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0
                        supertranslation[sf.LM_index(ellpp, -mpp, 0)] = (-1.0)**mpp
                    elif mpp > 0:
                        supertranslation[sf.LM_index(ellpp, mpp, 0)] = 1.0j
                        supertranslation[sf.LM_index(ellpp, -mpp, 0)] = (-1.0)**mpp * -1.0j
                    max_displacement = abs(spinsfast.salm2map(supertranslation, 0,
                                                              ell_max, 4*ell_max+1, 4*ell_max+1)).max()
                    w_m1 = (samples.single_mode_proportional_to_time(s=s, ell=ell, m=m)
                            .transform(supertranslation=supertranslation))
                    w_m2 = samples.single_mode_proportional_to_time_supertranslated(s=s, ell=ell, m=m,
                                                                                    supertranslation=supertranslation)
                    i1A = np.argmin(abs(w_m1.t-(w_m1.t[0]+2*max_displacement)))
                    i1B = np.argmin(abs(w_m1.t-(w_m1.t[-1]-2*max_displacement)))
                    i2A = np.argmin(abs(w_m2.t-w_m1.t[i1A]))
                    i2B = np.argmin(abs(w_m2.t-w_m1.t[i1B]))
                    assert np.allclose(w_m1.t[i1A:i1B+1], w_m2.t[i2A:i2B+1], rtol=0.0, atol=1e-16), \
                        (w_m1.t[i1A], w_m2.t[i2A], w_m1.t[i1B], w_m2.t[i2B],
                         w_m1.t[i1A:i1B+1].shape, w_m2.t[i2A:i2B+1].shape)
                    data1 = w_m1.data[i1A:i1B+1]
                    data2 = w_m2.data[i2A:i2B+1]
                    assert np.allclose(data1, data2, rtol=0.0, atol=5e-14), \
                        ([s, ell, m],
                         supertranslation,
                         [abs(data1-data2).max(),
                          data1.ravel()[np.argmax(abs(data1-data2))], data2.ravel()[np.argmax(abs(data1-data2))]],
                         [np.unravel_index(np.argmax(abs(data1-data2)), data1.shape)[0],
                          list(sf.LM_range(abs(s), ell_max)[np.unravel_index(np.argmax(abs(data1-data2)),
                                                                             data1.shape)[1]])])
Example #10
def test_SWSH_conjugation(special_angles, ell_max):
    # {s}Y{l,m}.conjugate() = (-1.)**(s+m) {-s}Y{l,-m}
    indices1 = sf.LM_range(0, ell_max)
    indices2 = np.array([[ell, -m] for ell, m in indices1])
    neg1_to_m = np.array([(-1.)**m for ell, m in indices1])
    for iota in special_angles:
        for phi in special_angles:
            R = quaternion.from_spherical_coords(iota, phi)
            for s in range(1-ell_max, ell_max):
                assert np.allclose(sf.SWSH(R, s, indices1),
                                   (-1.)**s * neg1_to_m * np.conjugate(sf.SWSH(R, -s, indices2)),
                                   atol=1e-15, rtol=1e-15)
Example #11
    def validator(abd):
        check_modes(abd.psi0, sf.LM_range(abs(abd.psi0.s), ell_max))
        check_modes(abd.psi1, sf.LM_range(abs(abd.psi1.s), ell_max))
        check_modes(abd.psi2, sf.LM_range(abs(abd.psi2.s), ell_max))
        check_modes(abd.psi3, sf.LM_range(2, ell_max))
        check_modes(abd.psi4, sf.LM_range(abs(abd.psi4.s), ell_max))
        check_modes(abd.sigma, sf.LM_range(abs(abd.sigma.s), ell_max))
        assert np.max(np.abs(abd.bondi_violation_norms)) <= 4.5e-6
Example #12
def modes_constructor(constructor_statement, data_functor, **kwargs):
    """WaveformModes object filled with data from the input functor

    Additional keyword arguments are mostly passed to the WaveformModes initializer, though some more reasonable
    defaults are provided.

    Parameters
    ----------
    constructor_statement : str
        This is a string form of the function call used to create the object.  This is passed to the WaveformBase
        initializer as the parameter of the same name.  See the docstring for more information.
    data_functor : function
        Takes a 1-d array of time values and an array of (ell, m) values and returns the complex array of data.
    t : float array, optional
        Time values of the data.  Default is `np.linspace(-10., 100., num=1101)`.
    ell_min, ell_max : int, optional
        Smallest and largest ell values present in the data.  Defaults are `abs(scri.SpinWeights[dataType])` (2 for the default `dataType`) and 8.

    """
    t = np.array(kwargs.pop("t", np.linspace(-10.0, 100.0, num=1101)),
                 dtype=float)
    frame = np.array(kwargs.pop("frame", []), dtype=np.quaternion)
    frameType = int(kwargs.pop("frameType", scri.Inertial))
    dataType = int(kwargs.pop("dataType", scri.h))
    r_is_scaled_out = bool(kwargs.pop("r_is_scaled_out", True))
    m_is_scaled_out = bool(kwargs.pop("m_is_scaled_out", True))
    ell_min = int(kwargs.pop("ell_min", abs(scri.SpinWeights[dataType])))
    ell_max = int(kwargs.pop("ell_max", 8))
    if kwargs:
        import pprint

        warnings.warn(
            f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}"
        )
    data = data_functor(t, sf.LM_range(ell_min, ell_max))
    w = scri.WaveformModes(
        t=t,
        frame=frame,
        data=data,
        history=["# Called from constant_waveform"],
        frameType=frameType,
        dataType=dataType,
        r_is_scaled_out=r_is_scaled_out,
        m_is_scaled_out=m_is_scaled_out,
        constructor_statement=constructor_statement,
        ell_min=ell_min,
        ell_max=ell_max,
    )
    return w
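
A hypothetical usage sketch: `data_functor` receives the time array and the (ell, m) array from `sf.LM_range(ell_min, ell_max)` and must return a complex array of shape (len(t), n_modes).  The helper `linear_22_mode` below is illustrative only; it builds a waveform whose (2, 2) mode grows linearly in time:

import numpy as np
import spherical_functions as sf

def linear_22_mode(t, LM):
    data = np.zeros((t.size, LM.shape[0]), dtype=complex)
    data[:, sf.LM_index(2, 2, LM[0, 0])] = t  # LM[0, 0] is ell_min
    return data

w = modes_constructor("linear_22_mode(t, LM)", linear_22_mode, ell_min=2, ell_max=8)
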
Example #13
def test_SWSH_signatures(Rs):
    """There are two ways to call the SWSH function: with an array of Rs, or with an array of (ell,m) values.  This
    test ensures that the results are the same in both cases."""
    s_max = 5
    ss = np.arange(-s_max, s_max + 1)
    ell_max = 5
    ell_ms = sf.LM_range(0, ell_max)
    SWSHs1 = np.empty((Rs.size, ss.size, ell_ms.size // 2), dtype=complex)
    SWSHs2 = np.empty_like(SWSHs1)
    for i_s, s in enumerate(ss):
        for i_ellm, (ell, m) in enumerate(ell_ms):
            SWSHs1[:, i_s, i_ellm] = sf.SWSH(Rs, s, [ell, m])
    for i_s, s in enumerate(ss):
        for i_R, R in enumerate(Rs):
            SWSHs2[i_R, i_s, :] = sf.SWSH(R, s, ell_ms)
    assert np.array_equal(SWSHs1, SWSHs2)
Example #14
def test_SWSH_grid(special_angles, ell_max):
    LM = sf.LM_range(0, ell_max)

    # Test flat array arrangement
    R_grid = np.array([quaternion.from_euler_angles(alpha, beta, gamma).normalized()
                       for alpha in special_angles
                       for beta in special_angles
                       for gamma in special_angles])
    for s in range(-ell_max + 1, ell_max):
        values_explicit = np.array([sf.SWSH(R, s, LM) for R in R_grid])
        values_grid = sf.SWSH_grid(R_grid, s, ell_max)
        assert np.array_equal(values_explicit, values_grid)

    # Test nested array arrangement
    R_grid = np.array([[[quaternion.from_euler_angles(alpha, beta, gamma)
                         for alpha in special_angles]
                        for beta in special_angles]
                       for gamma in special_angles])
    for s in range(-ell_max + 1, ell_max):
        values_explicit = np.array([[[sf.SWSH(R, s, LM) for R in R1] for R1 in R2] for R2 in R_grid])
        values_grid = sf.SWSH_grid(R_grid, s, ell_max)
        assert np.array_equal(values_explicit, values_grid)
Example #15
File: file_io.py  Project: haraldp271/scri
def read_from_h5(file_name, **kwargs):
    """Read data from an H5 file in SXS format

    Note that SXS format is essentially compatible with NRAR format.  The existence of this function is not to be
    taken as the author's endorsement of either SXS or NRAR format.

    Parameters
    ----------
    file_name : str
        Path to H5 file containing the data, optionally including the path within the file itself to the directory
        containing the data.  For example, the standard SXS data with N=2 might be obtained with the file name
        `'rhOverM_Asymptotic_GeometricUnits.h5/Extrapolated_N2.dir'`.

    Keyword parameters
    ------------------
    frameType : int, optional
    dataType : int, optional
    r_is_scaled_out : bool, optional
    m_is_scaled_out : bool, optional
        These four parameters are documented in the docstring of the WaveformBase object.  Note that if any of these
        is present in the H5 file (which is not common), that value will override this argument.  If neither the file
        nor these parameters are present, defaults will be applied, assuming that the frame is inertial, R and M are
        both scaled out, and the data type (psi4, hdot, or h) can be gleaned from `file_name`.

    """

    import os.path
    import re
    import h5py
    import quaternion

    # This unfortunate concoction is needed to determine the (ell,m) values of the various mode data sets
    pattern_Ylm = re.compile(r"""Y_l(?P<L>[0-9]+)_m(?P<M>[-+0-9]+)\.dat""")

    # Initialize an empty object to be filled with goodies
    w = WaveformModes(
        constructor_statement='scri.SpEC.read_from_h5("{0}", **{1})'.format(
            file_name, kwargs))

    # Get an h5py handle to the desired part of the h5 file
    try:
        file_name, root_group = file_name.rsplit('.h5', 1)
        file_name += '.h5'
    except ValueError:
        root_group = ''  # FileName is just a file, not a group in a file
    try:
        f_h5 = h5py.File(file_name, 'r')
    except IOError:
        print("\n`read_from_h5` could not open the file '{0}'\n\n".format(
            file_name))
        raise
    if root_group:
        f = f_h5[root_group]
    else:
        f = f_h5

    # If it exists, add the metadata file to `w` as an object.  So, for example, the initial spin on object 1 can be
    # accessed as `w.metadata.initial_spin1`.  See the documentation of `scri.SpEC.read_metadata_into_object`
    # for more details.  And in IPython, tab completion works on the `w.metadata` object.
    try:
        w.metadata = read_metadata_into_object(
            os.path.join(os.path.dirname(file_name), 'metadata.txt'))
    except:
        pass  # Probably couldn't find the metadata.txt file

    try:  # Make sure the h5py.File gets closed, even in the event of an exception

        # Add the old history to the new history, if found
        try:
            try:
                old_history = f['History.txt'][()].decode()
            except AttributeError:
                old_history = f['History.txt'][()]
            w._append_history("", 0)
            w._append_history("<previous_history>", 2)
            w._append_history(old_history, 1)
            w._append_history("</previous_history>", 2)
        except KeyError:
            pass  # Did not find a history

        # Get the frame data, converting to quaternion objects
        try:
            w.frame = quaternion.as_quat_array(f['Frame'])
        except KeyError:
            pass  # There was no frame data

        # Get the descriptive items
        try:
            w.frameType = int(f.attrs['FrameType'])
        except KeyError:
            if 'frameType' in kwargs:
                w.frameType = int(kwargs.pop('frameType'))
            else:
                warning = (
                    "\n`frameType` was not found in '{0}' or the keyword arguments.\n"
                    .format(file_name) +
                    "Using default value `{0}`.  You may want to set it manually.\n\n"
                    .format(FrameNames[1]))
                warnings.warn(warning)
                w.frameType = 1
        try:
            w.dataType = translate_data_types_GWFrames_to_waveforms(
                int(f.attrs['DataType']))
        except KeyError:
            if 'dataType' in kwargs:
                w.dataType = int(kwargs.pop('dataType'))
            else:
                found = False
                for type_int, type_name in zip(reversed(DataType),
                                               reversed(DataNames)):
                    if type_name.lower() in file_name.lower():
                        found = True
                        w.dataType = type_int
                        warning = (
                            "\n`dataType` was not found in '{0}' or the keyword arguments.\n"
                            .format(file_name) +
                            "Using default value `{0}`.  You may want to set it manually."
                            .format(type_name))
                        warnings.warn(warning)
                        break
                if not found:
                    warning = (
                        "\n`dataType` was not found in '{0}' or the keyword arguments.\n"
                        .format(file_name) +
                        "You may want to set it manually.")
                    warnings.warn(warning)
        try:
            w.r_is_scaled_out = bool(f.attrs['RIsScaledOut'])
        except KeyError:
            if 'r_is_scaled_out' in kwargs:
                w.r_is_scaled_out = bool(kwargs.pop('r_is_scaled_out'))
            else:
                warning = (
                    "\n`r_is_scaled_out` was not found in '{0}' or the keyword arguments.\n"
                    .format(file_name) +
                    "Using default value `True`.  You may want to set it manually.\n\n"
                )
                warnings.warn(warning)
                w.r_is_scaled_out = True
        try:
            w.m_is_scaled_out = bool(f.attrs['MIsScaledOut'])
        except KeyError:
            if 'm_is_scaled_out' in kwargs:
                w.m_is_scaled_out = bool(kwargs.pop('m_is_scaled_out'))
            else:
                warning = (
                    "\n`m_is_scaled_out` was not found in '{0}' or the keyword arguments.\n"
                    .format(file_name) +
                    "Using default value `True`.  You may want to set it manually.\n\n"
                )
                warnings.warn(warning)
                w.m_is_scaled_out = True

        # Get the names of all the data sets in the h5 file, and check for matches
        YLMdata = [
            data_set for data_set in list(f)
            for m in [pattern_Ylm.search(data_set)] if m
        ]
        if len(YLMdata) == 0:
            raise ValueError(
                "Couldn't understand data set names in '{0}'.\n".format(
                    file_name) +
                "Maybe you need to add the directory within the h5 file.\n" +
                "E.g.: '{0}/Extrapolated_N2.dir'.".format(file_name))

        # Sort the data set names by increasing ell, then increasing m
        YLMdata = sorted(YLMdata,
                         key=lambda data_set: [
                             int(pattern_Ylm.search(data_set).group('L')),
                             int(pattern_Ylm.search(data_set).group('M'))
                         ])
        LM = np.array(
            sorted([[int(m.group('L')), int(m.group('M'))]
                    for data_set in YLMdata
                    for m in [pattern_Ylm.search(data_set)] if m]))
        ell_min, ell_max = min(LM[:, 0]), max(LM[:, 0])
        if not np.array_equal(LM, sf.LM_range(ell_min, ell_max)):
            raise ValueError(
                "Input [ell,m] modes are not complete.  Found modes:\n{0}\n".
                format(LM))
        n_modes = len(LM)

        # Get the time data (assuming all are equal)
        T = f[YLMdata[0]][:, 0]
        monotonic = index_is_monotonic(T)
        w.t = T[monotonic]
        n_times = len(w.t)

        # Loop through, setting data in each mode
        w.data = np.empty((n_times, n_modes), dtype=complex)
        for m, DataSet in enumerate(YLMdata):
            if f[DataSet].shape[0] != n_times:
                raise ValueError(
                    "The number of time steps in this dataset should be {0}; ".
                    format(n_times) +
                    "it is {0} in '{1}'.".format(f[DataSet].shape[0], DataSet))
            w.data[:, m] = f[DataSet][:, 1:3].view(dtype=complex)[monotonic, 0]

        # Now that the data is set, we can set these
        w.ells = ell_min, ell_max

        # Check up on the validity of the waveform
        if not w.ensure_validity(alter=True, assertions=False):
            raise ValueError("The data resulting from this input is invalid")

    except KeyError:
        print(
            "\nThis H5 file appears to have not stored all the required information.\n\n"
        )
        raise  # Re-raise the exception after adding our information

    finally:  # Use `finally` to make sure this happens:
        f_h5.close()

    if kwargs:
        import pprint
        warnings.warn("\nUnused kwargs passed to this function:\n{0}".format(
            pprint.pformat(kwargs, width=1)))

    return w
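
A usage sketch based on the docstring above; the file names are illustrative, and the keyword argument is only needed when the data type cannot be gleaned from the file name:

import scri

w = read_from_h5("rhOverM_Asymptotic_GeometricUnits.h5/Extrapolated_N2.dir")
print(w.ell_min, w.ell_max, w.data.shape)
w2 = read_from_h5("Waveform.h5/Extrapolated_N2.dir", dataType=scri.h)
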
Example #16
    def ensure_validity(self, alter=True, assertions=False):
        """Try to ensure that the `WaveformModes` object is valid

        See `WaveformBase.ensure_validity` for the basic tests.  This function also includes tests that `data` is
        complex, and consistent with the ell_min and ell_max values.

        """
        import numbers

        errors = []
        alterations = []

        if assertions:
            from .waveform_base import test_with_assertions

            test = test_with_assertions
        else:
            from .waveform_base import test_without_assertions

            test = test_without_assertions

        # We first need to check that the ell values make sense,
        # because we'll use these below
        test(
            errors,
            isinstance(self.__ell_min, numbers.Integral),
            "isinstance(self.__ell_min, numbers.Integral) # type(self.__ell_min)={}"
            .format(type(self.__ell_min)),
        )
        test(
            errors,
            isinstance(self.__ell_max, numbers.Integral),
            "isinstance(self.__ell_max, numbers.Integral) # type(self.__ell_max)={}"
            .format(type(self.__ell_max)),
        )
        test(errors, self.__ell_min >= 0,
             f"self.__ell_min>=0 # {self.__ell_min}")
        test(
            errors,
            self.__ell_max >= self.__ell_min - 1,
            "self.__ell_max>=self.__ell_min-1 # self.__ell_max={}; self.__ell_min-1={}"
            .format(self.__ell_max, self.__ell_min - 1),
        )
        if alter and not np.array_equal(
                self.__LM, sf.LM_range(self.ell_min, self.ell_max)):
            self.__LM = sf.LM_range(self.ell_min, self.ell_max)
            alterations += [
                "{}._{}__LM = sf.LM_range({}, {})".format(
                    self,
                    type(self).__name__, self.ell_min, self.ell_max)
            ]
        test(
            errors,
            np.array_equal(self.__LM, sf.LM_range(self.ell_min, self.ell_max)),
            "np.array_equal(self.__LM, sf.LM_range(self.ell_min, self.ell_max))",
        )

        test(
            errors,
            self.data.dtype == np.dtype(complex),
            f"self.data.dtype == np.dtype(complex) # self.data.dtype={self.data.dtype}",
        )
        test(errors, self.data.ndim >= 2,
             f"self.data.ndim >= 2 # self.data.ndim={self.data.ndim}")
        test(
            errors,
            self.data.shape[1] == self.__LM.shape[0],
            "self.data.shape[1]==self.__LM.shape[0] "
            "# self.data.shape={}; self.__LM.shape[0]={}".format(
                self.data.shape[1], self.__LM.shape[0]),
        )

        if alterations:
            self._append_history(alterations)
            print("The following alterations were made:\n\t" +
                  "\n\t".join(alterations))
        if errors:
            print(
                "The following conditions were found to be incorrectly False:\n\t"
                + "\n\t".join(errors))
            return False

        # Call the base class's version
        super().ensure_validity(alter, assertions)

        self.__history_depth__ -= 1
        self._append_history(
            "WaveformModes.ensure_validity" +
            f"({self}, alter={alter}, assertions={assertions})")

        return True
Example #17
File: __init__.py  Project: 10220/scri
def read_from_h5(file_name, **kwargs):
    """Read data from an H5 file in SXS format

    Note that SXS format is essentially compatible with NRAR format.  The existence of this function is not to be
    taken as the author's endorsement of either SXS or NRAR format.

    Parameters
    ----------
    file_name : str
        Path to H5 file containing the data, optionally including the path within the file itself to the directory
        containing the data.  For example, the standard SXS data with N=2 might be obtained with the file name
        `'rhOverM_Asymptotic_GeometricUnits.h5/Extrapolated_N2.dir'`.

    Keyword parameters
    ------------------
    frameType : int, optional
    dataType : int, optional
    r_is_scaled_out : bool, optional
    m_is_scaled_out : bool, optional
        These four parameters are documented in the docstring of the WaveformBase object.  Note that if any of these
        is present in the H5 file (which is not common), that value will override this argument.  If neither the file
        nor these parameters are present, defaults will be applied, assuming that the frame is inertial, R and M are
        both scaled out, and the data type (hdot, h, psi4, psi3, psi2, psi1, or psi0) can be gleaned from `file_name`.

    """

    import ast
    import os.path
    import re
    import h5py
    import quaternion

    # This unfortunate concoction is needed to determine the (ell,m) values of the various mode data sets
    pattern_Ylm = re.compile(r"""Y_l(?P<L>[0-9]+)_m(?P<M>[-+0-9]+)\.dat""")

    # Initialize an empty object to be filled with goodies
    w = WaveformModes(constructor_statement=f'scri.SpEC.read_from_h5("{file_name}", **{kwargs})')

    # Get an h5py handle to the desired part of the h5 file
    try:
        file_name, root_group = file_name.rsplit(".h5", 1)
        file_name += ".h5"
    except ValueError:
        root_group = ""  # FileName is just a file, not a group in a file
    try:
        f_h5 = h5py.File(file_name, "r")
    except OSError:
        print(f"\n`read_from_h5` could not open the file '{file_name}'\n\n")
        raise
    if root_group:
        f = f_h5[root_group]
    else:
        f = f_h5

    # If it exists, add the metadata file to `w` as an object.  So, for example, the initial spin on
    # object 1 can be accessed as `w.metadata.initial_spin1`.  See the documentation of
    # `sxs.metadata.Metadata` for more details.  And in IPython, tab completion works on the
    # `w.metadata` object.
    try:
        w.metadata = Metadata.from_file(
            os.path.join(os.path.dirname(file_name), "metadata"), ignore_invalid_lines=True, cache_json=False
        )
    except:
        pass  # Probably couldn't find the metadata.json/metadata.txt file

    try:  # Make sure the h5py.File gets closed, even in the event of an exception

        # Add the old history to the new history, if found
        try:
            try:
                old_history = f["History.txt"][()].decode()
            except AttributeError:
                old_history = f["History.txt"][()]
            w._append_history("", 0)
            w._append_history("<previous_history>", 2)
            w._append_history(old_history, 1)
            w._append_history("</previous_history>", 2)
        except KeyError:
            old_history = False

        # Get the frame data, converting to quaternion objects
        try:
            w.frame = quaternion.as_quat_array(f["Frame"])
        except KeyError:
            pass  # There was no frame data

        # Get the descriptive items
        try:
            w.frameType = int(f.attrs["FrameType"])
        except KeyError:
            if "frameType" in kwargs:
                w.frameType = int(kwargs.pop("frameType"))
            else:
                warning = (
                    f"\n`frameType` was not found in '{file_name}' or the keyword arguments.\n"
                    + "Using default value `{}`.  You may want to set it manually.\n\n".format(FrameNames[1])
                )
                warnings.warn(warning)
                w.frameType = 1
        try:
            w.dataType = translate_data_types_GWFrames_to_waveforms(int(f.attrs["DataType"]))
        except KeyError:
            if "dataType" in kwargs:
                w.dataType = int(kwargs.pop("dataType"))
            else:
                found = False
                for type_int, type_name in zip(reversed(DataType), reversed(DataNames)):
                    if type_name.lower() in file_name.lower():
                        found = True
                        w.dataType = type_int
                        warning = (
                            f"\n`dataType` was not found in '{file_name}' or the keyword arguments.\n"
                            + f"Using default value `{type_name}`.  You may want to set it manually."
                        )
                        warnings.warn(warning)
                        break
                if not found:
                    warning = (
                        f"\n`dataType` was not found in '{file_name}' or the keyword arguments.\n"
                        + "You may want to set it manually."
                    )
                    warnings.warn(warning)
        try:
            w.r_is_scaled_out = bool(f.attrs["RIsScaledOut"])
        except KeyError:
            if "r_is_scaled_out" in kwargs:
                w.r_is_scaled_out = bool(kwargs.pop("r_is_scaled_out"))
            else:
                warning = (
                    f"\n`r_is_scaled_out` was not found in '{file_name}' or the keyword arguments.\n"
                    + "Using default value `True`.  You may want to set it manually.\n\n"
                )
                warnings.warn(warning)
                w.r_is_scaled_out = True
        try:
            w.m_is_scaled_out = bool(f.attrs["MIsScaledOut"])
        except KeyError:
            if "m_is_scaled_out" in kwargs:
                w.m_is_scaled_out = bool(kwargs.pop("m_is_scaled_out"))
            else:
                warning = (
                    f"\n`m_is_scaled_out` was not found in '{file_name}' or the keyword arguments.\n"
                    + "Using default value `True`.  You may want to set it manually.\n\n"
                )
                warnings.warn(warning)
                w.m_is_scaled_out = True

        # Get the names of all the data sets in the h5 file, and check for matches
        YLMdata = [data_set for data_set in list(f) for m in [pattern_Ylm.search(data_set)] if m]
        if len(YLMdata) == 0:
            raise ValueError(
                f"Couldn't understand data set names in '{file_name}'.\n"
                + "Maybe you need to add the directory within the h5 file.\n"
                + f"E.g.: '{file_name}/Extrapolated_N2.dir'."
            )

        # Sort the data set names by increasing ell, then increasing m
        YLMdata = sorted(
            YLMdata,
            key=lambda data_set: [
                int(pattern_Ylm.search(data_set).group("L")),
                int(pattern_Ylm.search(data_set).group("M")),
            ],
        )
        LM = np.array(
            sorted(
                [
                    [int(m.group("L")), int(m.group("M"))]
                    for data_set in YLMdata
                    for m in [pattern_Ylm.search(data_set)]
                    if m
                ]
            )
        )
        ell_min, ell_max = min(LM[:, 0]), max(LM[:, 0])
        if not np.array_equal(LM, sf.LM_range(ell_min, ell_max)):
            raise ValueError(f"Input [ell,m] modes are not complete.  Found modes:\n{LM}\n")
        n_modes = len(LM)

        # Get the time data (assuming all are equal)
        T = f[YLMdata[0]][:, 0]
        monotonic = index_is_monotonic(T)
        w.t = T[monotonic]
        n_times = len(w.t)

        # Loop through, setting data in each mode
        w.data = np.empty((n_times, n_modes), dtype=complex)
        for m, DataSet in enumerate(YLMdata):
            if f[DataSet].shape[0] != n_times:
                raise ValueError(
                    f"The number of time steps in this dataset should be {n_times}; "
                    + "it is {} in '{}'.".format(f[DataSet].shape[0], DataSet)
                )
            w.data[:, m] = f[DataSet][:, 1:3].view(dtype=complex)[monotonic, 0]

        # Now that the data is set, we can set these
        w.ells = ell_min, ell_max

        # If possible, retrieve the CoM-correction parameters
        try:
            if hasattr(f, "attrs") and "space_translation" in f.attrs:
                w.space_translation = np.array(list(f.attrs["space_translation"]))
            elif old_history:
                pattern = r"'{}': array\((.*?)\)".format("space_translation")
                matches = re.search(pattern, old_history)
                if matches:
                    w.space_translation = np.array(ast.literal_eval(matches.group(1)))
        except:
            pass
        try:
            if hasattr(f, "attrs") and "boost_velocity" in f.attrs:
                w.boost_velocity = np.array(list(f.attrs["boost_velocity"]))
            elif old_history:
                pattern = r"'{}': array\((.*?)\)".format("boost_velocity")
                matches = re.search(pattern, old_history)
                if matches:
                    w.boost_velocity = np.array(ast.literal_eval(matches.group(1)))
        except:
            pass

        # If possible, retrieve the CoM-correction parameters
        try:
            if "VersionHist.ver" in f_h5:
                w.version_hist = [
                    [git_hash.decode("ascii"), message.decode("ascii")]
                    for git_hash, message in f_h5["VersionHist.ver"][()].tolist()
                ]
        except:
            pass

        # Check up on the validity of the waveform
        if not w.ensure_validity(alter=True, assertions=False):
            raise ValueError("The data resulting from this input is invalid")

    except KeyError:
        print("\nThis H5 file appears to have not stored all the required information.\n\n")
        raise  # Re-raise the exception after adding our information

    finally:  # Use `finally` to make sure this happens:
        f_h5.close()

    if kwargs:
        import pprint

        warnings.warn("\nUnused kwargs passed to this function:\n{}".format(pprint.pformat(kwargs, width=1)))

    return w
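
A usage sketch for this newer variant; the CoM-correction attributes are only set when the corresponding information is actually stored in the H5 file or its history, so they are checked with `hasattr` here:

w = read_from_h5("rhOverM_Asymptotic_GeometricUnits.h5/Extrapolated_N2.dir")
if hasattr(w, "space_translation"):
    print("CoM space translation:", w.space_translation)
if hasattr(w, "boost_velocity"):
    print("CoM boost velocity:", w.boost_velocity)
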
Example #18
def single_mode_proportional_to_time_supertranslated(**kwargs):
    """Return WaveformModes as in single_mode_proportional_to_time, with analytical supertranslation

    This function constructs the same basic object as the `single_mode_proportional_to_time`, but then applies an
    analytical supertranslation.  The arguments to this function are the same as to the other, with two additions:

    Additional parameters
    ---------------------
    supertranslation : complex array, optional
        Spherical-harmonic modes of the supertranslation to apply to the waveform.  Its ell=1 components are
        overwritten by `space_translation` if that argument is present.  Default value is `None`.
    space_translation : float array of length 3, optional
        This is just the 3-vector representing the displacement to apply to the waveform.  Note that if
        `supertranslation` is also given, this parameter overwrites its ell=1 components.  If absent, no spatial
        translation is applied.

    """
    s = kwargs.pop("s", -2)
    ell = kwargs.pop("ell", abs(s))
    m = kwargs.pop("m", -ell)
    ell_min = kwargs.pop("ell_min", abs(s))
    ell_max = kwargs.pop("ell_max", 8)
    data_type = kwargs.pop("data_type",
                           scri.DataType[scri.SpinWeights.index(s)])
    t_0 = kwargs.pop("t_0", -20.0)
    t_1 = kwargs.pop("t_1", 20.0)
    dt = kwargs.pop("dt", 1.0 / 10.0)
    t = np.arange(t_0, t_1 + dt, dt)
    n_times = t.size
    beta = kwargs.pop("beta", 1.0)
    data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)),
                    dtype=complex)
    data[:, sf.LM_index(ell, m, ell_min)] = beta * t
    supertranslation = np.array(kwargs.pop("supertranslation",
                                           np.array([], dtype=complex)),
                                dtype=complex)
    if "space_translation" in kwargs:
        if supertranslation.size < 4:
            supertranslation.resize((4, ))
        supertranslation[1:4] = -sf.vector_as_ell_1_modes(
            kwargs.pop("space_translation"))
    supertranslation_ell_max = int(math.sqrt(supertranslation.size) - 1)
    if supertranslation_ell_max * (supertranslation_ell_max +
                                   2) + 1 != supertranslation.size:
        raise ValueError(
            f"Bad number of elements in supertranslation: {supertranslation.size}"
        )
    for i, (ellpp, mpp) in enumerate(sf.LM_range(0, supertranslation_ell_max)):
        if supertranslation[i] != 0.0:
            mp = m + mpp
            for ellp in range(ell_min, min(ell_max, (ell + ellpp)) + 1):
                if ellp >= abs(mp):
                    addition = (beta * supertranslation[i] * math.sqrt(
                        ((2 * ellpp + 1) * (2 * ell + 1) *
                         (2 * ellp + 1)) / (4 * math.pi)) *
                                sf.Wigner3j(ellpp, ell, ellp, 0, -s, s) *
                                sf.Wigner3j(ellpp, ell, ellp, mpp, m, -mp))
                    if (s + mp) % 2 == 1:
                        addition *= -1
                    data[:, sf.LM_index(ellp, mp, ell_min)] += addition

    if kwargs:
        import pprint

        warnings.warn(
            f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}"
        )

    return scri.WaveformModes(
        t=t,
        data=data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=scri.Inertial,
        dataType=data_type,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
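
A minimal usage sketch, mirroring how Example #1 above compares this analytic result against a numerically transformed waveform:

w = single_mode_proportional_to_time_supertranslated(s=-2, ell=2, m=2,
                                                     space_translation=[0.0, 0.0, 1.0])
print(w.ell_min, w.ell_max, w.data.shape)
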
Example #19
    def LM(self):
        return spherical_functions.LM_range(self.ell_min, self.ell_max)