    def test_evaluate_pseudorandom(self):
        """Test evaluate with pseudorandom inputs."""

        for seed, num_terms, dim in [(30493, 3, 5), (94818, 5, 10)]:
            rng = np.random.default_rng(seed)
            b = 1.0  # bound on the magnitude of the random terms

            # random Hermitian frame operator
            rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(
                low=-b, high=b, size=(dim, dim)
            )
            frame_op = Array(rand_op + rand_op.conj().transpose())

            # random Hermitian operators
            rand_ops = rng.uniform(
                low=-b, high=b, size=(num_terms, dim, dim)
            ) + 1j * rng.uniform(low=-b, high=b, size=(num_terms, dim, dim))
            rand_ops = Array(rand_ops + rand_ops.conj().transpose([0, 2, 1]))

            # random complex coefficients, carrier frequencies, and phases
            rand_coeffs = rng.uniform(low=-b, high=b, size=(num_terms,)) + 1j * rng.uniform(
                low=-b, high=b, size=(num_terms,)
            )
            rand_carriers = Array(rng.uniform(low=-b, high=b, size=(num_terms,)))
            rand_phases = Array(rng.uniform(low=-b, high=b, size=(num_terms,)))

            self._test_evaluate(frame_op, rand_ops, rand_coeffs, rand_carriers, rand_phases)
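# A quick sanity check of the Hermitian construction used above: adding a
# random matrix to its conjugate transpose always yields a Hermitian matrix.
# Minimal sketch, assuming only numpy (run outside the test class):
import numpy as np

rng = np.random.default_rng(30493)
rand_op = rng.uniform(-1.0, 1.0, size=(5, 5)) + 1j * rng.uniform(-1.0, 1.0, size=(5, 5))
herm_op = rand_op + rand_op.conj().T

np.testing.assert_allclose(herm_op, herm_op.conj().T)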
Example #2
    def test_generator_into_frame(self):
        """Test operator_out_of_frame."""
        rng = np.random.default_rng(111)
        rand_op = Array(
            rng.uniform(low=-10, high=10, size=(6, 6)) +
            1j * rng.uniform(low=-10, high=10, size=(6, 6)))

        frame_op = rand_op - rand_op.conj().transpose()

        t = rng.uniform(low=-100, high=100)
        y0 = Array(
            rng.uniform(low=-10, high=10, size=(6, 6)) +
            1j * rng.uniform(low=-10, high=10, size=(6, 6)))

        self._test_generator_into_frame(t, frame_op, y0)
        self._test_generator_into_frame(t, frame_op, y0, y_in_frame_basis=True)
        self._test_generator_into_frame(t, frame_op, y0, return_in_frame_basis=True)
        self._test_generator_into_frame(
            t, frame_op, y0, y_in_frame_basis=True, return_in_frame_basis=True
        )
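# The transformation exercised above is the standard rotating-frame change of
# generator: with anti-Hermitian frame operator F, a generator G maps to
# exp(-tF) @ G @ exp(tF) - F (basis-handling options aside). A minimal dense
# sketch, assuming scipy and an illustrative 6x6 generator G:
import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(111)
A = rng.uniform(-10, 10, size=(6, 6)) + 1j * rng.uniform(-10, 10, size=(6, 6))
F = A - A.conj().T  # anti-Hermitian frame operator, as in the test above
G = rng.uniform(-1, 1, size=(6, 6)) + 1j * rng.uniform(-1, 1, size=(6, 6))
t = 0.5

G_in_frame = expm(-t * F) @ G @ expm(t * F) - F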
Example #3
import numpy as np

from qiskit_dynamics.array import Array  # assumed import path for Array


def vec_dissipator(L: Array):
    r"""Linear algebraic vectorization of the linear map
    :math:`X \mapsto LXL^\dagger - \frac{1}{2}(L^\dagger L X + X L^\dagger L)`
    in column-stacking convention.

    This gives

    .. math::
        \overline{L} \otimes L - \frac{1}{2}\left(I \otimes L^\dagger L
            + (L^\dagger L)^T \otimes I\right).

    Note: this function is also "vectorized" in the programming sense,
    accepting a leading batch dimension in `L`.
    """
    iden = Array(np.eye(L.shape[-1]))

    # swap the last two axes to get a (possibly batched) transpose
    axes = list(range(L.ndim))
    axes[-1] = axes[-2]
    axes[-2] += 1

    Lconj = L.conj()
    LdagL = Lconj.transpose(axes) @ L
    LdagLtrans = LdagL.transpose(axes)

    return np.kron(Lconj, iden) @ np.kron(iden, L) - 0.5 * (
        np.kron(iden, LdagL) + np.kron(LdagLtrans, iden)
    )
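# The vectorized map can be checked against the direct action of the
# dissipator in the column-stacking convention, where
# vec(A X B) = (B^T kron A) vec(X). A minimal sketch, assuming plain numpy
# inputs and that Array interoperates with numpy arrays:
import numpy as np

rng = np.random.default_rng(42)
L0 = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
X = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))

direct = L0 @ X @ L0.conj().T - 0.5 * (L0.conj().T @ L0 @ X + X @ L0.conj().T @ L0)

vec = lambda M: M.flatten(order="F")  # stack the columns of M
np.testing.assert_allclose(vec_dissipator(L0) @ vec(X), vec(direct))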
Example #4
    def _evaluate_in_frame_basis_with_cutoffs(self, sig_vals: Array):
        r"""Evaluate the operator in the frame basis with frequency cutoffs.

        The computation here corresponds to that prescribed in
        `Frame.operators_into_frame_basis_with_cutoff`: for signal values
        :math:`s_j` it returns
        :math:`\frac{1}{2}\sum_j (s_j A_j + \overline{s_j} B_j)`, where
        :math:`A_j` and :math:`B_j` are the stored cutoff and
        conjugate-cutoff operators.

        Args:
            sig_vals: Signals evaluated at some time.

        Returns:
            Array: Operator model evaluated for the given signal values.
        """

        return 0.5 * (
            np.tensordot(sig_vals, self._ops_in_fb_w_cutoff, axes=1)
            + np.tensordot(sig_vals.conj(), self._ops_in_fb_w_conj_cutoff, axes=1)
        )
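# The tensordot calls above contract the signal index against the leading
# operator index, i.e. they compute sum_j s_j * ops[j]. A standalone numpy
# sketch of that contraction:
import numpy as np

sig_vals = np.array([1.0 + 2.0j, 0.5 - 1.0j])
ops = np.arange(2 * 3 * 3, dtype=complex).reshape(2, 3, 3)

expected = sig_vals[0] * ops[0] + sig_vals[1] * ops[1]
np.testing.assert_allclose(np.tensordot(sig_vals, ops, axes=1), expected)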
Example #5
from typing import Optional

import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix

# assumed import paths within qiskit-dynamics
from qiskit_dynamics.array import Array
from qiskit_dynamics.type_utils import to_array


def _is_herm_or_anti_herm(mat: Array,
                          atol: Optional[float] = 1e-10,
                          rtol: Optional[float] = 1e-10):
    r"""Given `mat`, the logic of this function is:
        - if `mat` is Hermitian, return `-1j * mat`
        - if `mat` is anti-Hermitian, return `mat`
        - otherwise:
            - if `mat.backend == 'jax'`, return `jnp.nan * mat`
            - otherwise raise an error

    The main purpose of this function is to hide the peculiarities of
    implementing the above logic in a way compilable with `jax`.

    Args:
        mat: array to check
        atol: absolute tolerance
        rtol: relative tolerance

    Returns:
        Array: anti-hermitian version of `mat` if applicable

    Raises:
        ImportError: if backend is jax and jax is not installed.
        QiskitError: if `mat` is not Hermitian or anti-Hermitian
    """
    mat = to_array(mat)
    mat = Array(mat, dtype=complex)

    if mat.backend == "jax":

        from jax.lax import cond
        import jax.numpy as jnp

        mat = mat.data

        if mat.ndim == 1:
            # this function checks if the array is purely imaginary; if yes it
            # returns the array, otherwise it multiplies by jnp.nan to poison
            # the result (pathways in jax conditionals cannot raise Exceptions)
            def anti_herm_conditional(b):
                aherm_pred = jnp.allclose(b, -b.conj(), atol=atol, rtol=rtol)
                return cond(aherm_pred, lambda A: A, lambda A: jnp.nan * A, b)

            # check if purely real; if not, apply anti_herm_conditional
            herm_pred = jnp.allclose(mat, mat.conj(), atol=atol, rtol=rtol)
            return Array(cond(herm_pred, lambda A: -1j * A, anti_herm_conditional, mat))
        else:
            # this function checks if the array is anti-Hermitian; if yes it
            # returns the array, otherwise it multiplies by jnp.nan
            def anti_herm_conditional(b):
                aherm_pred = jnp.allclose(b, -b.conj().transpose(), atol=atol, rtol=rtol)
                return cond(aherm_pred, lambda A: A, lambda A: jnp.nan * A, b)

            # check if mat is Hermitian; if not, feed it into
            # anti_herm_conditional
            herm_pred = jnp.allclose(mat, mat.conj().transpose(), atol=atol, rtol=rtol)
            return Array(cond(herm_pred, lambda A: -1j * A, anti_herm_conditional, mat))

    else:
        if mat.ndim == 1:
            if np.allclose(mat, mat.conj(), atol=atol, rtol=rtol):
                return -1j * mat
            elif np.allclose(mat, -mat.conj(), atol=atol, rtol=rtol):
                return mat
        else:
            if is_hermitian_matrix(mat, rtol=rtol, atol=atol):
                return -1j * mat
            elif is_hermitian_matrix(1j * mat, rtol=rtol, atol=atol):
                return mat

        # raise error if execution has made it this far
        raise QiskitError(
            "frame_operator must be either a Hermitian or anti-Hermitian matrix."
        )
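# Example behavior on the numpy backend (a minimal sketch): a Hermitian input
# is mapped to its anti-Hermitian version, while an anti-Hermitian input is
# returned unchanged.
import numpy as np

Z = np.array([[1.0, 0.0], [0.0, -1.0]])

np.testing.assert_allclose(_is_herm_or_anti_herm(Z), -1j * Z)        # Hermitian
np.testing.assert_allclose(_is_herm_or_anti_herm(-1j * Z), -1j * Z)  # anti-Hermitian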
Example #6
from typing import Optional

from qiskit_dynamics.array import Array  # assumed import path for Array


def anti_herm_part(mat: Optional[Array]) -> Optional[Array]:
    """Get the anti-Hermitian part of an operator, or None if mat is None."""
    if mat is None:
        return None

    return 0.5 * (mat - mat.conj().transpose())
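# Any matrix splits into a Hermitian plus an anti-Hermitian part;
# anti_herm_part returns the latter. A minimal numpy sketch of that
# decomposition:
import numpy as np

rng = np.random.default_rng(7)
M = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))

A = anti_herm_part(M)
H = 0.5 * (M + M.conj().T)

np.testing.assert_allclose(A, -A.conj().T)  # anti-Hermitian part
np.testing.assert_allclose(H + A, M)        # decomposition reconstructs M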