Example #1
    def test_ChoiPreservesSelf(self):
        """
        Superoperator: to_choi(q) returns q if q is already Choi.
        """
        superop = rand_super()
        choi = to_choi(superop)
        assert_(choi is to_choi(choi))
Example #2
    def test_ChoiPreservesSelf(self):
        """
        Superoperator: to_choi(q) returns q if q is already Choi.
        """
        superop = rand_super()
        choi = to_choi(superop)
        assert_(choi is to_choi(choi))
Example #3
def test_dag_preserves_superrep():
    """
    Checks that dag() preserves superrep.
    """
    def case(qobj):
        orig_superrep = qobj.superrep
        assert_equal(qobj.dag().superrep, orig_superrep)

    for dim in (2, 4, 8):
        qobj = rand_super_bcsz(dim)
        yield case, to_super(qobj)
        # These two shouldn't even do anything, since qobj
        # is Hermiticity-preserving.
        yield case, to_choi(qobj)
        yield case, to_chi(qobj)
Example #4
    def test_choi_tr(self):
        """
        Superoperator: Checks that the trace of matrices returned by to_choi
        matches that asserted by the docstring for that function.
        """
        for dims in range(2, 5):
            assert_(abs(to_choi(identity(dims)).tr() - dims) <= tol)
Example #5
        def case(qobj):
            qobj = to_chi(qobj)
            rt_qobj = to_chi(to_choi(qobj))

            assert_almost_equal(rt_qobj.data.toarray(), qobj.data.toarray())
            assert_equal(rt_qobj.type, qobj.type)
            assert_equal(rt_qobj.dims, qobj.dims)
Example #6
        def case(qobj):
            qobj = to_chi(qobj)
            rt_qobj = to_chi(to_choi(qobj))

            assert_almost_equal(rt_qobj.data.toarray(), qobj.data.toarray())
            assert_equal(rt_qobj.type, qobj.type)
            assert_equal(rt_qobj.dims, qobj.dims)
Example #7
    def test_choi_tr(self):
        """
        Superoperator: Checks that the trace of matrices returned by to_choi
        matches that asserted by the docstring for that function.
        """
        for dims in range(2, 5):
            assert_(abs(to_choi(identity(dims)).tr() - dims) <= tol)
Example #8
def test_tensor_swap_other():
    dims = (2, 3, 4, 5, 7)

    for dim in dims:
        S = to_super(rand_super_bcsz(dim))

        # Swapping the inner indices on a superoperator should give a Choi matrix.
        J = to_choi(S)
        case_tensor_swap(S, [(1, 2)], [[[dim], [dim]], [[dim], [dim]]], J)
Example #9
def test_tensor_swap_other():
    dims = (2, 3, 4, 5, 7)

    for dim in dims:
        S = to_super(rand_super_bcsz(dim))

        # Swapping the inner indices on a superoperator should give a Choi matrix.
        J = to_choi(S)
        case_tensor_swap(S, [(1, 2)], [[[dim], [dim]], [[dim], [dim]]], J)
Example #10
    def test_dnorm_qubit_simple_known_cases(self):
        """
        Metrics: check agreement for known qubit channels.
        """
        id_chan = to_choi(qeye(2))
        X_chan = to_choi(sigmax())
        depol = to_choi(
            Qobj(diag(ones((4, ))),
                 dims=[[[2], [2]], [[2], [2]]],
                 superrep='chi'))
        # We need to restrict the number of iterations for things on the
        # boundary, such as perfectly distinguishable channels.
        assert 2 == pytest.approx(dnorm(id_chan, X_chan), abs=1e-7)
        assert 1.5 == pytest.approx(dnorm(id_chan, depol), abs=1e-7)
        # Finally, we add a known case from Johnston's QETLAB documentation,
        #   || Phi - I ||_♢,
        # where Phi(X) = UXU⁺ and U = [[1, 1], [-1, 1]] / sqrt(2).
        assert np.sqrt(2) == pytest.approx(
            dnorm(Qobj([[1, 1], [-1, 1]]) / np.sqrt(2), qeye(2)))
Example #11
    def test_chi_choi_roundtrip(self, dimension):

        superop = rand_super_bcsz(dimension)
        superop = to_chi(superop)
        rt_superop = to_chi(to_choi(superop))
        dif = norm(rt_superop - superop)

        assert dif == pytest.approx(0, abs=1e-7)
        assert rt_superop.type == superop.type
        assert rt_superop.dims == superop.dims
Example #12
    def test_SuperChoiSuper(self, superoperator):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """

        choi_matrix = to_choi(superoperator)
        test_supe = to_super(choi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_supe - superoperator).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert test_supe.type == "super" and test_supe.superrep == "super"
Example #13
    def test_ChoiKrausChoi(self, superoperator):
        """
        Superoperator: Convert superoperator to Choi matrix and back.
        """
        choi_matrix = to_choi(superoperator)
        kraus_ops = to_kraus(choi_matrix)
        test_choi = kraus_to_choi(kraus_ops)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_choi - choi_matrix).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert test_choi.type == "super" and test_choi.superrep == "choi"
Example #14
    def test_SuperChoiSuper(self):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """
        superoperator = rand_super()

        choi_matrix = to_choi(superoperator)
        test_supe = to_super(choi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example #15
    def test_ChoiKrausChoi(self):
        """
        Superoperator: Convert superoperator to Choi matrix and back.
        """
        superoperator = rand_super()
        choi_matrix = to_choi(superoperator)
        kraus_ops = to_kraus(choi_matrix)
        test_choi = kraus_to_choi(kraus_ops)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_choi - choi_matrix).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(test_choi.type == "super" and test_choi.superrep == "choi")
Example #16
    def test_ChoiKrausChoi(self):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """
        superoperator = rand_super()
        choi_matrix = to_choi(superoperator)
        kraus_ops = to_kraus(choi_matrix)
        test_choi = kraus_to_choi(kraus_ops)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_choi - choi_matrix).norm() < 1e-12)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(test_choi.type == "super" and test_choi.superrep == "choi")
Example #17
def test_dag_preserves_superrep():
    """
    Checks that dag() preserves superrep.
    """

    def case(qobj):
        orig_superrep = qobj.superrep
        assert_equal(qobj.dag().superrep, orig_superrep)

    for dim in (2, 4, 8):
        qobj = rand_super_bcsz(dim)
        yield case, to_super(qobj)
        # These two shouldn't even do anything, since qobj
        # is Hermiticity-preserving.
        yield case, to_choi(qobj)
        yield case, to_chi(qobj)
Example #18
    def test_SuperChoiChiSuper(self):
        """
        Superoperator: Converting two-qubit superoperator through
        Choi and chi representations goes back to the right superoperator.
        """
        superoperator = super_tensor(rand_super(2), rand_super(2))

        choi_matrix = to_choi(superoperator)
        chi_matrix = to_chi(choi_matrix)
        test_supe = to_super(chi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(chi_matrix.type == "super" and chi_matrix.superrep == "chi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example #19
    def test_SuperChoiChiSuper(self):
        """
        Superoperator: Converting two-qubit superoperator through
        Choi and chi representations goes back to the right superoperator.
        """
        superoperator = super_tensor(rand_super(2), rand_super(2))

        choi_matrix = to_choi(superoperator)
        chi_matrix = to_chi(choi_matrix)
        test_supe = to_super(chi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(chi_matrix.type == "super" and chi_matrix.superrep == "chi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example #20
    def test_dnorm_qubit_known_cases(self, variable, target, matrix_creator):
        # Next, we'll generate some test cases based on comparisons to
        # pre-existing dnorm() implementations. In particular, the targets for
        # the following test cases were generated using QuantumUtils for MATLAB
        # (https://goo.gl/oWXhO9).
        id_chan = to_choi(qeye(2))
        if matrix_creator == 'overrotation':
            assert target == pytest.approx(dnorm(overrotation(variable),
                                                 id_chan),
                                           abs=1e-7)

        elif matrix_creator == 'had_mixture':
            assert target == pytest.approx(dnorm(had_mixture(variable),
                                                 id_chan),
                                           abs=1e-7)

        elif matrix_creator == 'swap_map':
            assert target == pytest.approx(dnorm(swap_map(variable), id_chan),
                                           abs=1e-7)
Example #21
    def test_NonSquareKrausSuperChoi(self):
        """
        Superoperator: Convert non-square Kraus operator to Super + Choi matrix and back.
        """
        zero = asarray([[1], [0]], dtype=complex)
        one = asarray([[0], [1]], dtype=complex)
        zero_log = kron(kron(zero, zero), zero)
        one_log = kron(kron(one, one), one)
        # non-square Kraus operator (isometry)
        kraus = Qobj(zero_log @ zero.T + one_log @ one.T)
        super = sprepost(kraus, kraus.dag())
        choi = to_choi(super)
        op1 = to_kraus(super)
        op2 = to_kraus(choi)
        op3 = to_super(choi)
        assert_(choi.type == "super" and choi.superrep == "choi")
        assert_(super.type == "super" and super.superrep == "super")
        assert_((op1[0] - kraus).norm() < 1e-8)
        assert_((op2[0] - kraus).norm() < 1e-8)
        assert_((op3 - super).norm() < 1e-8)
Example #22
    def test_choi_tr(self, dimension):
        """
        Superoperator: Trace returned by to_choi matches docstring.
        """
        assert abs(to_choi(identity(dimension)).tr() - dimension) <= tol
Example #23
    def test_choi_tr(self):
        """
        Superoperator: Trace returned by to_choi matches docstring.
        """
        for dims in range(2, 5):
            assert_(abs(to_choi(identity(dims)).tr() - dims) <= tol)
Example #24
def test_dnorm_qubit_known_cases():
    """
    Metrics: check agreement for known qubit channels.
    """
    def case(chan1, chan2, expected, significant=4):
        # We again take a generous tolerance so that we don't kill off
        # SCS solvers.
        assert_approx_equal(dnorm(chan1, chan2),
                            expected,
                            significant=significant)

    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    X_chan = to_choi(sigmax())
    depol = to_choi(
        Qobj(diag(ones((4, ))), dims=[[[2], [2]], [[2], [2]]], superrep='chi'))
    S_H = to_super(hadamard_transform())

    W = swap()

    # We need to restrict the number of iterations for things on the boundary,
    # such as perfectly distinguishable channels.
    yield case, id_chan, X_chan, 2
    yield case, id_chan, depol, 1.5

    # Next, we'll generate some test cases based on comparisons to pre-existing
    # dnorm() implementations. In particular, the targets for the following
    # test cases were generated using QuantumUtils for MATLAB (https://goo.gl/oWXhO9).

    def overrotation(x):
        return to_super((1j * np.pi * x * sigmax() / 2).expm())

    for x, target in {
            1.000000e-03: 3.141591e-03,
            3.100000e-03: 9.738899e-03,
            1.000000e-02: 3.141463e-02,
            3.100000e-02: 9.735089e-02,
            1.000000e-01: 3.128689e-01,
            3.100000e-01: 9.358596e-01
    }.items():
        yield case, overrotation(x), id_chan, target

    def had_mixture(x):
        return (1 - x) * S_eye + x * S_H

    for x, target in {
            1.000000e-03: 2.000000e-03,
            3.100000e-03: 6.200000e-03,
            1.000000e-02: 2.000000e-02,
            3.100000e-02: 6.200000e-02,
            1.000000e-01: 2.000000e-01,
            3.100000e-01: 6.200000e-01
    }.items():
        yield case, had_mixture(x), id_chan, target

    def swap_map(x):
        S = (1j * x * W).expm()
        S._type = None
        S.dims = [[[2], [2]], [[2], [2]]]
        S.superrep = 'super'
        return S

    for x, target in {
            1.000000e-03: 2.000000e-03,
            3.100000e-03: 6.199997e-03,
            1.000000e-02: 1.999992e-02,
            3.100000e-02: 6.199752e-02,
            1.000000e-01: 1.999162e-01,
            3.100000e-01: 6.173918e-01
    }.items():
        yield case, swap_map(x), id_chan, target

    # Finally, we add a known case from Johnston's QETLAB documentation,
    # || Phi - I ||_♢, where Phi(X) = UXU⁺ and U = [[1, 1], [-1, 1]] / sqrt(2).
    yield case, Qobj([[1, 1], [-1, 1]]) / np.sqrt(2), qeye(2), np.sqrt(2)
Example #25
def had_mixture(x):
    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    S_H = to_super(hadamard_transform())
    return (1 - x) * S_eye + x * S_H
Example #26
    def test_ChoiPreservesSelf(self, superoperator):
        """
        Superoperator: to_choi(q) returns q if q is already Choi.
        """
        choi = to_choi(superoperator)
        assert choi is to_choi(choi)
Example #27
def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False):
    """
    Calculates the diamond norm of the quantum map A, using
    the simplified semidefinite program of [Wat12]_.

    The diamond norm SDP is solved by using CVXPY_.

    Parameters
    ----------
    A : Qobj
        Quantum map to take the diamond norm of.
    B : Qobj or None
        If provided, the diamond norm of :math:`A - B` is
        taken instead.
    solver : str
        Solver to use with CVXPY. One of "CVXOPT" (default)
        or "SCS". The latter tends to be significantly faster,
        but somewhat less accurate.
    verbose : bool
        If True, prints additional information about the
        solution.
    force_solve : bool
        If True, forces dnorm to solve the associated SDP, even if a special
        case is known for the argument.

    Returns
    -------
    dn : float
        Diamond norm of A, or of A - B if B is given.

    Raises
    ------
    ImportError
        If CVXPY cannot be imported.

    .. _cvxpy: http://www.cvxpy.org/en/latest/
    """
    if cvxpy is None:  # pragma: no cover
        raise ImportError("dnorm() requires CVXPY to be installed.")

    # We follow the strategy of using Watrous' simpler semidefinite
    # program in its primal form. This is the same strategy used,
    # for instance, by both pyGSTi and SchattenNorms.jl. (By contrast,
    # QETLAB uses the dual problem.)

    # Check if A and B are both unitaries. If so, then we can without
    # loss of generality choose B to be the identity by using the
    # unitary invariance of the diamond norm,
    #     || A - B ||_♢ = || A B⁺ - I ||_♢.
    # Then, using the technique mentioned by each of Johnston and
    # da Silva,
    #     || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |,
    # where \lambda_i(U) is the ith eigenvalue of U.

    if (
        # There are a lot of conditions to check for this path.
        not force_solve and B is not None and
        # Only check if they aren't superoperators.
        A.type == "oper" and B.type == "oper" and
        # The difference of unitaries optimization is currently
        # only implemented for d == 2. Much of the code below is more general,
        # though, in anticipation of generalizing the optimization.
        A.shape[0] == 2
    ):
        # Make an identity the same size as A and B to
        # compare against.
        I = qeye(A.dims[0])
        # Compare to B first, so that an error is raised
        # as soon as possible.
        Bd = B.dag()
        if (
            (B * Bd - I).norm() < 1e-6 and
            (A * A.dag() - I).norm() < 1e-6
        ):
            # Now we are on the fast path, so let's compute the
            # eigenvalues, then find the diameter of the smallest circle
            # containing all of them.
            #
            # For now, this is only implemented for dim = 2, such that
            # generalizing here will allow for generalizing the optimization.
            # A reasonable approach would probably be to use Welzl's algorithm
            # (https://en.wikipedia.org/wiki/Smallest-circle_problem).
            U = A * B.dag()
            eigs = U.eigenenergies()
            eig_distances = np.abs(eigs[:, None] - eigs[None, :])
            return np.max(eig_distances)

    # Force the input superoperator to be a Choi matrix.
    J = to_choi(A)
    
    if B is not None:
        J -= to_choi(B)

    # Watrous 2012 also points out that the diamond norm of Lambda
    # is the same as the completely-bounded operator-norm (∞-norm)
    # of the dual map of Lambda. We can evaluate that norm much more
    # easily if Lambda is completely positive, since then the largest
    # eigenvalue is the same as the largest singular value.
    
    if not force_solve and J.iscp:
        S_dual = to_super(J.dual_chan())
        vec_eye = operator_to_vector(qeye(S_dual.dims[1][1]))
        op = vector_to_operator(S_dual * vec_eye)
        # The 2-norm was not implemented for sparse matrices as of the time
        # of this writing. Thus, we must yet again go dense.
        return la.norm(op.data.todense(), 2)
    
    # If we're still here, we need to actually solve the problem.

    # Assume square...
    dim = np.prod(J.dims[0][0])
    
    # The constraints only depend on the dimension, so
    # we can cache them efficiently.
    problem, Jr, Ji, X, rho0, rho1 = dnorm_problem(dim)
    
    # Load the parameters with the Choi matrix passed in.
    J_dat = J.data
    
    Jr.value = sp.csr_matrix((J_dat.data.real, J_dat.indices, J_dat.indptr), 
                             shape=J_dat.shape)
   
    Ji.value = sp.csr_matrix((J_dat.data.imag, J_dat.indices, J_dat.indptr),
                             shape=J_dat.shape)
    # Finally, set up and run the problem.
    problem.solve(solver=solver, verbose=verbose)
    
    return problem.value
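A minimal usage sketch for the dnorm() defined above (added here for illustration, not part of the original listing), assuming only the qutip names already used by the tests in this section:

from qutip import qeye, sigmax, to_choi

# Identity vs. bit-flip channel: both arguments are Choi matrices, so the
# SDP path is taken; the value should be close to 2.
id_chan = to_choi(qeye(2))
X_chan = to_choi(sigmax())
print(dnorm(id_chan, X_chan))

# Both arguments are plain 2x2 unitaries, so the eigenvalue-diameter fast
# path applies instead of the SDP; again the value should be close to 2.
print(dnorm(sigmax(), qeye(2)))

# Force the full SDP even when a special case is known, using the SCS solver.
print(dnorm(id_chan, X_chan, force_solve=True, solver="SCS"))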
Example #28
def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False,
          sparse=True):
    """
    Calculates the diamond norm of the quantum map A, using
    the simplified semidefinite program of [Wat12]_.

    The diamond norm SDP is solved by using CVXPY_.

    Parameters
    ----------
    A : Qobj
        Quantum map to take the diamond norm of.
    B : Qobj or None
        If provided, the diamond norm of :math:`A - B` is taken instead.
    solver : str
        Solver to use with CVXPY. One of "CVXOPT" (default) or "SCS". The
        latter tends to be significantly faster, but somewhat less accurate.
    verbose : bool
        If True, prints additional information about the solution.
    force_solve : bool
        If True, forces dnorm to solve the associated SDP, even if a special
        case is known for the argument.
    sparse : bool
        Whether to use sparse matrices in the convex optimisation problem.
        Default True.

    Returns
    -------
    dn : float
        Diamond norm of A, or of A - B if B is given.

    Raises
    ------
    ImportError
        If CVXPY cannot be imported.

    .. _cvxpy: http://www.cvxpy.org/en/latest/
    """
    if cvxpy is None:  # pragma: no cover
        raise ImportError("dnorm() requires CVXPY to be installed.")

    # We follow the strategy of using Watrous' simpler semidefinite
    # program in its primal form. This is the same strategy used,
    # for instance, by both pyGSTi and SchattenNorms.jl. (By contrast,
    # QETLAB uses the dual problem.)

    # Check if A and B are both unitaries. If so, then we can without
    # loss of generality choose B to be the identity by using the
    # unitary invariance of the diamond norm,
    #     || A - B ||_♢ = || A B⁺ - I ||_♢.
    # Then, using the technique mentioned by each of Johnston and
    # da Silva,
    #     || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |,
    # where \lambda_i(U) is the ith eigenvalue of U.

    if (
        # There are a lot of conditions to check for this path.
        not force_solve and B is not None and
        # Only check if they aren't superoperators.
        A.type == "oper" and B.type == "oper" and
        # The difference of unitaries optimization is currently
        # only implemented for d == 2. Much of the code below is more general,
        # though, in anticipation of generalizing the optimization.
        A.shape[0] == 2
    ):
        # Make an identity the same size as A and B to
        # compare against.
        I = qeye(A.dims[0])
        # Compare to B first, so that an error is raised
        # as soon as possible.
        Bd = B.dag()
        if (
            (B * Bd - I).norm() < 1e-6 and
            (A * A.dag() - I).norm() < 1e-6
        ):
            # Now we are on the fast path, so let's compute the
            # eigenvalues, then find the diameter of the smallest circle
            # containing all of them.
            #
            # For now, this is only implemented for dim = 2, such that
            # generalizing here will allow for generalizing the optimization.
            # A reasonable approach would probably be to use Welzl's algorithm
            # (https://en.wikipedia.org/wiki/Smallest-circle_problem).
            U = A * B.dag()
            eigs = U.eigenenergies()
            eig_distances = np.abs(eigs[:, None] - eigs[None, :])
            return np.max(eig_distances)

    # Force the input superoperator to be a Choi matrix.
    J = to_choi(A)

    if B is not None:
        J -= to_choi(B)

    # Watrous 2012 also points out that the diamond norm of Lambda
    # is the same as the completely-bounded operator-norm (∞-norm)
    # of the dual map of Lambda. We can evaluate that norm much more
    # easily if Lambda is completely positive, since then the largest
    # eigenvalue is the same as the largest singular value.

    if not force_solve and J.iscp:
        S_dual = to_super(J.dual_chan())
        vec_eye = operator_to_vector(qeye(S_dual.dims[1][1]))
        op = vector_to_operator(S_dual * vec_eye)
        # The 2-norm was not implemented for sparse matrices as of the time
        # of this writing. Thus, we must yet again go dense.
        return la.norm(op.data.todense(), 2)

    # If we're still here, we need to actually solve the problem.

    # Assume square...
    dim = np.prod(J.dims[0][0])

    J_dat = J.data

    if not sparse:
        # The parameters and constraints only depend on the dimension, so
        # we can cache them efficiently.
        problem, Jr, Ji = dnorm_problem(dim)

        # Load the parameters with the Choi matrix passed in.
        Jr.value = sp.csr_matrix((J_dat.data.real, J_dat.indices,
                                  J_dat.indptr),
                                 shape=J_dat.shape).toarray()

        Ji.value = sp.csr_matrix((J_dat.data.imag, J_dat.indices,
                                  J_dat.indptr),
                                 shape=J_dat.shape).toarray()
    else:
        # The parameters do not depend solely on the dimension,
        # so we cannot cache them efficiently.
        problem = dnorm_sparse_problem(dim, J_dat)

    problem.solve(solver=solver, verbose=verbose)

    return problem.value
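A short sketch of how the sparse flag in this variant could be exercised (an illustration added here, not part of the original tests); rand_super_bcsz and to_super are the same qutip helpers used elsewhere in this section:

from qutip import qeye, rand_super_bcsz, to_super

# Run the cached dense path and the sparse path on the same random channel;
# the two results should agree up to the solver's tolerance.
S = to_super(rand_super_bcsz(2))
d_dense = dnorm(S, to_super(qeye(2)), sparse=False)
d_sparse = dnorm(S, to_super(qeye(2)), sparse=True)
print(d_dense, d_sparse, abs(d_dense - d_sparse))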
Example #29
    def test_choi_tr(self):
        """
        Superoperator: Trace returned by to_choi matches docstring.
        """
        for dims in range(2, 5):
            assert_(abs(to_choi(identity(dims)).tr() - dims) <= tol)
Example #30
def test_dnorm_qubit_known_cases():
    """
    Metrics: check agreement for known qubit channels.
    """
    def case(chan1, chan2, expected, significant=4):
        # We again take a generous tolerance so that we don't kill off
        # SCS solvers.
        assert_approx_equal(
            dnorm(chan1, chan2), expected,
            significant=significant
        )

    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    X_chan = to_choi(sigmax())
    depol = to_choi(Qobj(
        diag(ones((4,))),
        dims=[[[2], [2]], [[2], [2]]], superrep='chi'
    ))
    S_H = to_super(hadamard_transform())

    W = swap()

    # We need to restrict the number of iterations for things on the boundary,
    # such as perfectly distinguishable channels.
    yield case, id_chan, X_chan, 2
    yield case, id_chan, depol, 1.5

    # Next, we'll generate some test cases based on comparisons to pre-existing
    # dnorm() implementations. In particular, the targets for the following
    # test cases were generated using QuantumUtils for MATLAB (https://goo.gl/oWXhO9).

    def overrotation(x):
        return to_super((1j * np.pi * x * sigmax() / 2).expm())

    for x, target in {
        1.000000e-03: 3.141591e-03,
        3.100000e-03: 9.738899e-03,
        1.000000e-02: 3.141463e-02,
        3.100000e-02: 9.735089e-02,
        1.000000e-01: 3.128689e-01,
        3.100000e-01: 9.358596e-01
    }.items():
        yield case, overrotation(x), id_chan, target

    def had_mixture(x):
        return (1 - x) * S_eye + x * S_H

    for x, target in {
        1.000000e-03: 2.000000e-03,
        3.100000e-03: 6.200000e-03,
        1.000000e-02: 2.000000e-02,
        3.100000e-02: 6.200000e-02,
        1.000000e-01: 2.000000e-01,
        3.100000e-01: 6.200000e-01
    }.items():
        yield case, had_mixture(x), id_chan, target

    def swap_map(x):
        S = (1j * x * W).expm()
        S._type = None
        S.dims = [[[2], [2]], [[2], [2]]]
        S.superrep = 'super'
        return S

    for x, target in {
        1.000000e-03: 2.000000e-03,
        3.100000e-03: 6.199997e-03,
        1.000000e-02: 1.999992e-02,
        3.100000e-02: 6.199752e-02,
        1.000000e-01: 1.999162e-01,
        3.100000e-01: 6.173918e-01
    }.items():
        yield case, swap_map(x), id_chan, target

    # Finally, we add a known case from Johnston's QETLAB documentation,
    # || Phi - I ||_♢, where Phi(X) = UXU⁺ and U = [[1, 1], [-1, 1]] / sqrt(2).
    yield case, Qobj([[1, 1], [-1, 1]]) / np.sqrt(2), qeye(2), np.sqrt(2)