Example No. 1
def test_dual_channel(sub_dimensions, n_trials=50):
    """
    Qobj: dual_chan() preserves inner products with arbitrary density ops.
    """
    S = rand_super_bcsz(np.prod(sub_dimensions))
    S.dims = [[sub_dimensions, sub_dimensions],
              [sub_dimensions, sub_dimensions]]
    S = to_super(S)
    left_dims, right_dims = S.dims

    # Assume for the purposes of the test that S maps square operators to
    # square operators.
    in_dim = np.prod(right_dims[0])
    out_dim = np.prod(left_dims[0])

    S_dual = to_super(S.dual_chan())

    primals = []
    duals = []

    for _ in range(n_trials):
        X = rand_dm_ginibre(out_dim)
        X.dims = left_dims
        X = operator_to_vector(X)
        Y = rand_dm_ginibre(in_dim)
        Y.dims = right_dims
        Y = operator_to_vector(Y)

        primals.append((X.dag() * S * Y)[0, 0])
        duals.append((X.dag() * S_dual.dag() * Y)[0, 0])

    np.testing.assert_array_almost_equal(primals, duals)
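A minimal sketch of the identity the test above exercises, written with operators instead of vectorized ones (same QuTiP functions as the test): Tr[X† S(Y)] equals Tr[(S⁺(X))† Y], where S⁺ is the dual (Hilbert-Schmidt adjoint) channel returned by dual_chan().

import numpy as np
from qutip import (rand_super_bcsz, rand_dm_ginibre, to_super,
                   operator_to_vector, vector_to_operator)

S = to_super(rand_super_bcsz(3))
S_dual = to_super(S.dual_chan())

X, Y = rand_dm_ginibre(3), rand_dm_ginibre(3)
S_of_Y = vector_to_operator(S * operator_to_vector(Y))           # S(Y)
Sdual_of_X = vector_to_operator(S_dual * operator_to_vector(X))  # S⁺(X)

# Hilbert-Schmidt adjoint property: <X, S(Y)> == <S⁺(X), Y>.
np.testing.assert_allclose((X.dag() * S_of_Y).tr(),
                           (Sdual_of_X.dag() * Y).tr(), atol=1e-10)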
Example No. 2
    def case(S, n_trials=50):
        S = to_super(S)
        left_dims, right_dims = S.dims

        # Assume for the purposes of the test that S maps square operators to square operators.
        in_dim = np.prod(right_dims[0])
        out_dim = np.prod(left_dims[0])

        S_dual = to_super(S.dual_chan())

        primals = []
        duals = []

        for idx_trial in range(n_trials):
            X = rand_dm_ginibre(out_dim)
            X.dims = left_dims
            X = operator_to_vector(X)
            Y = rand_dm_ginibre(in_dim)
            Y.dims = right_dims
            Y = operator_to_vector(Y)

            primals.append((X.dag() * S * Y)[0, 0])
            duals.append((X.dag() * S_dual.dag() * Y)[0, 0])

        np.testing.assert_array_almost_equal(primals, duals)
Example No. 3
    def case(S, n_trials=50):        
        S = to_super(S)
        left_dims, right_dims = S.dims
        
        # Assume for the purposes of the test that S maps square operators to square operators.
        in_dim = np.prod(right_dims[0])
        out_dim = np.prod(left_dims[0])
        
        S_dual = to_super(S.dual_chan())
        
        primals = []
        duals = []
    
        for idx_trial in range(n_trials):
            X = rand_dm_ginibre(out_dim)
            X.dims = left_dims
            X = operator_to_vector(X)
            Y = rand_dm_ginibre(in_dim)
            Y.dims = right_dims
            Y = operator_to_vector(Y)

            primals.append((X.dag() * S * Y)[0, 0])
            duals.append((X.dag() * S_dual.dag() * Y)[0, 0])
    
        np.testing.assert_array_almost_equal(primals, duals)
Example No. 4
    def test_stinespring_dims(self, dimension):
        """
        Stinespring: Check that dims of channels are preserved.
        """
        chan = super_tensor(to_super(sigmax()), to_super(qeye(dimension)))
        A, B = to_stinespring(chan)
        assert A.dims == [[2, dimension, 1], [2, dimension]]
        assert B.dims == [[2, dimension, 1], [2, dimension]]
Example No. 5
def test_QobjPermute():
    "Qobj permute"
    A = basis(3, 0)
    B = basis(5, 4)
    C = basis(4, 2)
    psi = tensor(A, B, C)
    psi2 = psi.permute([2, 0, 1])
    assert psi2 == tensor(C, A, B)

    psi_bra = psi.dag()
    psi2_bra = psi_bra.permute([2, 0, 1])
    assert psi2_bra == tensor(C, A, B).dag()

    A = fock_dm(3, 0)
    B = fock_dm(5, 4)
    C = fock_dm(4, 2)
    rho = tensor(A, B, C)
    rho2 = rho.permute([2, 0, 1])
    assert rho2 == tensor(C, A, B)

    for _ in range(3):
        A = rand_ket(3)
        B = rand_ket(4)
        C = rand_ket(5)
        psi = tensor(A, B, C)
        psi2 = psi.permute([1, 0, 2])
        assert psi2 == tensor(B, A, C)

        psi_bra = psi.dag()
        psi2_bra = psi_bra.permute([1, 0, 2])
        assert psi2_bra == tensor(B, A, C).dag()

    for _ in range(3):
        A = rand_dm(3)
        B = rand_dm(4)
        C = rand_dm(5)
        rho = tensor(A, B, C)
        rho2 = rho.permute([1, 0, 2])
        assert rho2 == tensor(B, A, C)

        rho_vec = operator_to_vector(rho)
        rho2_vec = rho_vec.permute([[1, 0, 2], [4, 3, 5]])
        assert rho2_vec == operator_to_vector(tensor(B, A, C))

        rho_vec_bra = operator_to_vector(rho).dag()
        rho2_vec_bra = rho_vec_bra.permute([[1, 0, 2], [4, 3, 5]])
        assert rho2_vec_bra == operator_to_vector(tensor(B, A, C)).dag()

    for _ in range(3):
        super_dims = [3, 5, 4]
        U = rand_unitary(np.prod(super_dims),
                         density=0.02,
                         dims=[super_dims, super_dims])
        Unew = U.permute([2, 1, 0])
        S_tens = to_super(U)
        S_tens_new = to_super(Unew)
        assert S_tens_new == S_tens.permute([[2, 1, 0], [5, 4, 3]])
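A short sketch of the index convention used in the vectorized cases above: after operator_to_vector the flat subsystem indices run over the ket-side dims (0..n-1) and then the bra-side dims (n..2n-1), so the same subsystem permutation must be supplied for both halves (here [1, 0, 2] together with [4, 3, 5]).

from qutip import rand_dm, tensor, operator_to_vector

A, B, C = rand_dm(3), rand_dm(4), rand_dm(5)
rho_vec = operator_to_vector(tensor(A, B, C))

# Swap subsystems 0 and 1 on both the ket and bra sides of the
# underlying operator.
swapped = rho_vec.permute([[1, 0, 2], [4, 3, 5]])
assert swapped == operator_to_vector(tensor(B, A, C))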
Example No. 6
    def test_stinespring_dims(self):
        """
        Stinespring: Check that dims of channels are preserved.
        """
        # FIXME: not the most general test, since this assumes a map
        #        from square matrices to square matrices on the same space.
        chan = super_tensor(to_super(sigmax()), to_super(qeye(3)))
        A, B = to_stinespring(chan)
        assert_equal(A.dims, [[2, 3, 1], [2, 3]])
        assert_equal(B.dims, [[2, 3, 1], [2, 3]])
Example No. 7
    def test_stinespring_dims(self):
        """
        Stinespring: Check that dims of channels are preserved.
        """
        # FIXME: not the most general test, since this assumes a map
        #        from square matrices to square matrices on the same space.
        chan = super_tensor(to_super(sigmax()), to_super(qeye(3)))
        A, B = to_stinespring(chan)
        assert_equal(A.dims, [[2, 3, 1], [2, 3]])
        assert_equal(B.dims, [[2, 3, 1], [2, 3]])
Example No. 8
def test_QobjPermute():
    "Qobj permute"
    A = basis(3, 0)
    B = basis(5, 4)
    C = basis(4, 2)
    psi = tensor(A, B, C)
    psi2 = psi.permute([2, 0, 1])
    assert_(psi2 == tensor(C, A, B))
    
    psi_bra = psi.dag()
    psi2_bra = psi_bra.permute([2, 0, 1])
    assert_(psi2_bra == tensor(C, A, B).dag())

    A = fock_dm(3, 0)
    B = fock_dm(5, 4)
    C = fock_dm(4, 2)
    rho = tensor(A, B, C)
    rho2 = rho.permute([2, 0, 1])
    assert_(rho2 == tensor(C, A, B))

    for ii in range(3):
        A = rand_ket(3)
        B = rand_ket(4)
        C = rand_ket(5)
        psi = tensor(A, B, C)
        psi2 = psi.permute([1, 0, 2])
        assert_(psi2 == tensor(B, A, C))
        
        psi_bra = psi.dag()
        psi2_bra = psi_bra.permute([1, 0, 2])
        assert_(psi2_bra == tensor(B, A, C).dag())

    for ii in range(3):
        A = rand_dm(3)
        B = rand_dm(4)
        C = rand_dm(5)
        rho = tensor(A, B, C)
        rho2 = rho.permute([1, 0, 2])
        assert_(rho2 == tensor(B, A, C))
        
        rho_vec = operator_to_vector(rho)
        rho2_vec = rho_vec.permute([[1, 0, 2],[4,3,5]])
        assert_(rho2_vec == operator_to_vector(tensor(B, A, C)))
        
        rho_vec_bra = operator_to_vector(rho).dag()
        rho2_vec_bra = rho_vec_bra.permute([[1, 0, 2],[4,3,5]])
        assert_(rho2_vec_bra == operator_to_vector(tensor(B, A, C)).dag())
        
    for ii in range(3):
        super_dims = [3, 5, 4]
        U = rand_unitary(np.prod(super_dims), density=0.02, dims=[super_dims, super_dims])
        Unew = U.permute([2,1,0])
        S_tens = to_super(U)
        S_tens_new = to_super(Unew)
        assert_(S_tens_new == S_tens.permute([[2,1,0],[5,4,3]]))
Example No. 9
def test_unitarity_known():
    """
    Metrics: Unitarity for known cases.
    """
    def case(q_oper, known_unitarity):
        assert_almost_equal(unitarity(q_oper), known_unitarity)

    yield case, to_super(sigmax()), 1.0
    yield case, sum(map(
        to_super,
        [qeye(2), sigmax(), sigmay(), sigmaz()])) / 4, 0.0
    yield case, sum(map(to_super, [qeye(2), sigmax()])) / 2, 1 / 3.0
Example No. 10
def test_super_tensor_property():
    """
    Tensor: Super_tensor correctly tensors on underlying spaces.
    """
    U1 = rand_unitary(3)
    U2 = rand_unitary(5)

    U = tensor(U1, U2)
    S_tens = to_super(U)

    S_supertens = super_tensor(to_super(U1), to_super(U2))

    assert_(S_tens == S_supertens)
    assert_equal(S_supertens.superrep, 'super')
Example No. 11
def test_super_tensor_property():
    """
    Tensor: Super_tensor correctly tensors on underlying spaces.
    """
    U1 = rand_unitary(3)
    U2 = rand_unitary(5)

    U = tensor(U1, U2)
    S_tens = to_super(U)

    S_supertens = super_tensor(to_super(U1), to_super(U2))

    assert_(S_tens == S_supertens)
    assert_equal(S_supertens.superrep, 'super')
Example No. 12
def test_dag_preserves_superrep():
    """
    Checks that dag() preserves superrep.
    """
    def case(qobj):
        orig_superrep = qobj.superrep
        assert_equal(qobj.dag().superrep, orig_superrep)

    for dim in (2, 4, 8):
        qobj = rand_super_bcsz(dim)
        yield case, to_super(qobj)
        # These two shouldn't even do anything, since qobj
        # is hermiticity-preserving.
        yield case, to_choi(qobj)
        yield case, to_chi(qobj)
Example No. 13
    def test_SuperPreservesSelf(self):
        """
        Superoperator: to_super(q) returns q if q is already a
        supermatrix.
        """
        superop = rand_super()
        assert_(superop is to_super(superop))
Example No. 14
    def test_SuperPreservesSelf(self):
        """
        Superoperator: to_super(q) returns q if q is already a
        supermatrix.
        """
        superop = rand_super()
        assert_(superop is to_super(superop))
Example No. 15
def test_composite_oper():
    """
    Composite: Tests compositing unitaries and superoperators.
    """
    U1 = rand_unitary(3)
    U2 = rand_unitary(5)
    S1 = to_super(U1)
    S2 = to_super(U2)

    S3 = rand_super(4)
    S4 = rand_super(7)

    assert_(composite(U1, U2) == tensor(U1, U2))
    assert_(composite(S3, S4) == super_tensor(S3, S4))
    assert_(composite(U1, S4) == super_tensor(S1, S4))
    assert_(composite(S3, U2) == super_tensor(S3, S2))
Example No. 16
    def test_SuperPreservesSelf(self, superoperator):
        """
        Superoperator: to_super(q) returns q if q is already a
        supermatrix.
        """

        assert superoperator is to_super(superoperator)
Example No. 17
def test_composite_oper():
    """
    Composite: Tests compositing unitaries and superoperators.
    """
    U1 = rand_unitary(3)
    U2 = rand_unitary(5)
    S1 = to_super(U1)
    S2 = to_super(U2)

    S3 = rand_super(4)
    S4 = rand_super(7)

    assert_(composite(U1, U2) == tensor(U1, U2))
    assert_(composite(S3, S4) == super_tensor(S3, S4))
    assert_(composite(U1, S4) == super_tensor(S1, S4))
    assert_(composite(S3, U2) == super_tensor(S3, S2))
Example No. 18
def test_CheckMulType():
    "Qobj multiplication type"

    # ket-bra and bra-ket multiplication
    psi = basis(5)
    dm = psi * psi.dag()
    assert_(dm.isoper)
    assert_(dm.isherm)

    nrm = psi.dag() * psi
    assert_equal(np.prod(nrm.shape), 1)
    assert_((abs(nrm) == 1)[0, 0])

    # operator-operator multiplication
    H1 = rand_herm(3)
    H2 = rand_herm(3)
    out = H1 * H2
    assert_(out.isoper)
    out = H1 * H1
    assert_(out.isoper)
    assert_(out.isherm)
    out = H2 * H2
    assert_(out.isoper)
    assert_(out.isherm)

    U = rand_unitary(5)
    out = U.dag() * U
    assert_(out.isoper)
    assert_(out.isherm)

    N = num(5)

    out = N * N
    assert_(out.isoper)
    assert_(out.isherm)

    # operator-ket and bra-operator multiplication
    op = sigmax()
    ket1 = basis(2)
    ket2 = op * ket1
    assert_(ket2.isket)

    bra1 = basis(2).dag()
    bra2 = bra1 * op
    assert_(bra2.isbra)

    assert_(bra2.dag() == ket2)

    # superoperator-operket and operbra-superoperator multiplication
    sop = to_super(sigmax())
    opket1 = operator_to_vector(fock_dm(2))
    opket2 = sop * opket1
    assert_(opket2.isoperket)

    opbra1 = operator_to_vector(fock_dm(2)).dag()
    opbra2 = opbra1 * sop
    assert_(opbra2.isoperbra)

    assert_(opbra2.dag() == opket2)
Example No. 19
def test_CheckMulType():
    "Qobj multiplication type"

    # ket-bra and bra-ket multiplication
    psi = basis(5)
    dm = psi * psi.dag()
    assert_(dm.isoper)
    assert_(dm.isherm)

    nrm = psi.dag() * psi
    assert_equal(np.prod(nrm.shape), 1)
    assert_((abs(nrm) == 1)[0, 0])

    # operator-operator multiplication
    H1 = rand_herm(3)
    H2 = rand_herm(3)
    out = H1 * H2
    assert_(out.isoper)
    out = H1 * H1
    assert_(out.isoper)
    assert_(out.isherm)
    out = H2 * H2
    assert_(out.isoper)
    assert_(out.isherm)

    U = rand_unitary(5)
    out = U.dag() * U
    assert_(out.isoper)
    assert_(out.isherm)

    N = num(5)

    out = N * N
    assert_(out.isoper)
    assert_(out.isherm)

    # operator-ket and bra-operator multiplication
    op = sigmax()
    ket1 = basis(2)
    ket2 = op * ket1
    assert_(ket2.isket)

    bra1 = basis(2).dag()
    bra2 = bra1 * op
    assert_(bra2.isbra)

    assert_(bra2.dag() == ket2)

    # superoperator-operket and operbra-superoperator multiplication
    sop = to_super(sigmax())
    opket1 = operator_to_vector(fock_dm(2))
    opket2 = sop * opket1
    assert_(opket2.isoperket)

    opbra1 = operator_to_vector(fock_dm(2)).dag()
    opbra2 = opbra1 * sop
    assert_(opbra2.isoperbra)

    assert_(opbra2.dag() == opket2)
Example No. 20
def test_average_gate_fidelity_target():
    """
    Metrics: Tests that for random unitaries U, AGF(U, U) = 1.
    """
    for _ in range(10):
        U = rand_unitary_haar(13)
        SU = to_super(U)
        assert_almost_equal(average_gate_fidelity(SU, target=U), 1)
Example No. 21
def test_average_gate_fidelity_target():
    """
    Metrics: Tests that for random unitaries U, AGF(U, U) = 1.
    """
    for _ in range(10):
        U = rand_unitary_haar(13)
        SU = to_super(U)
        assert_almost_equal(average_gate_fidelity(SU, target=U), 1)
Example No. 22
def test_CheckMulType():
    "Qobj multiplication type"
    # ket-bra and bra-ket multiplication
    psi = basis(5)
    dm = psi * psi.dag()
    assert dm.isoper
    assert dm.isherm

    nrm = psi.dag() * psi
    assert np.prod(nrm.shape) == 1
    assert abs(nrm)[0, 0] == 1

    # operator-operator multiplication
    H1 = rand_herm(3)
    H2 = rand_herm(3)
    out = H1 * H2
    assert out.isoper
    out = H1 * H1
    assert out.isoper
    assert out.isherm
    out = H2 * H2
    assert out.isoper
    assert out.isherm

    U = rand_unitary(5)
    out = U.dag() * U
    assert out.isoper
    assert out.isherm

    N = num(5)

    out = N * N
    assert out.isoper
    assert out.isherm

    # operator-ket and bra-operator multiplication
    op = sigmax()
    ket1 = basis(2)
    ket2 = op * ket1
    assert ket2.isket

    bra1 = basis(2).dag()
    bra2 = bra1 * op
    assert bra2.isbra

    assert bra2.dag() == ket2

    # superoperator-operket and operbra-superoperator multiplication
    sop = to_super(sigmax())
    opket1 = operator_to_vector(fock_dm(2))
    opket2 = sop * opket1
    assert opket2.isoperket

    opbra1 = operator_to_vector(fock_dm(2)).dag()
    opbra2 = opbra1 * sop
    assert opbra2.isoperbra

    assert opbra2.dag() == opket2
Example No. 23
def test_tensor_swap_other():
    dims = (2, 3, 4, 5, 7)

    for dim in dims:
        S = to_super(rand_super_bcsz(dim))

        # Swapping the inner indices on a superoperator should give a Choi matrix.
        J = to_choi(S)
        case_tensor_swap(S, [(1, 2)], [[[dim], [dim]], [[dim], [dim]]], J)
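A minimal sketch of the comment above (assuming tensor_swap is importable from the top-level qutip namespace, and comparing raw matrix data only, since the swapped object is not flagged with superrep='choi'):

import numpy as np
from qutip import rand_super_bcsz, to_super, to_choi, tensor_swap

S = to_super(rand_super_bcsz(3))
J = to_choi(S)

# Swapping the two inner subsystem indices of the column-stacked
# supermatrix reshuffles it into the Choi matrix.
swapped = tensor_swap(S, (1, 2))
np.testing.assert_allclose(swapped.full(), J.full(), atol=1e-10)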
Example No. 24
def test_tensor_swap_other():
    dims = (2, 3, 4, 5, 7)

    for dim in dims:
        S = to_super(rand_super_bcsz(dim))

        # Swapping the inner indices on a superoperator should give a Choi matrix.
        J = to_choi(S)
        case_tensor_swap(S, [(1, 2)], [[[dim], [dim]], [[dim], [dim]]], J)
Example No. 25
        def case(map, state):
            S = to_super(map)
            A, B = to_stinespring(map)

            q1 = vector_to_operator(S * operator_to_vector(state))
            # FIXME: problem if Kraus index is implicitly
            #        ptraced!
            q2 = (A * state * B.dag()).ptrace((0, ))

            assert_((q1 - q2).norm('tr') <= thresh)
Example No. 26
def test_tensor_contract_ident():
    qobj = identity([2, 3, 4])
    ans = 3 * identity([2, 4])

    assert_(ans == tensor_contract(qobj, (1, 4)))

    # Now try for superoperators.
    # For now, we just ensure the dims are correct.
    sqobj = to_super(qobj)
    correct_dims = [[[2, 4], [2, 4]], [[2, 4], [2, 4]]]
    assert_equal(correct_dims, tensor_contract(sqobj, (1, 4), (7, 10)).dims)
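For reference, the contraction pairs above index the flattened dims of the superoperator. A minimal sketch spelling that out (same functions as the test):

from qutip import identity, to_super, tensor_contract

sqobj = to_super(identity([2, 3, 4]))
# Flat subsystem indices of sqobj.dims run 0..11:
#   output ket: 0, 1, 2    output bra: 3, 4, 5
#   input  ket: 6, 7, 8    input  bra: 9, 10, 11
# Contracting (1, 4) and (7, 10) traces out the middle (dim-3) factor on
# the output and input sides respectively.
contracted = tensor_contract(sqobj, (1, 4), (7, 10))
assert contracted.dims == [[[2, 4], [2, 4]], [[2, 4], [2, 4]]]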
Example No. 27
        def case(map, state):
            S = to_super(map)
            A, B = to_stinespring(map)

            q1 = vector_to_operator(
                S * operator_to_vector(state)
            )
            # FIXME: problem if Kraus index is implicitly
            #        ptraced!
            q2 = (A * state * B.dag()).ptrace((0,))

            assert_((q1 - q2).norm('tr') <= thresh)
Example No. 28
    def test_SuperChoiSuper(self, superoperator):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """

        choi_matrix = to_choi(superoperator)
        test_supe = to_super(choi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_supe - superoperator).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert test_supe.type == "super" and test_supe.superrep == "super"
Example No. 29
def test_unitarity_known():
    """
    Metrics: Unitarity for known cases.
    """
    def case(q_oper, known_unitarity):
        assert_almost_equal(unitarity(q_oper), known_unitarity)

    yield case, to_super(sigmax()), 1.0
    yield case, sum(map(
        to_super, [qeye(2), sigmax(), sigmay(), sigmaz()]
    )) / 4, 0.0
    yield case, sum(map(
        to_super, [qeye(2), sigmax()]
    )) / 2, 1 / 3.0
Example No. 30
    def test_SuperChoiSuper(self):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """
        superoperator = rand_super()

        choi_matrix = to_choi(superoperator)
        test_supe = to_super(choi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example No. 31
    def test_chi_known(self):
        """
        Superoperator: Chi-matrix for known cases is correct.
        """
        def case(S, chi_expected, silent=True):
            chi_actual = to_chi(S)
            chiq = Qobj(chi_expected,
                        dims=[[[2], [2]], [[2], [2]]],
                        superrep='chi')
            if not silent:
                print(chi_actual)
                print(chi_expected)
            assert_almost_equal((chi_actual - chiq).norm('tr'), 0)

        yield case, sigmax(), [[0, 0, 0, 0], [0, 4, 0, 0], [0, 0, 0, 0],
                               [0, 0, 0, 0]]
        yield case, to_super(sigmax()), [[0, 0, 0, 0], [0, 4, 0, 0],
                                         [0, 0, 0, 0], [0, 0, 0, 0]]
        yield case, qeye(2), [[4, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
                              [0, 0, 0, 0]]
        yield case, (-1j * sigmax() * pi / 4).expm(), [[2, 2j, 0, 0],
                                                       [-2j, 2, 0, 0],
                                                       [0, 0, 0, 0],
                                                       [0, 0, 0, 0]]
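Note the normalization in the expected matrices: each of these trace-preserving qubit channels has a chi matrix of trace 4 (that is, d²), and a Pauli gate concentrates that weight on a single diagonal element. A minimal check for the X gate, using the same conversion function:

from qutip import sigmax, to_chi

chi = to_chi(sigmax())
assert abs(chi[1, 1] - 4) < 1e-10   # all weight on the X-X element
assert abs(chi.tr() - 4) < 1e-10    # trace equals d**2 = 4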
Example No. 32
def test_dag_preserves_superrep():
    """
    Checks that dag() preserves superrep.
    """

    def case(qobj):
        orig_superrep = qobj.superrep
        assert_equal(qobj.dag().superrep, orig_superrep)

    for dim in (2, 4, 8):
        qobj = rand_super_bcsz(dim)
        yield case, to_super(qobj)
        # These two shouldn't even do anything, since qobj
        # is hermiticity-preserving.
        yield case, to_choi(qobj)
        yield case, to_chi(qobj)
Example No. 33
    def test_SuperChoiChiSuper(self):
        """
        Superoperator: Converting two-qubit superoperator through
        Choi and chi representations goes back to right superoperator.
        """
        superoperator = super_tensor(rand_super(2), rand_super(2))

        choi_matrix = to_choi(superoperator)
        chi_matrix = to_chi(choi_matrix)
        test_supe = to_super(chi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(chi_matrix.type == "super" and chi_matrix.superrep == "chi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example No. 34
    def test_stinespring_agrees(self, dimension):
        """
        Stinespring: Partial Tr over pair agrees w/ supermatrix.
        """

        map = rand_super_bcsz(dimension)
        state = rand_dm_ginibre(dimension)

        S = to_super(map)
        A, B = to_stinespring(map)

        q1 = vector_to_operator(S * operator_to_vector(state))
        # FIXME: problem if Kraus index is implicitly
        #        ptraced!
        q2 = (A * state * B.dag()).ptrace((0, ))

        assert (q1 - q2).norm('tr') <= tol
Example No. 35
    def test_SuperChoiChiSuper(self):
        """
        Superoperator: Converting two-qubit superoperator through
        Choi and chi representations goes back to right superoperator.
        """
        superoperator = super_tensor(rand_super(2), rand_super(2))

        choi_matrix = to_choi(superoperator)
        chi_matrix = to_chi(choi_matrix)
        test_supe = to_super(chi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert_((test_supe - superoperator).norm() < tol)
        assert_(choi_matrix.type == "super" and choi_matrix.superrep == "choi")
        assert_(chi_matrix.type == "super" and chi_matrix.superrep == "chi")
        assert_(test_supe.type == "super" and test_supe.superrep == "super")
Example No. 36
    def test_NonSquareKrausSuperChoi(self):
        """
        Superoperator: Convert non-square Kraus operator to Super + Choi
        matrix and back.
        """
        zero = asarray([[1], [0]], dtype=complex)
        one = asarray([[0], [1]], dtype=complex)
        zero_log = kron(kron(zero, zero), zero)
        one_log = kron(kron(one, one), one)
        # non-square Kraus operator (isometry)
        kraus = Qobj(zero_log @ zero.T + one_log @ one.T)
        super = sprepost(kraus, kraus.dag())
        choi = to_choi(super)
        op1 = to_kraus(super)
        op2 = to_kraus(choi)
        op3 = to_super(choi)
        assert_(choi.type == "super" and choi.superrep == "choi")
        assert_(super.type == "super" and super.superrep == "super")
        assert_((op1[0] - kraus).norm() < 1e-8)
        assert_((op2[0] - kraus).norm() < 1e-8)
        assert_((op3 - super).norm() < 1e-8)
Example No. 37
    def test_chi_known(self):
        """
        Superoperator: Chi-matrix for known cases is correct.
        """
        def case(S, chi_expected, silent=True):
            chi_actual = to_chi(S)
            chiq = Qobj(chi_expected, dims=[[[2], [2]], [[2], [2]]], superrep='chi')
            if not silent:
                print(chi_actual)
                print(chi_expected)
            assert_almost_equal((chi_actual - chiq).norm('tr'), 0)

        yield case, sigmax(), [
            [0, 0, 0, 0],
            [0, 4, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]
        ]
        yield case, to_super(sigmax()), [
            [0, 0, 0, 0],
            [0, 4, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]
        ]
        yield case, qeye(2), [
            [4, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]
        ]
        yield case, (-1j * sigmax() * pi / 4).expm(), [
            [2, 2j, 0, 0],
            [-2j, 2, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]
        ]
Example No. 38
def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None,
           label_top=True):
    """Draws a Hinton diagram for visualizing a density matrix or superoperator.

    Parameters
    ----------
    rho : qobj
        Input density matrix or superoperator.

    xlabels : list of strings or False
        list of x labels

    ylabels : list of strings or False
        list of y labels

    title : string
        title of the plot (optional)

    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.

    cmap : a matplotlib colormap instance
        Color map to use when plotting.

    label_top : bool
        If True, x-axis labels will be placed on top, otherwise
        they will appear below the plot.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not a quantum object.

    """

    # Apply default colormaps.
    # TODO: abstract this away into something that makes default
    #       colormaps.
    cmap = (
        (cm.Greys_r if settings.colorblind_safe else cm.RdBu)
        if cmap is None else cmap
    )

    # Extract plotting data W from the input.
    if isinstance(rho, Qobj):
        if rho.isoper:
            W = rho.full()

            # Create default labels if none are given.
            if xlabels is None or ylabels is None:
                labels = _cb_labels(rho.dims[0])
                xlabels = xlabels if xlabels is not None else list(labels[0])
                ylabels = ylabels if ylabels is not None else list(labels[1])

        elif rho.isoperket:
            W = vector_to_operator(rho).full()
        elif rho.isoperbra:
            W = vector_to_operator(rho.dag()).full()
        elif rho.issuper:
            if not _isqubitdims(rho.dims):
                raise ValueError("Hinton plots of superoperators are "
                                 "currently only supported for qubits.")
            # Convert to a superoperator in the Pauli basis,
            # so that all the elements are real.
            sqobj = to_super(rho)
            nq = int(np.log2(sqobj.shape[0]) / 2)
            B = _pauli_basis(nq) / np.sqrt(2**nq)
            # To do this, we have to hack a bit and force the dims to match,
            # since the _pauli_basis function makes different assumptions
            # about indices than we need here.
            B.dims = sqobj.dims
            sqobj = B.dag() * sqobj * B
            W = sqobj.full().T
            # Create default labels, too.
            if (xlabels is None) or (ylabels is None):
                labels = list(map("".join, it.product("IXYZ", repeat=nq)))
                xlabels = xlabels if xlabels is not None else labels
                ylabels = ylabels if ylabels is not None else labels

        else:
            raise ValueError(
                "Input quantum object must be an operator or superoperator."
            )

    else:
        W = rho

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    else:
        fig = None

    if not (xlabels or ylabels):
        ax.axis('off')

    ax.axis('equal')
    ax.set_frame_on(False)

    height, width = W.shape

    w_max = 1.25 * max(abs(np.diag(np.matrix(W))))
    if w_max <= 0.0:
        w_max = 1.0

    ax.fill(array([0, width, width, 0]), array([0, 0, height, height]),
            color=cmap(128))
    for x in range(width):
        for y in range(height):
            _x = x + 1
            _y = y + 1
            if np.real(W[x, y]) > 0.0:
                _blob(_x - 0.5, height - _y + 0.5, abs(W[x,
                      y]), w_max, min(1, abs(W[x, y]) / w_max), cmap=cmap)
            else:
                _blob(_x - 0.5, height - _y + 0.5, -abs(W[
                      x, y]), w_max, min(1, abs(W[x, y]) / w_max), cmap=cmap)

    # color axis
    norm = mpl.colors.Normalize(-abs(W).max(), abs(W).max())
    cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1)
    mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap)

    # x axis
    ax.xaxis.set_major_locator(plt.IndexLocator(1, 0.5))

    if xlabels:
        ax.set_xticklabels(xlabels)
        if label_top:
            ax.xaxis.tick_top()
    ax.tick_params(axis='x', labelsize=14)

    # y axis
    ax.yaxis.set_major_locator(plt.IndexLocator(1, 0.5))
    if ylabels:
        ax.set_yticklabels(list(reversed(ylabels)))
    ax.tick_params(axis='y', labelsize=14)

    return fig, ax
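A minimal usage sketch for the function above (assuming it is exposed as qutip.hinton and that matplotlib can open a figure window):

import matplotlib.pyplot as plt
from qutip import rand_dm, hinton

rho = rand_dm(4)            # a random 4x4 density matrix
fig, ax = hinton(rho)       # returns the matplotlib figure and axes
ax.set_title("Hinton diagram of a random density matrix")
plt.show()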
Example No. 39
def rand_super_bcsz(N=2, enforce_tp=True, rank=None, dims=None):
    """
    Returns a random superoperator drawn from the Bruzda
    et al ensemble for CPTP maps [BCSZ08]_. Note that due to
    finite numerical precision, for ranks less than full-rank,
    zero eigenvalues may become slightly negative, such that the
    returned operator is not actually completely positive.


    Parameters
    ----------
    N : int
        Square root of the dimension of the superoperator to be returned.
    enforce_tp : bool
        If True, the trace-preserving condition of [BCSZ08]_ is enforced;
        otherwise only complete positivity is enforced.
    rank : int or None
        Rank of the sampled superoperator. If None, a full-rank
        superoperator is generated.
    dims : list
        Dimensions of quantum object.  Used for specifying
        tensor structure. Default is dims=[[[N],[N]], [[N],[N]]].

    Returns
    -------
    rho : Qobj
        A superoperator acting on vectorized N × N density operators,
        sampled from the BCSZ distribution.
    """
    if dims is not None:
        # TODO: check!
        pass
    else:
        dims = [[[N],[N]], [[N],[N]]]

    if rank is None:
        rank = N**2
    if rank > N**2:
        raise ValueError("Rank cannot exceed superoperator dimension.")

    # We use mainly dense matrices here for speed in low
    # dimensions. In the future, it would likely be better to switch off
    # between sparse and dense matrices as the dimension grows.

    # We start with a Ginibre uniform matrix X of the appropriate rank,
    # and use it to construct a positive semidefinite matrix X X⁺.
    X = randnz((N**2, rank), norm='ginibre')

    # Precompute X X⁺, as we'll need it in two different places.
    XXdag = np.dot(X, X.T.conj())

    if enforce_tp:
        # We do the partial trace over the first index by using dense reshape
        # operations, so that we can avoid bouncing to a sparse representation
        # and back.
        Y = np.einsum('ijik->jk', XXdag.reshape((N, N, N, N)))

        # Now we have the matrix 𝟙 ⊗ Y^{-1/2}, which we can find by doing
        # the square root and the inverse separately. As a possible improvement,
        # iterative methods exist to find inverse square root matrices directly,
        # as this is important in statistics.
        Z = np.kron(
            np.eye(N),
            sqrtm(la.inv(Y))
        )

        # Finally, we dot everything together and pack it into a Qobj,
        # marking the dimensions as that of a type=super (that is,
        # with left and right compound indices, each representing
        # left and right indices on the underlying Hilbert space).
        D = Qobj(np.dot(Z, np.dot(XXdag, Z)))
    else:
        D = N * Qobj(XXdag / np.trace(XXdag))

    D.dims = [
        # Left dims
        [[N], [N]],
        # Right dims
        [[N], [N]]
    ]

    # Since [BCSZ08] gives a row-stacking Choi matrix, but QuTiP
    # expects a column-stacking Choi matrix, we must permute the indices.
    D = D.permute([[1], [0]])

    D.dims = dims

    # Mark that we've made a Choi matrix.
    D.superrep = 'choi'

    return sr.to_super(D)
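A minimal usage sketch for the generator above, limited to properties the docstring guarantees (recall the caveat that reduced-rank samples may not be reported as CP because of slightly negative eigenvalues):

from qutip import rand_super_bcsz

S_full = rand_super_bcsz(2)          # full-rank CPTP channel on a qubit
S_low = rand_super_bcsz(2, rank=1)   # rank-1 (single Kraus operator) sample

assert S_full.superrep == 'super'    # returned as a column-stacked supermatrix
assert S_full.istp                   # trace preservation enforced by default
assert S_low.dims == [[[2], [2]], [[2], [2]]]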
Example No. 40
    def test_known_iscptp(self):
        """
        Superoperator: ishp, iscp, istp and iscptp known cases.
        """
        def case(qobj, shouldhp, shouldcp, shouldtp):
            hp = qobj.ishp
            cp = qobj.iscp
            tp = qobj.istp
            cptp = qobj.iscptp

            shouldcptp = shouldcp and shouldtp

            if (hp == shouldhp and cp == shouldcp and tp == shouldtp
                    and cptp == shouldcptp):
                return

            fails = []
            if hp != shouldhp:
                fails.append(("ishp", shouldhp, hp))
            if tp != shouldtp:
                fails.append(("istp", shouldtp, tp))
            if cp != shouldcp:
                fails.append(("iscp", shouldcp, cp))
            if cptp != shouldcptp:
                fails.append(("iscptp", shouldcptp, cptp))

            raise AssertionError("Expected {}.".format(" and ".join([
                "{} == {} (got {})".format(fail, expected, got)
                for fail, expected, got in fails
            ])))

        # Conjugation by a creation operator should
        # be CP (and hence HP), but not TP.
        a = create(2).dag()
        S = sprepost(a, a.dag())
        case(S, True, True, False)

        # A single off-diagonal element should not be CP,
        # nor even HP.
        S = sprepost(a, a)
        case(S, False, False, False)

        # Check that unitaries are CPTP and HP.
        case(identity(2), True, True, True)
        case(sigmax(), True, True, True)

        # Check that unitaries on bipartite systems are CPTP and HP.
        case(tensor(sigmax(), identity(2)), True, True, True)

        # Check that a linear combination of bipartite unitaries is CPTP and HP.
        S = (to_super(tensor(sigmax(), identity(2))) +
             to_super(tensor(identity(2), sigmay()))) / 2
        case(S, True, True, True)

        # The partial transpose map, whose Choi matrix is SWAP, is TP
        # and HP but not CP (one negative eigenvalue).
        W = Qobj(swap(), type='super', superrep='choi')
        case(W, True, False, True)

        # Subnormalized maps (representing erasure channels, for instance)
        # can be CP but not TP.
        subnorm_map = Qobj(identity(4) * 0.9, type='super', superrep='super')
        case(subnorm_map, True, True, False)

        # Check that things which aren't even operators aren't identified as
        # CPTP.
        case(basis(2), False, False, False)
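One of the cases above spelled out on its own: the map whose Choi matrix is SWAP (the partial transpose map) is hermiticity- and trace-preserving but not completely positive. A minimal sketch that builds SWAP directly as a NumPy array instead of importing a gate function:

import numpy as np
from qutip import Qobj

SWAP = np.array([[1, 0, 0, 0],
                 [0, 0, 1, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1]], dtype=complex)

W = Qobj(SWAP, dims=[[[2], [2]], [[2], [2]]], type='super', superrep='choi')
assert W.ishp and W.istp        # hermiticity- and trace-preserving
assert not W.iscp               # SWAP has a negative eigenvalue, so not CP
assert not W.iscptp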
Example No. 41
def test_dnorm_qubit_known_cases():
    """
    Metrics: check agreement for known qubit channels.
    """
    def case(chan1, chan2, expected, significant=4):
        # We again take a generous tolerance so that we don't kill off
        # SCS solvers.
        assert_approx_equal(dnorm(chan1, chan2),
                            expected,
                            significant=significant)

    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    X_chan = to_choi(sigmax())
    depol = to_choi(
        Qobj(diag(ones((4, ))), dims=[[[2], [2]], [[2], [2]]], superrep='chi'))
    S_H = to_super(hadamard_transform())

    W = swap()

    # We need to restrict the number of iterations for things on the boundary,
    # such as perfectly distinguishable channels.
    yield case, id_chan, X_chan, 2
    yield case, id_chan, depol, 1.5

    # Next, we'll generate some test cases based on comparisons to pre-existing
    # dnorm() implementations. In particular, the targets for the following
    # test cases were generated using QuantumUtils for MATLAB (https://goo.gl/oWXhO9).

    def overrotation(x):
        return to_super((1j * np.pi * x * sigmax() / 2).expm())

    for x, target in {
            1.000000e-03: 3.141591e-03,
            3.100000e-03: 9.738899e-03,
            1.000000e-02: 3.141463e-02,
            3.100000e-02: 9.735089e-02,
            1.000000e-01: 3.128689e-01,
            3.100000e-01: 9.358596e-01
    }.items():
        yield case, overrotation(x), id_chan, target

    def had_mixture(x):
        return (1 - x) * S_eye + x * S_H

    for x, target in {
            1.000000e-03: 2.000000e-03,
            3.100000e-03: 6.200000e-03,
            1.000000e-02: 2.000000e-02,
            3.100000e-02: 6.200000e-02,
            1.000000e-01: 2.000000e-01,
            3.100000e-01: 6.200000e-01
    }.items():
        yield case, had_mixture(x), id_chan, target

    def swap_map(x):
        S = (1j * x * W).expm()
        S._type = None
        S.dims = [[[2], [2]], [[2], [2]]]
        S.superrep = 'super'
        return S

    for x, target in {
            1.000000e-03: 2.000000e-03,
            3.100000e-03: 6.199997e-03,
            1.000000e-02: 1.999992e-02,
            3.100000e-02: 6.199752e-02,
            1.000000e-01: 1.999162e-01,
            3.100000e-01: 6.173918e-01
    }.items():
        yield case, swap_map(x), id_chan, target

    # Finally, we add a known case from Johnston's QETLAB documentation,
    # || Phi - I ||_♢ where Phi(X) = UXU⁺ and U = [[1, 1], [-1, 1]] / sqrt(2).
    yield case, Qobj([[1, 1], [-1, 1]]) / np.sqrt(2), qeye(2), np.sqrt(2)
Example No. 42
def had_mixture(x):
    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    S_H = to_super(hadamard_transform())
    return (1 - x) * S_eye + x * S_H
Example No. 43
def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False):
    """
    Calculates the diamond norm of the quantum map q_oper, using
    the simplified semidefinite program of [Wat12]_.

    The diamond norm SDP is solved by using CVXPY_.

    Parameters
    ----------
    A : Qobj
        Quantum map to take the diamond norm of.
    B : Qobj or None
        If provided, the diamond norm of :math:`A - B` is
        taken instead.
    solver : str
        Solver to use with CVXPY. One of "CVXOPT" (default)
        or "SCS". The latter tends to be significantly faster,
        but somewhat less accurate.
    verbose : bool
        If True, prints additional information about the
        solution.
    force_solve : bool
        If True, forces dnorm to solve the associated SDP, even if a special
        case is known for the argument.

    Returns
    -------
    dn : float
        Diamond norm of q_oper.

    Raises
    ------
    ImportError
        If CVXPY cannot be imported.

    .. _cvxpy: http://www.cvxpy.org/en/latest/
    """
    if cvxpy is None:  # pragma: no cover
        raise ImportError("dnorm() requires CVXPY to be installed.")

    # We follow the strategy of using Watrous' simpler semidefinite
    # program in its primal form. This is the same strategy used,
    # for instance, by both pyGSTi and SchattenNorms.jl. (By contrast,
    # QETLAB uses the dual problem.)

    # Check if A and B are both unitaries. If so, then we can without
    # loss of generality choose B to be the identity by using the
    # unitary invariance of the diamond norm,
    #     || A - B ||_♢ = || A B⁺ - I ||_♢.
    # Then, using the technique mentioned by each of Johnston and
    # da Silva,
    #     || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |,
    # where \lambda_i(U) is the ith eigenvalue of U.

    if (
        # There's a lot of conditions to check for this path.
        not force_solve and B is not None and
        # Only check if they aren't superoperators.
        A.type == "oper" and B.type == "oper" and
        # The difference of unitaries optimization is currently
        # only implemented for d == 2. Much of the code below is more general,
        # though, in anticipation of generalizing the optimization.
        A.shape[0] == 2
    ):
        # Make an identity the same size as A and B to
        # compare against.
        I = qeye(A.dims[0])
        # Compare to B first, so that an error is raised
        # as soon as possible.
        Bd = B.dag()
        if (
            (B * Bd - I).norm() < 1e-6 and
            (A * A.dag() - I).norm() < 1e-6
        ):
            # Now we are on the fast path, so let's compute the
            # eigenvalues, then find the diameter of the smallest circle
            # containing all of them.
            #
            # For now, this is only implemented for dim = 2, such that
            # generalizing here will allow for generalizing the optimization.
            # A reasonable approach would probably be to use Welzl's algorithm
            # (https://en.wikipedia.org/wiki/Smallest-circle_problem).
            U = A * B.dag()
            eigs = U.eigenenergies()
            eig_distances = np.abs(eigs[:, None] - eigs[None, :])
            return np.max(eig_distances)

    # Force the input superoperator to be a Choi matrix.
    J = to_choi(A)
    
    if B is not None:
        J -= to_choi(B)

    # Watrous 2012 also points out that the diamond norm of Lambda
    # is the same as the completely-bounded operator-norm (∞-norm)
    # of the dual map of Lambda. We can evaluate that norm much more
    # easily if Lambda is completely positive, since then the largest
    # eigenvalue is the same as the largest singular value.
    
    if not force_solve and J.iscp:
        S_dual = to_super(J.dual_chan())
        vec_eye = operator_to_vector(qeye(S_dual.dims[1][1]))
        op = vector_to_operator(S_dual * vec_eye)
        # The 2-norm was not implemented for sparse matrices as of the time
        # of this writing. Thus, we must yet again go dense.
        return la.norm(op.data.todense(), 2)
    
    # If we're still here, we need to actually solve the problem.

    # Assume square...
    dim = np.prod(J.dims[0][0])
    
    # The constraints only depend on the dimension, so
    # we can cache them efficiently.
    problem, Jr, Ji, X, rho0, rho1 = dnorm_problem(dim)
    
    # Load the parameters with the Choi matrix passed in.
    J_dat = J.data
    
    Jr.value = sp.csr_matrix((J_dat.data.real, J_dat.indices, J_dat.indptr), 
                             shape=J_dat.shape)
   
    Ji.value = sp.csr_matrix((J_dat.data.imag, J_dat.indices, J_dat.indptr),
                             shape=J_dat.shape)
    # Finally, set up and run the problem.
    problem.solve(solver=solver, verbose=verbose)
    
    return problem.value
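A minimal usage sketch, grounded in the known-case test earlier (which expects a diamond-norm distance of 2 between the identity and bit-flip channels). CVXPY must be installed even though this particular pair of unitaries takes the fast eigenvalue path; the import path below is an assumption about the module layout:

from qutip import qeye, sigmax
from qutip.metrics import dnorm   # assumed import path

# Perfectly distinguishable qubit channels: identity vs. bit flip.
print(dnorm(qeye(2), sigmax()))   # ~2.0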
Example No. 44
def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None,
           label_top=True):
    """Draws a Hinton diagram for visualizing a density matrix or superoperator.

    Parameters
    ----------
    rho : qobj
        Input density matrix or superoperator.

    xlabels : list of strings or False
        list of x labels

    ylabels : list of strings or False
        list of y labels

    title : string
        title of the plot (optional)

    ax : a matplotlib axes instance
        The axes context in which the plot will be drawn.

    cmap : a matplotlib colormap instance
        Color map to use when plotting.

    label_top : bool
        If True, x-axis labels will be placed on top, otherwise
        they will appear below the plot.

    Returns
    -------
    fig, ax : tuple
        A tuple of the matplotlib figure and axes instances used to produce
        the figure.

    Raises
    ------
    ValueError
        Input argument is not a quantum object.

    """

    # Apply default colormaps.
    # TODO: abstract this away into something that makes default
    #       colormaps.
    cmap = (
        (cm.Greys_r if settings.colorblind_safe else cm.RdBu)
        if cmap is None else cmap
    )

    # Extract plotting data W from the input.
    if isinstance(rho, Qobj):
        if rho.isoper:
            W = rho.full()

            # Create default labels if none are given.
            if xlabels is None or ylabels is None:
                labels = _cb_labels(rho.dims[0])
                xlabels = xlabels if xlabels is not None else list(labels[0])
                ylabels = ylabels if ylabels is not None else list(labels[1])

        elif rho.isoperket:
            W = vector_to_operator(rho).full()
        elif rho.isoperbra:
            W = vector_to_operator(rho.dag()).full()
        elif rho.issuper:
            if not _isqubitdims(rho.dims):
                raise ValueError("Hinton plots of superoperators are "
                                 "currently only supported for qubits.")
            # Convert to a superoperator in the Pauli basis,
            # so that all the elements are real.
            sqobj = to_super(rho)
            nq = int(np.log2(sqobj.shape[0]) / 2)
            B = _pauli_basis(nq) / np.sqrt(2**nq)
            # To do this, we have to hack a bit and force the dims to match,
            # since the _pauli_basis function makes different assumptions
            # about indices than we need here.
            B.dims = sqobj.dims
            sqobj = B.dag() * sqobj * B
            W = sqobj.full()
            # Create default labels, too.
            if (xlabels is None) or (ylabels is None):
                labels = list(map("".join, it.product("IXYZ", repeat=nq)))
                xlabels = xlabels if xlabels is not None else labels
                ylabels = ylabels if ylabels is not None else labels

        else:
            raise ValueError(
                "Input quantum object must be an operator or superoperator."
            )

    else:
        W = rho

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    else:
        fig = None

    if not (xlabels or ylabels):
        ax.axis('off')

    ax.axis('equal')
    ax.set_frame_on(False)

    height, width = W.shape

    w_max = 1.25 * max(abs(np.diag(np.matrix(W))))
    if w_max <= 0.0:
        w_max = 1.0

    ax.fill(array([0, width, width, 0]), array([0, 0, height, height]),
            color=cmap(128))
    for x in range(width):
        for y in range(height):
            _x = x + 1
            _y = y + 1
            if np.real(W[x, y]) > 0.0:
                _blob(_x - 0.5, height - _y + 0.5, abs(W[x,
                      y]), w_max, min(1, abs(W[x, y]) / w_max), cmap=cmap)
            else:
                _blob(_x - 0.5, height - _y + 0.5, -abs(W[
                      x, y]), w_max, min(1, abs(W[x, y]) / w_max), cmap=cmap)

    # color axis
    norm = mpl.colors.Normalize(-abs(W).max(), abs(W).max())
    cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1)
    mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap)

    # x axis
    ax.xaxis.set_major_locator(plt.IndexLocator(1, 0.5))

    if xlabels:
        ax.set_xticklabels(xlabels)
        if label_top:
            ax.xaxis.tick_top()
    ax.tick_params(axis='x', labelsize=14)

    # y axis
    ax.yaxis.set_major_locator(plt.IndexLocator(1, 0.5))
    if ylabels:
        ax.set_yticklabels(list(reversed(ylabels)))
    ax.tick_params(axis='y', labelsize=14)

    return fig, ax
Example No. 45
def rand_super_bcsz(N=2, enforce_tp=True, rank=None, dims=None):
    """
    Returns a random superoperator drawn from the Bruzda
    et al ensemble for CPTP maps [BCSZ08]_. Note that due to
    finite numerical precision, for ranks less than full-rank,
    zero eigenvalues may become slightly negative, such that the
    returned operator is not actually completely positive.


    Parameters
    ----------
    N : int
        Square root of the dimension of the superoperator to be returned.
    enforce_tp : bool
        If True, the trace-preserving condition of [BCSZ08]_ is enforced;
        otherwise only complete positivity is enforced.
    rank : int or None
        Rank of the sampled superoperator. If None, a full-rank
        superoperator is generated.
    dims : list
        Dimensions of quantum object.  Used for specifying
        tensor structure. Default is dims=[[[N],[N]], [[N],[N]]].

    Returns
    -------
    rho : Qobj
        A superoperator acting on vectorized N × N density operators,
        sampled from the BCSZ distribution.
    """
    if dims is not None:
        # TODO: check!
        pass
    else:
        dims = [[[N], [N]], [[N], [N]]]

    if rank is None:
        rank = N ** 2
    if rank > N ** 2:
        raise ValueError("Rank cannot exceed superoperator dimension.")

    # We use mainly dense matrices here for speed in low
    # dimensions. In the future, it would likely be better to switch off
    # between sparse and dense matrices as the dimension grows.

    # We start with a Ginibre uniform matrix X of the appropriate rank,
    # and use it to construct a positive semidefinite matrix X X⁺.
    X = randnz((N ** 2, rank), norm="ginibre")

    # Precompute X X⁺, as we'll need it in two different places.
    XXdag = np.dot(X, X.T.conj())

    if enforce_tp:
        # We do the partial trace over the first index by using dense reshape
        # operations, so that we can avoid bouncing to a sparse representation
        # and back.
        Y = np.einsum("ijik->jk", XXdag.reshape((N, N, N, N)))

        # Now we have the matrix 𝟙 ⊗ Y^{-1/2}, which we can find by doing
        # the square root and the inverse separately. As a possible improvement,
        # iterative methods exist to find inverse square root matrices directly,
        # as this is important in statistics.
        Z = np.kron(np.eye(N), sqrtm(la.inv(Y)))

        # Finally, we dot everything together and pack it into a Qobj,
        # marking the dimensions as that of a type=super (that is,
        # with left and right compound indices, each representing
        # left and right indices on the underlying Hilbert space).
        D = Qobj(np.dot(Z, np.dot(XXdag, Z)))
    else:
        D = N * Qobj(XXdag / np.trace(XXdag))

    D.dims = [
        # Left dims
        [[N], [N]],
        # Right dims
        [[N], [N]],
    ]

    # Since [BCSZ08] gives a row-stacking Choi matrix, but QuTiP
    # expects a column-stacking Choi matrix, we must permute the indices.
    D = D.permute([[1], [0]])

    D.dims = dims

    # Mark that we've made a Choi matrix.
    D.superrep = "choi"

    return sr.to_super(D)
Example No. 46
    def overrotation(x):
        return to_super((1j * np.pi * x * sigmax() / 2).expm())
Example No. 47
def overrotation(x):
    return to_super((1j * np.pi * x * sigmax() / 2).expm())
Example No. 48
class TestSuperopReps:
    """
    A test class for the QuTiP function for applying superoperators to
    subsystems.
    """
    def test_SuperChoiSuper(self, superoperator):
        """
        Superoperator: Converting superoperator to Choi matrix and back.
        """

        choi_matrix = to_choi(superoperator)
        test_supe = to_super(choi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_supe - superoperator).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert test_supe.type == "super" and test_supe.superrep == "super"

    @pytest.mark.parametrize('dimension', [2, 4])
    def test_SuperChoiChiSuper(self, dimension):
        """
        Superoperator: Converting two-qubit superoperator through
        Choi and chi representations goes back to right superoperator.
        """
        superoperator = super_tensor(
            rand_super(dimension),
            rand_super(dimension),
        )

        choi_matrix = to_choi(superoperator)
        chi_matrix = to_chi(choi_matrix)
        test_supe = to_super(chi_matrix)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_supe - superoperator).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert chi_matrix.type == "super" and chi_matrix.superrep == "chi"
        assert test_supe.type == "super" and test_supe.superrep == "super"

    def test_ChoiKrausChoi(self, superoperator):
        """
        Superoperator: Convert superoperator to Choi matrix and back.
        """
        choi_matrix = to_choi(superoperator)
        kraus_ops = to_kraus(choi_matrix)
        test_choi = kraus_to_choi(kraus_ops)

        # Assert both that the result is close to expected, and has the right
        # type.
        assert (test_choi - choi_matrix).norm() < tol
        assert choi_matrix.type == "super" and choi_matrix.superrep == "choi"
        assert test_choi.type == "super" and test_choi.superrep == "choi"

    def test_NonSquareKrausSuperChoi(self):
        """
        Superoperator: Convert non-square Kraus operator to Super + Choi matrix
        and back.
        """
        zero = asarray([[1], [0]], dtype=complex)
        one = asarray([[0], [1]], dtype=complex)
        zero_log = kron(kron(zero, zero), zero)
        one_log = kron(kron(one, one), one)
        # non-square Kraus operator (isometry)
        kraus = Qobj(zero_log @ zero.T + one_log @ one.T)
        super = sprepost(kraus, kraus.dag())
        choi = to_choi(super)
        op1 = to_kraus(super)
        op2 = to_kraus(choi)
        op3 = to_super(choi)

        assert choi.type == "super" and choi.superrep == "choi"
        assert super.type == "super" and super.superrep == "super"
        assert (op1[0] - kraus).norm() < tol
        assert (op2[0] - kraus).norm() < tol
        assert (op3 - super).norm() < tol

    def test_NeglectSmallKraus(self):
        """
        Superoperator: Convert Kraus to Choi matrix and back. Neglect tiny
        Kraus operators.
        """
        zero = asarray([[1], [0]], dtype=complex)
        one = asarray([[0], [1]], dtype=complex)
        zero_log = kron(kron(zero, zero), zero)
        one_log = kron(kron(one, one), one)
        # non-square Kraus operator (isometry)
        kraus = Qobj(zero_log @ zero.T + one_log @ one.T)
        super = sprepost(kraus, kraus.dag())
        # 1 non-zero Kraus operator the rest are zero
        sixteen_kraus_ops = to_kraus(super, tol=0.0)
        # default is tol=1e-9
        one_kraus_op = to_kraus(super)
        assert len(sixteen_kraus_ops) == 16 and len(one_kraus_op) == 1
        assert (one_kraus_op[0] - kraus).norm() < tol

    def test_SuperPreservesSelf(self, superoperator):
        """
        Superoperator: to_super(q) returns q if q is already a
        supermatrix.
        """

        assert superoperator is to_super(superoperator)

    def test_ChoiPreservesSelf(self, superoperator):
        """
        Superoperator: to_choi(q) returns q if q is already Choi.
        """
        choi = to_choi(superoperator)
        assert choi is to_choi(choi)

    def test_random_iscptp(self, superoperator):
        """
        Superoperator: Randomly generated superoperators are
        correctly reported as CPTP and HP.
        """
        assert superoperator.iscptp
        assert superoperator.ishp

    # Conjugation by a creation operator
    a = create(2).dag()
    S = sprepost(a, a.dag())

    # A single off-diagonal element
    S_ = sprepost(a, a)

    # Check that a linear combination of bipartite unitaries is CPTP and HP.
    S_U = (to_super(tensor(sigmax(), identity(2))) +
           to_super(tensor(identity(2), sigmay()))) / 2

    # The partial transpose map, whose Choi matrix is SWAP
    ptr_swap = Qobj(swap(), type='super', superrep='choi')

    # Subnormalized maps (representing erasure channels, for instance)
    subnorm_map = Qobj(identity(4) * 0.9, type='super', superrep='super')

    @pytest.mark.parametrize(['qobj', 'shouldhp', 'shouldcp', 'shouldtp'], [
        pytest.param(S, True, True, False, id="conjugation by create op"),
        pytest.param(S_, False, False, False, id="single off-diag"),
        pytest.param(identity(2), True, True, True, id="Identity"),
        pytest.param(sigmax(), True, True, True, id="Pauli X"),
        pytest.param(
            tensor(sigmax(), identity(2)),
            True,
            True,
            True,
            id="bipartite system",
        ),
        pytest.param(
            S_U,
            True,
            True,
            True,
            id="linear combination of bip. unitaries",
        ),
        pytest.param(ptr_swap, True, False, True, id="partial transpose map"),
        pytest.param(subnorm_map, True, True, False, id="subnorm map"),
        pytest.param(basis(2), False, False, False, id="not an operator"),
    ])
    def test_known_iscptp(self, qobj, shouldhp, shouldcp, shouldtp):
        """
        Superoperator: ishp, iscp, istp and iscptp known cases.
        """
        assert qobj.ishp == shouldhp
        assert qobj.iscp == shouldcp
        assert qobj.istp == shouldtp
        assert qobj.iscptp == (shouldcp and shouldtp)

    def test_choi_tr(self, dimension):
        """
        Superoperator: Trace returned by to_choi matches docstring.
        """
        assert abs(to_choi(identity(dimension)).tr() - dimension) <= tol

    def test_stinespring_cp(self, dimension):
        """
        Stinespring: A and B match for CP maps.
        """
        superop = rand_super_bcsz(dimension)
        A, B = to_stinespring(superop)

        assert norm(A - B) < tol

    @pytest.mark.repeat(3)
    def test_stinespring_agrees(self, dimension):
        """
        Stinespring: Partial Tr over pair agrees w/ supermatrix.
        """

        map = rand_super_bcsz(dimension)
        state = rand_dm_ginibre(dimension)

        S = to_super(map)
        A, B = to_stinespring(map)

        q1 = vector_to_operator(S * operator_to_vector(state))
        # FIXME: problem if Kraus index is implicitly
        #        ptraced!
        q2 = (A * state * B.dag()).ptrace((0, ))

        assert (q1 - q2).norm('tr') <= tol

    def test_stinespring_dims(self, dimension):
        """
        Stinespring: Check that dims of channels are preserved.
        """
        chan = super_tensor(to_super(sigmax()), to_super(qeye(dimension)))
        A, B = to_stinespring(chan)
        assert A.dims == [[2, dimension, 1], [2, dimension]]
        assert B.dims == [[2, dimension, 1], [2, dimension]]

    @pytest.mark.parametrize('dimension', [2, 4, 8])
    def test_chi_choi_roundtrip(self, dimension):
        """
        Superoperator: chi -> choi -> chi round trip preserves the
        superoperator, its type, and its dims.
        """
        superop = rand_super_bcsz(dimension)
        superop = to_chi(superop)
        rt_superop = to_chi(to_choi(superop))
        dif = norm(rt_superop - superop)

        assert dif == pytest.approx(0, abs=1e-7)
        assert rt_superop.type == superop.type
        assert rt_superop.dims == superop.dims

    chi_sigmax = [[0, 0, 0, 0], [0, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    chi_diag2 = [[4, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    rotX_pi_4 = (-1j * sigmax() * pi / 4).expm()
    chi_rotX_pi_4 = [[2, 2j, 0, 0], [-2j, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]

    @pytest.mark.parametrize(['superop', 'chi_expected'], [
        pytest.param(sigmax(), chi_sigmax),
        pytest.param(to_super(sigmax()), chi_sigmax),
        pytest.param(qeye(2), chi_diag2),
        pytest.param(rotX_pi_4, chi_rotX_pi_4)
    ])
    def test_chi_known(self, superop, chi_expected):
        """
        Superoperator: Chi-matrix for known cases is correct.
        """
        chi_actual = to_chi(superop)
        chiq = Qobj(
            chi_expected,
            dims=[[[2], [2]], [[2], [2]]],
            superrep='chi',
        )
        assert (chi_actual - chiq).norm() < tol
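
The expected chi matrix for the X rotation above can be reproduced from the Pauli decomposition of the unitary. The overall factor of d² = 4 is inferred from the expected matrices themselves (chi_diag2 puts 4 in the identity slot for qeye(2)), so treat that normalisation as an assumption rather than a documented convention. A plain NumPy sketch:

import numpy as np

# Single-qubit Pauli basis.
I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

# rotX_pi_4 = exp(-1j * pi * X / 4) = (I - 1j * X) / sqrt(2).
U = (I2 - 1j * X) / np.sqrt(2)

# Pauli coefficients c_i = Tr(P_i U) / 2, so that U = sum_i c_i P_i.
c = np.array([np.trace(P @ U) / 2 for P in (I2, X, Y, Z)])

# For a unitary channel, the chi matrix (in this convention) is 4 * c c^dagger.
chi = 4 * np.outer(c, c.conj())

expected = np.array([[2, 2j, 0, 0], [-2j, 2, 0, 0],
                     [0, 0, 0, 0], [0, 0, 0, 0]])
assert np.allclose(chi, expected)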
Example No. 49
0
def test_dnorm_qubit_known_cases():
    """
    Metrics: check agreement for known qubit channels.
    """
    def case(chan1, chan2, expected, significant=4):
        # We again take a generous tolerance so that we don't kill off
        # SCS solvers.
        assert_approx_equal(
            dnorm(chan1, chan2), expected,
            significant=significant
        )

    id_chan = to_choi(qeye(2))
    S_eye = to_super(id_chan)
    X_chan = to_choi(sigmax())
    depol = to_choi(Qobj(
        diag(ones((4,))),
        dims=[[[2], [2]], [[2], [2]]], superrep='chi'
    ))
    S_H = to_super(hadamard_transform())

    W = swap()

    # We need to restrict the number of iterations for things on the boundary,
    # such as perfectly distinguishable channels.
    yield case, id_chan, X_chan, 2
    yield case, id_chan, depol, 1.5

    # Next, we'll generate some test cases based on comparisons to pre-existing
    # dnorm() implementations. In particular, the targets for the following
    # test cases were generated using QuantumUtils for MATLAB (https://goo.gl/oWXhO9).

    def overrotation(x):
        return to_super((1j * np.pi * x * sigmax() / 2).expm())

    for x, target in {
        1.000000e-03: 3.141591e-03,
        3.100000e-03: 9.738899e-03,
        1.000000e-02: 3.141463e-02,
        3.100000e-02: 9.735089e-02,
        1.000000e-01: 3.128689e-01,
        3.100000e-01: 9.358596e-01
    }.items():
        yield case, overrotation(x), id_chan, target

    def had_mixture(x):
        return (1 - x) * S_eye + x * S_H

    for x, target in {
        1.000000e-03: 2.000000e-03,
        3.100000e-03: 6.200000e-03,
        1.000000e-02: 2.000000e-02,
        3.100000e-02: 6.200000e-02,
        1.000000e-01: 2.000000e-01,
        3.100000e-01: 6.200000e-01
    }.items():
        yield case, had_mixture(x), id_chan, target

    def swap_map(x):
        S = (1j * x * W).expm()
        S._type = None
        S.dims = [[[2], [2]], [[2], [2]]]
        S.superrep = 'super'
        return S

    for x, target in {
        1.000000e-03: 2.000000e-03,
        3.100000e-03: 6.199997e-03,
        1.000000e-02: 1.999992e-02,
        3.100000e-02: 6.199752e-02,
        1.000000e-01: 1.999162e-01,
        3.100000e-01: 6.173918e-01
    }.items():
        yield case, swap_map(x), id_chan, target

    # Finally, we add a known case from Johnston's QETLAB documentation,
    # || Phi - I ||_♢, where Phi(X) = UXU⁺ and U = [[1, 1], [-1, 1]] / sqrt(2).
    yield case, Qobj([[1, 1], [-1, 1]]) / np.sqrt(2), qeye(2), np.sqrt(2)
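
The overrotation targets above can be cross-checked against the closed form suggested by the eigenvalue-distance argument used in dnorm (next example): the eigenvalues of exp(1j * pi * x * sigmax() / 2) are exp(±1j * pi * x / 2), so its diamond-norm distance to the identity channel should be 2 * sin(pi * x / 2). A minimal check with plain NumPy, reusing the tabulated values:

import numpy as np

overrotation_targets = {
    1.000000e-03: 3.141591e-03,
    3.100000e-03: 9.738899e-03,
    1.000000e-02: 3.141463e-02,
    3.100000e-02: 9.735089e-02,
    1.000000e-01: 3.128689e-01,
    3.100000e-01: 9.358596e-01,
}
for x, target in overrotation_targets.items():
    # Eigenvalues of exp(1j * pi * x * X / 2) are exp(+-1j * pi * x / 2),
    # so the maximum pairwise distance is 2 * sin(pi * x / 2).
    assert abs(2 * np.sin(np.pi * x / 2) - target) < 1e-5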
Example No. 50
0
def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False,
          sparse=True):
    """
    Calculates the diamond norm of the quantum map A, using
    the simplified semidefinite program of [Wat12]_.

    The diamond norm SDP is solved by using CVXPY_.

    Parameters
    ----------
    A : Qobj
        Quantum map to take the diamond norm of.
    B : Qobj or None
        If provided, the diamond norm of :math:`A - B` is taken instead.
    solver : str
        Solver to use with CVXPY. One of "CVXOPT" (default) or "SCS". The
        latter tends to be significantly faster, but somewhat less accurate.
    verbose : bool
        If True, prints additional information about the solution.
    force_solve : bool
        If True, forces dnorm to solve the associated SDP, even if a special
        case is known for the argument.
    sparse : bool
        Whether to use sparse matrices in the convex optimisation problem.
        Default True.

    Returns
    -------
    dn : float
        Diamond norm of A, or of A - B if B was provided.

    Raises
    ------
    ImportError
        If CVXPY cannot be imported.

    .. _cvxpy: http://www.cvxpy.org/en/latest/
    """
    if cvxpy is None:  # pragma: no cover
        raise ImportError("dnorm() requires CVXPY to be installed.")

    # We follow the strategy of using Watrous' simpler semidefinite
    # program in its primal form. This is the same strategy used,
    # for instance, by both pyGSTi and SchattenNorms.jl. (By contrast,
    # QETLAB uses the dual problem.)

    # Check if A and B are both unitaries. If so, then we can without
    # loss of generality choose B to be the identity by using the
    # unitary invariance of the diamond norm,
    #     || A - B ||_♢ = || A B⁺ - I ||_♢.
    # Then, using the technique mentioned by each of Johnston and
    # da Silva,
    #     || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |,
    # where \lambda_i(U) is the ith eigenvalue of U.
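    # For example, if A = exp(1j * theta * sigmax()) and B = qeye(2), the
    # eigenvalues of A B⁺ are exp(±1j * theta), so the formula gives
    # 2 * sin(theta); this matches the overrotation targets tabulated in the
    # test example above.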

    if (
        # There's a lot of conditions to check for this path.
        not force_solve and B is not None and
        # This path only applies when A and B are plain operators, not
        # superoperators.
        A.type == "oper" and B.type == "oper" and
        # The difference of unitaries optimization is currently
        # only implemented for d == 2. Much of the code below is more general,
        # though, in anticipation of generalizing the optimization.
        A.shape[0] == 2
    ):
        # Make an identity the same size as A and B to
        # compare against.
        I = qeye(A.dims[0])
        # Compare to B first, so that an error is raised
        # as soon as possible.
        Bd = B.dag()
        if (
            (B * Bd - I).norm() < 1e-6 and
            (A * A.dag() - I).norm() < 1e-6
        ):
            # Now we are on the fast path, so let's compute the
            # eigenvalues, then find the diameter of the smallest circle
            # containing all of them.
            #
            # For now, this is only implemented for dim = 2, such that
            # generalizing here will allow for generalizing the optimization.
            # A reasonable approach would probably be to use Welzl's algorithm
            # (https://en.wikipedia.org/wiki/Smallest-circle_problem).
            U = A * B.dag()
            eigs = U.eigenenergies()
            eig_distances = np.abs(eigs[:, None] - eigs[None, :])
            return np.max(eig_distances)

    # Force the input superoperator to be a Choi matrix.
    J = to_choi(A)

    if B is not None:
        J -= to_choi(B)

    # Watrous 2012 also points out that the diamond norm of Lambda
    # is the same as the completely-bounded operator-norm (∞-norm)
    # of the dual map of Lambda. We can evaluate that norm much more
    # easily if Lambda is completely positive, since then the largest
    # eigenvalue is the same as the largest singular value.
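    # (If the map is also trace preserving, its dual channel is unital, so the
    # 2-norm computed below is that of the identity: the diamond norm of any
    # CPTP channel is exactly 1.)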

    if not force_solve and J.iscp:
        S_dual = to_super(J.dual_chan())
        vec_eye = operator_to_vector(qeye(S_dual.dims[1][1]))
        op = vector_to_operator(S_dual * vec_eye)
        # The 2-norm was not implemented for sparse matrices as of the time
        # of this writing. Thus, we must yet again go dense.
        return la.norm(op.data.todense(), 2)

    # If we're still here, we need to actually solve the problem.

    # Assume square...
    dim = np.prod(J.dims[0][0])

    J_dat = J.data

    if not sparse:
        # The parameters and constraints only depend on the dimension, so
        # we can cache them efficiently.
        problem, Jr, Ji = dnorm_problem(dim)

        # Load the parameters with the Choi matrix passed in.
        Jr.value = sp.csr_matrix((J_dat.data.real, J_dat.indices,
                                  J_dat.indptr),
                                 shape=J_dat.shape).toarray()

        Ji.value = sp.csr_matrix((J_dat.data.imag, J_dat.indices,
                                  J_dat.indptr),
                                 shape=J_dat.shape).toarray()
    else:

        # The parameters do not depend solely on the dimension,
        # so we cannot cache them efficiently.
        problem = dnorm_sparse_problem(dim, J_dat)

    problem.solve(solver=solver, verbose=verbose)

    return problem.value
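
A minimal usage sketch for the dnorm defined above (it assumes the QuTiP imports used throughout these examples; both cases are handled by the exact fast paths rather than the SDP, so a tight tolerance is safe):

from qutip import qeye, sigmax, rand_super_bcsz

# Identity vs. bit flip: perfectly distinguishable channels, so the norm is 2.
assert abs(dnorm(qeye(2), sigmax()) - 2) < 1e-6

# A CPTP map on its own has diamond norm 1 (CP fast path: its dual channel is
# unital, so the 2-norm taken above is that of the identity).
assert abs(dnorm(rand_super_bcsz(2)) - 1) < 1e-6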
Example No. 51
0
    def test_known_iscptp(self):
        """
        Superoperator: ishp, iscp, istp and iscptp known cases.
        """
        def case(qobj, shouldhp, shouldcp, shouldtp):
            hp = qobj.ishp
            cp = qobj.iscp
            tp = qobj.istp
            cptp = qobj.iscptp

            shouldcptp = shouldcp and shouldtp

            if (
                hp == shouldhp and
                cp == shouldcp and
                tp == shouldtp and
                cptp == shouldcptp
            ):
                return

            fails = []
            if hp != shouldhp:
                fails.append(("ishp", shouldhp, hp))
            if tp != shouldtp:
                fails.append(("istp", shouldtp, tp))
            if cp != shouldcp:
                fails.append(("iscp", shouldcp, cp))
            if cptp != shouldcptp:
                fails.append(("iscptp", shouldcptp, cptp))

            raise AssertionError("Expected {}.".format(" and ".join([
                "{} == {} (got {})".format(fail, expected, got)
                for fail, expected, got in fails
            ])))

        # Conjugation by a creation operator should
        # be CP (and hence HP), but not TP.
        a = create(2).dag()
        S = sprepost(a, a.dag())
        case(S, True, True, False)

        # A single off-diagonal element should not be CP,
        # nor even HP.
        S = sprepost(a, a)
        case(S, False, False, False)
        
        # Check that unitaries are CPTP and HP.
        case(identity(2), True, True, True)
        case(sigmax(), True, True, True)

        # Check that unitaries on bipartite systems are CPTP and HP.
        case(tensor(sigmax(), identity(2)), True, True, True)

        # Check that a linear combination of bipartite unitaries is CPTP and HP.
        S = (to_super(tensor(sigmax(), identity(2))) +
             to_super(tensor(identity(2), sigmay()))) / 2
        case(S, True, True, True)

        # The partial transpose map, whose Choi matrix is SWAP, is TP
        # and HP but not CP (one negative eigenvalue).
        W = Qobj(swap(), type='super', superrep='choi')
        case(W, True, False, True)

        # Subnormalized maps (representing erasure channels, for instance)
        # can be CP but not TP.
        subnorm_map = Qobj(identity(4) * 0.9, type='super', superrep='super')
        case(subnorm_map, True, True, False)

        # Check that things which aren't even operators aren't identified as
        # CPTP.
        case(basis(2), False, False, False)