Example #1
def kron(A, B):
    if isinstance(A, np.ndarray):
        A = _arr_to_complex(A)
    if isinstance(B, np.ndarray):
        B = _arr_to_complex(B)

    return Complex(
        re=(cvxpy.kron(A.re, B.re) - cvxpy.kron(A.im, B.im)),
        im=(cvxpy.kron(A.im, B.re) + cvxpy.kron(A.re, B.im)),
    )
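The real/imaginary split used here follows directly from expanding (A.re + i*A.im) ⊗ (B.re + i*B.im). A quick NumPy check of that identity (not part of the snippet above, which relies on its own Complex wrapper and _arr_to_complex helper):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((2, 3)) + 1j * rng.standard_normal((2, 3))
B = rng.standard_normal((4, 2)) + 1j * rng.standard_normal((4, 2))
K = np.kron(A, B)
# Real part: kron(Re A, Re B) - kron(Im A, Im B)
assert np.allclose(K.real, np.kron(A.real, B.real) - np.kron(A.imag, B.imag))
# Imaginary part: kron(Im A, Re B) + kron(Re A, Im B)
assert np.allclose(K.imag, np.kron(A.imag, B.real) + np.kron(A.real, B.imag))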
Example #2
def kron(A, B):
    if isinstance(A, np.ndarray):
        A = _arr_to_complex(A)
    if isinstance(B, np.ndarray):
        B = _arr_to_complex(B)
        
    return Complex(
        re=(cvxpy.kron(A.re, B.re) - cvxpy.kron(A.im, B.im)),
        im=(cvxpy.kron(A.im, B.re) + cvxpy.kron(A.re, B.im)),
    )
Example #3
    def test_kron(self):
        """Test the kron atom.
        """
        a = np.ones((3, 2))
        b = Parameter((2, 1), nonneg=True)
        expr = cp.kron(a, b)
        assert expr.is_nonneg()
        self.assertEqual(expr.shape, (6, 2))
        b = Parameter((2, 1), nonpos=True)
        expr = cp.kron(a, b)
        assert expr.is_nonpos()
        with self.assertRaises(Exception) as cm:
            cp.kron(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "The first argument to kron must be constant.")
Example #4
    def symvar_kronl(self, param):
        # Use a symmetric matrix variable
        X = cp.Variable(shape=(2, 2), symmetric=True)
        b_val = 1.5 * np.ones((1, 1))
        if param:
            b = cp.Parameter(shape=(1, 1))
            b.value = b_val
        else:
            b = cp.Constant(b_val)
        L = np.array([[0.5, 1], [2, 3]])
        U = np.array([[10, 11], [12, 13]])
        kronX = cp.kron(X, b)  # equals 1.5 * X, since b is a 1-by-1 block with value 1.5

        objective = cp.Minimize(cp.sum(X.flatten()))
        constraints = [U >= kronX, kronX >= L]
        prob = cp.Problem(objective, constraints)
        prob.solve()

        self.assertItemsAlmostEqual(X.value,
                                    np.array([[0.5, 2], [2, 3]]) / 1.5)
        objective = cp.Maximize(cp.sum(X.flatten()))
        prob = cp.Problem(objective, constraints)
        prob.solve()
        self.assertItemsAlmostEqual(X.value,
                                    np.array([[10, 11], [11, 13]]) / 1.5)
        pass
Example #5
    def linear_constraint_Alice(self):

        # Create dimension tuple for the partial tracing
        sub_dim = (self.dimT,
                   self.dimT**(self.n1 + self.n2 - 1) * self.dimS**2)

        for q1 in range(self.dimQ1):
            for index_else in self.indices_but_A1Q1:
                indices_A1q1a2q2_ext = [
                    np.append(np.array([a1, q1]), index_else)
                    for a1 in range(self.dimA1)
                ]
                indices_A1Q1a2q2_ext = [
                    np.append(index_A1Q1, index_else)
                    for index_A1Q1 in self.indices_A1Q1
                ]

                lhs = sum([
                    self.rho_variable[self.StI(index)]
                    for index in indices_A1q1a2q2_ext
                ])

                rhs_variable = sum([
                    self.rho_variable[self.StI(index)]
                    for index in indices_A1Q1a2q2_ext
                ])
                rhs_partial = nlg.partial_trace(rhs_variable, sub_dim)
                rhs = self.probQ1[q1] * cp.kron(self.rhoT, rhs_partial)

                self.constraints.append(lhs - rhs == 0)
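Each constraint ties an lhs block to rhoT ⊗ (a partial trace of the rhs block). nlg.partial_trace is the source's own helper and operates on CVXPY expressions; its exact convention for which subsystem is traced out is defined there. Purely as an illustration of the linear map involved, a NumPy version that traces out the first factor of a (d1*d2) x (d1*d2) matrix with sub_dim = (d1, d2) could look like this:

import numpy as np

def partial_trace_first(M, d1, d2):
    # Trace out the first tensor factor: result[b, b'] = sum_a M[(a, b), (a, b')].
    return np.trace(M.reshape(d1, d2, d1, d2), axis1=0, axis2=2)

# Check against a Kronecker product: Tr_1(A ⊗ B) = trace(A) * B.
A = np.random.rand(2, 2); B = np.random.rand(3, 3)
assert np.allclose(partial_trace_first(np.kron(A, B), 2, 3), np.trace(A) * B)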
Example #6
def policy_unU2(G, R, L, d, x_prev, un_U, gamma):
    # A greedy policy that maximizes the next-step return using un_U.
    [n,A]=R.shape
    [m,temp]=d.shape

    Q = cvxpy.Variable(n,A)
    M = cvxpy.Variable(n,n)
    x = cvxpy.Variable(n,1)

    Kr = cvxpy.kron(np.ones((A,1)),np.eye(n))

    # Create the constraints.
    constraints = [-M + cvxpy.mul_elemwise(G,(np.ones((n, 1)) * cvxpy.vec(Q).T)) * Kr == 0,
                   L * x -  d <= 0,
                   x - M * x_prev == 0,
                   Q * np.ones((A, 1)) - np.ones((n, 1)) == 0,
                   Q >= 0, 
                   x >= 0,
                   x.T * np.ones((n, 1)) - 1 == 0
                   ]

    # Form objective.
    obj = cvxpy.Minimize(-(x.T) * un_U)

    # Form and solve problem.
    prob = cvxpy.Problem(obj, constraints)
    prob.solve(solver = cvxpy.ECOS, verbose = False,  max_iters = 5000)
#    prob.solve(solver = cvxpy.SCS, verbose = False) 

    return Q.value, M.value, x.value
Example #7
    def make_kron_prob(z_dims: Tuple[int], c_dims: Tuple[int], param: bool,
                       var_left: bool, seed: int):
        """
        Construct random nonnegative matrices (C, L) of shapes
        (c_dims, z_dims) respectively. Define an optimization
        problem with a matrix variable of shape z_dims:

            min sum(Z)
            s.t.  kron(Z, C) >= kron(L, C)   ---   if var_left is True
                  kron(C, Z) >= kron(C, L)   ---   if var_left is False
                  Z >= 0

        Regardless of whether var_left is True or False, the optimal
        solution to that problem is Z = L.

        If param is True, then C is defined as a CVXPY Parameter.
        If param is False, then C is a CVXPY Constant.

        A small remark: the constraint that Z >= 0 is redundant.
        It's there because it's easier to set break points that distinguish
        objective canonicalization and constraint canonicalization
        when there's more than one constraint.
        """
        np.random.seed(seed)
        C_value = np.random.rand(*c_dims).round(decimals=2)
        if param:
            C = cp.Parameter(shape=c_dims)
            C.value = C_value
        else:
            C = cp.Constant(C_value)
        Z = cp.Variable(shape=z_dims)
        L = np.random.rand(*Z.shape).round(decimals=2)
        if var_left:
            constraints = [cp.kron(Z, C) >= cp.kron(L, C), Z >= 0]
            # The cvxcore function get_kronl_mat doesn't work when C is a Parameter.
            # We get around this by having kron be non-dpp, but this comes at
            # the price of eliminating the speed benefit of using Parameter objects.
            # We'll eventually need to extend get_kronl_mat so that it supports
            # Parameters. Until then, I'll make a note that tests here DO PASS
            # with the existing get_kronl_mat implementation if we use the following
            # constraints: [cp.kron(Z - L, C) >= 0, Z >= 0].
        else:
            constraints = [cp.kron(C, Z) >= cp.kron(C, L), Z >= 0]
        obj_expr = cp.sum(Z)
        prob = cp.Problem(cp.Minimize(obj_expr), constraints)
        return Z, C, L, prob
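A minimal usage sketch for the helper above, assuming it is reachable as a plain function (in the source it sits inside a test class) and that a default LP-capable solver is installed:

import numpy as np

Z, C, L, prob = make_kron_prob(z_dims=(2, 2), c_dims=(2, 3), param=False,
                               var_left=False, seed=0)
prob.solve()
# Per the docstring, the optimal solution is Z = L.
assert np.allclose(Z.value, L, atol=1e-4)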
Example #8
def diamond_norm(chan0: Channel, chan1: Channel) -> float:
    """Return the diamond norm between two completely positive
    trace-preserving (CPTP) superoperators.

    Note: Requires "cvxpy" package (and dependencies) to be fully installed.

    The calculation uses the simplified semidefinite program of Watrous
    [arXiv:0901.4709](http://arxiv.org/abs/0901.4709)
    [J. Watrous, [Theory of Computing 5, 11, pp. 217-238
    (2009)](http://theoryofcomputing.org/articles/v005a011/)]
    """
    # Kudos: Based on MATLAB code written by Marcus P. da Silva
    # (https://github.com/BBN-Q/matlab-diamond-norm/)
    import cvxpy as cvx

    if set(chan0.qubits) != set(chan1.qubits):
        raise ValueError("Channels must operate on same qubits")

    if chan0.qubits != chan1.qubits:
        chan1 = chan1.permute(chan0.qubits)

    N = chan0.qubit_nb
    dim = 2**N

    choi0 = chan0.choi()
    choi1 = chan1.choi()

    delta_choi = choi0 - choi1

    # Density matrix must be Hermitian, positive semidefinite, trace 1
    rho = cvx.Variable([dim, dim], complex=True)
    constraints = [rho == rho.H]
    constraints += [rho >> 0]
    constraints += [cvx.trace(rho) == 1]

    # W must be Hermitian, positive semidefinite
    W = cvx.Variable([dim**2, dim**2], complex=True)
    constraints += [W == W.H]
    constraints += [W >> 0]

    constraints += [(W - cvx.kron(np.eye(dim), rho)) << 0]

    J = cvx.Parameter([dim**2, dim**2], complex=True)
    objective = cvx.Maximize(cvx.real(cvx.trace(J.H * W)))

    prob = cvx.Problem(objective, constraints)

    J.value = delta_choi
    prob.solve()

    dnorm = prob.value * 2

    # Diamond norm is between 0 and 2. Correct for floating point errors
    dnorm = min(2, dnorm)
    dnorm = max(0, dnorm)

    return dnorm
Example #9
def diamond_norm(J, dimA, dimB, display=False):
    '''
    Computes the diamond norm of a superoperator with Choi representation J.
    dimA is the dimension of the input space of the channel, and dimB is the
    dimension of the output space.

    The form of the SDP used comes from Theorem 3.1 of:
        
        'Simpler semidefinite programs for completely bounded norms',
            Chicago Journal of Theoretical Computer Science 2013,
            by John Watrous
    '''
    '''
    The Choi representation J in the above paper is defined using a different
    convention:
        J=(N\otimes I)(|Phi^+><Phi^+|).
    In other words, the channel N acts on the first half of the maximally-
    entangled state, while the convention used throughout this code stack
    is
        J=(I\otimes N)(|Phi^+><Phi^+|).
    We thus use syspermute to convert to the form used in the aforementioned
    paper.
    '''

    J = syspermute(J, [2, 1], [dimA, dimB])

    X = cvx.Variable((dimA * dimB, dimA * dimB), complex=True)  # off-diagonal block; must be allowed to be complex
    rho0 = cvx.Variable((dimA, dimA), PSD=True)
    rho1 = cvx.Variable((dimA, dimA), PSD=True)

    M = cvx.bmat([[cvx.kron(eye(dimB), rho0), X],
                  [X.H, cvx.kron(eye(dimB), rho1)]])

    c = []
    c += [M >> 0, cvx.trace(rho0) == 1, cvx.trace(rho1) == 1]

    obj = cvx.Maximize((1 / 2) * cvx.real(cvx.trace(dag(J) @ X)) +
                       (1 / 2) * cvx.real(cvx.trace(J @ X.H)))

    prob = cvx.Problem(obj, constraints=c)

    prob.solve(verbose=display, eps=1e-7)

    return prob.value
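syspermute is defined elsewhere in that code base; a minimal NumPy sketch of the [2, 1] subsystem swap it performs on a (dimA*dimB)-dimensional operator might look like this (name and signature are illustrative):

import numpy as np

def swap_subsystems(J, dimA, dimB):
    # Split row and column indices into their (A, B) tensor factors, swap the
    # factors, and flatten back: (A ⊗ B) ordering -> (B ⊗ A) ordering.
    J4 = J.reshape(dimA, dimB, dimA, dimB)
    return J4.transpose(1, 0, 3, 2).reshape(dimA * dimB, dimA * dimB)

# Sanity check on a Kronecker product: swapping the factors of kron(A, B) gives kron(B, A).
A = np.arange(4).reshape(2, 2)
B = np.arange(9).reshape(3, 3)
assert np.allclose(swap_subsystems(np.kron(A, B), 2, 3), np.kron(B, A))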
Example #10
def diamond_norm_distance(choi0: np.ndarray, choi1: np.ndarray) -> float:
    """
    Return the diamond norm distance between two completely positive
    trace-preserving (CPTP) superoperators, represented as Choi matrices.

    The calculation uses the simplified semidefinite program of Watrous in [CBN]_

    .. note::

        This calculation becomes very slow for 4 or more qubits.

    .. [CBN] Semidefinite programs for completely bounded norms.
          J. Watrous.
          Theory of Computing 5, 11, pp. 217-238 (2009).
          http://theoryofcomputing.org/articles/v005a011
          http://arxiv.org/abs/0901.4709

    :param choi0: A 4**N by 4**N matrix (where N is the number of qubits)
    :param choi1: A 4**N by 4**N matrix (where N is the number of qubits)

    """
    # Kudos: Based on MATLAB code written by Marcus P. da Silva
    # (https://github.com/BBN-Q/matlab-diamond-norm/)
    import cvxpy as cvx
    assert choi0.shape == choi1.shape
    assert choi0.shape[0] == choi1.shape[1]
    dim_squared = choi0.shape[0]
    dim = int(np.sqrt(dim_squared))

    delta_choi = choi0 - choi1
    delta_choi = (delta_choi.conj().T + delta_choi) / 2  # Enforce Hermiticity

    # Density matrix must be Hermitian, positive semidefinite, trace 1
    rho = cvx.Variable([dim, dim], complex=True)
    constraints = [rho == rho.H]
    constraints += [rho >> 0]
    constraints += [cvx.trace(rho) == 1]

    # W must be Hermitian, positive semidefinite
    W = cvx.Variable([dim_squared, dim_squared], complex=True)
    constraints += [W == W.H]
    constraints += [W >> 0]

    constraints += [(W - cvx.kron(np.eye(dim), rho)) << 0]

    J = cvx.Parameter([dim_squared, dim_squared], complex=True)
    objective = cvx.Maximize(cvx.real(cvx.trace(J.H * W)))

    prob = cvx.Problem(objective, constraints)

    J.value = delta_choi
    prob.solve()

    dnorm = prob.value * 2

    return dnorm
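A small end-to-end check of the function above (not from the source, and assuming an SDP-capable solver is installed and a CVXPY version that still accepts the snippet's use of * for matrix multiplication): for two perfectly distinguishable single-qubit unitary channels, such as the identity and the X gate, the diamond distance is 2. The Choi matrices below use the unnormalized convention built from |Omega> = |00> + |11>; for this particular pair the result does not depend on the row-vec/col-vec convention.

import numpy as np

omega = np.zeros((4, 1)); omega[0] = omega[3] = 1.0                 # |00> + |11>
choi_id = omega @ omega.T                                           # Choi matrix of the identity channel
X = np.array([[0., 1.], [1., 0.]])
choi_x = np.kron(np.eye(2), X) @ choi_id @ np.kron(np.eye(2), X)    # Choi matrix of the X channel
assert np.isclose(diamond_norm_distance(choi_id, choi_x), 2.0, atol=1e-2)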
Example #11
def dual_problem(q_a: np.ndarray, pperm: np.ndarray, num_reps: int) -> float:
    """
    Dual problem for counterfeit attack.

    :return: The optimal value of performing a counterfeit attack.
    """
    y_var = cvxpy.Variable((2**num_reps, 2**num_reps), hermitian=True)
    objective = cvxpy.Minimize(cvxpy.trace(cvxpy.real(y_var)))

    kron_var = cvxpy.kron(cvxpy.kron(np.eye(2**num_reps), np.eye(2**num_reps)),
                          y_var)

    if num_reps == 1:
        constraints = [cvxpy.real(kron_var) >> q_a]
    else:
        constraints = [cvxpy.real(kron_var) >> pperm @ q_a @ pperm.conj().T]
    problem = cvxpy.Problem(objective, constraints)

    return problem.solve()
Example #12
def trainLinearRegressor(featuresMat, targetDeltas):
    nSamples, nFeats = featuresMat.shape
    nOutputs = targetDeltas.shape[1]
    b = cvxpy.Variable(nOutputs)
    R = cvxpy.Variable(nOutputs, nFeats)
    residuals = featuresMat * R.T + cvxpy.kron(cvxpy.Constant(np.ones((nSamples))), b) - targetDeltas
    func = cvxpy.sum_squares(residuals)
    prob = cvxpy.Problem(cvxpy.Minimize(func))
    prob.solve(verbose=False)
    return R.value, b.value
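The kron term above is a broadcasting trick: the Kronecker product of a column of ones with the bias tiles the bias once per sample so it can be added to the residual matrix. The snippet itself uses the legacy CVXPY 0.x calling conventions; the intent of the kron term, shown with plain NumPy:

import numpy as np

b_row = np.array([[0.1, 0.2, 0.3]])              # 1 x nOutputs bias
tiled = np.kron(np.ones((4, 1)), b_row)          # 4 x nOutputs, one copy of the bias per row
assert np.allclose(tiled, np.tile(b_row, (4, 1)))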
Example #13
def _custom_kron(A, B):
    """ cp.kron only works if the cp.variable array is the second argument """
    # Second argument is not constant, can do cp.kron as normal
    if not isinstance(B, np.ndarray) and not isinstance(B, numbers.Number):
        return cp.kron(A, B)
    # First argument is not constant, swap the order
    elif not isinstance(A, np.ndarray) and not isinstance(A, numbers.Number):
        # find dimensions of both matrices
        dAL, dAR = A.shape
        # if B is not an np array, it must be a number, in which case its dimension is 1
        if isinstance(B, np.ndarray):
            dBL, dBR = B.shape
        else:
            dBL = 1
            dBR = 1
        swap_left = swap(int(dBL), int(dAL))
        swap_right = swap(int(dBR), int(dAR)).T
        return swap_left @ cp.kron(B, A) @ swap_right
    # Both elements are constant, can do np.kron as normal
    else:
        return np.kron(A, B)
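The swap-and-conjugate trick above relies on the identity kron(A, B) = S_left @ kron(B, A) @ S_right, where the S matrices permute the interleaved row and column blocks. A NumPy verification with an explicit permutation matrix standing in for the snippet's swap helper (the helper below is illustrative, not the source's implementation):

import numpy as np

def _swap_matrix(d1, d2):
    # Permutation on R^(d1*d2) sending e_i (x) e_j (i < d1, j < d2) to e_j (x) e_i.
    S = np.zeros((d1 * d2, d1 * d2))
    for i in range(d1):
        for j in range(d2):
            S[j * d1 + i, i * d2 + j] = 1.0
    return S

A = np.random.rand(2, 3)
B = np.random.rand(4, 5)
S_left = _swap_matrix(B.shape[0], A.shape[0])
S_right = _swap_matrix(B.shape[1], A.shape[1]).T
assert np.allclose(np.kron(A, B), S_left @ np.kron(B, A) @ S_right)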
Example #14
    def __optimize_bob(self, rho) -> Tuple[Dict, float]:
        """Fix Alice's measurements and optimize over Bob's measurements."""
        # Get number of inputs and outputs.
        (
            dim,
            _,
            num_outputs_alice,
            num_outputs_bob,
            num_inputs_alice,
            num_inputs_bob,
        ) = self.pred_mat.shape

        # The cvxpy package does not support optimizing over 4-dimensional objects.
        # To overcome this, we use a dictionary to index between the questions and
        # answers, while the cvxpy variables held at these positions are
        # `dim`-by-`dim` cvxpy variables.
        bob_povms = defaultdict(cvxpy.Variable)
        for y_ques in range(num_inputs_bob):
            for b_ans in range(num_outputs_bob):
                bob_povms[y_ques, b_ans] = cvxpy.Variable((dim, dim), hermitian=True)
        win = 0
        for x_ques in range(num_inputs_alice):
            for y_ques in range(num_inputs_bob):
                for a_ans in range(num_outputs_alice):
                    for b_ans in range(num_outputs_bob):
                        win += self.prob_mat[x_ques, y_ques] * cvxpy.trace(
                            (
                                cvxpy.kron(
                                    self.pred_mat[:, :, a_ans, b_ans, x_ques, y_ques],
                                    bob_povms[y_ques, b_ans],
                                )
                            )
                            @ rho[x_ques, a_ans].value
                        )
        objective = cvxpy.Maximize(cvxpy.real(win))

        constraints = list()

        # Sum over "b" for all "y" for Bob's measurements.
        for y_ques in range(num_inputs_bob):
            bob_sum_b = 0
            for b_ans in range(num_outputs_bob):
                bob_sum_b += bob_povms[y_ques, b_ans]
                constraints.append(bob_povms[y_ques, b_ans] >> 0)
            constraints.append(bob_sum_b == np.identity(dim))  # POVM completeness on the dim-dimensional space

        problem = cvxpy.Problem(objective, constraints)

        lower_bound = problem.solve()
        return bob_povms, lower_bound
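The dictionary-of-variables workaround described in the comments generalizes to any situation where a CVXPY variable with more than two dimensions would otherwise be needed. In isolation, the pattern looks like this (the dimensions here are arbitrary):

import numpy as np
import cvxpy

povms = {(y, b): cvxpy.Variable((2, 2), hermitian=True)
         for y in range(2) for b in range(2)}
# Completeness constraint per question y: the POVM elements sum to the identity.
completeness = [sum(povms[y, b] for b in range(2)) == np.eye(2) for y in range(2)]
positivity = [povms[y, b] >> 0 for y in range(2) for b in range(2)]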
Example #15
def policy_unU(G, R, L, d, x_prev, un_U, gamma):
    [n,A]=R.shape
    [m,temp]=d.shape

    Q = cvxpy.Variable(n,A)
    M = cvxpy.Variable(n,n)
    S = cvxpy.Variable(m,n)
    K = cvxpy.Variable(m,m)
#    U = cvxpy.Variable(n,1)
#    y = cvxpy.Variable(m,1)
#    r = cvxpy.Variable(n,1)
    xi = cvxpy.Variable(m,1)
#    z = cvxpy.Variable(1,1)
    x = cvxpy.Variable(n,1)

    Kr = cvxpy.kron(np.ones((A,1)),np.eye(n))

    # Create the constraints.
    constraints = [#(d.T) * y - z <= opt_ref,
             -M + cvxpy.mul_elemwise(G,(np.ones((n, 1)) * cvxpy.vec(Q).T)) * Kr == 0,
#                   -r + cvxpy.mul_elemwise(R,Q) * np.ones((A, 1)) == 0,
#                   -L.T * y + z * np.ones((n, 1)) - U_ref <= 0,
#                   -U + r + gamma * M.T * U_next == 0,
                   -K * L + L * M  + xi * np.ones((1, n)) <= 0,
                   xi + d - K * d >= 0,
                   x - M * x_prev == 0,
                   Q * np.ones((A, 1)) - np.ones((n, 1)) == 0,
                   Q >= 0, #np.zeros((n, A)),
 #                  y >= 0, # np.zeros((m, 1)),
                   K >= 0, #np.zeros((m, m)),
                   x >= 0,
                   x.T * np.ones((n, 1)) - 1 == 0
                   ]

    # Form objective.
    obj = cvxpy.Minimize(-(x.T) * un_U)

    # Form and solve problem.
    prob = cvxpy.Problem(obj, constraints)
#    prob.solve(solver = cvxpy.MOSEK, verbose = True)
    prob.solve(solver = cvxpy.ECOS, verbose = False,  max_iters = 1000)
#    prob.solve(solver = cvxpy.SCS, verbose = False) 

    return Q.value, M.value, x.value
Example #16
def policy(G, R, L, d, un_Q, un_M, un_U, U_next, U_ref, opt_ref, gamma):
    [n,A]=R.shape
    [m,temp]=d.shape

    Q = cvxpy.Variable(n,A)
    M = cvxpy.Variable(n,n)
    S = cvxpy.Variable(m,n)
    K = cvxpy.Variable(m,m)
    U = cvxpy.Variable(n,1)
    y = cvxpy.Variable(m,1)
    r = cvxpy.Variable(n,1)
    xi = cvxpy.Variable(m,1)
    z = cvxpy.Variable(1,1)

    Kr = cvxpy.kron(np.ones((A,1)),np.eye(n))


    # Create the constraints.
    constraints = [(d.T) * y - z <= opt_ref,
                   -M + cvxpy.mul_elemwise(G,(np.ones((n, 1)) * cvxpy.vec(Q).T)) * Kr == 0,
                   -r + cvxpy.mul_elemwise(R,Q) * np.ones((A, 1)) == 0,
                   -L.T * y + z * np.ones((n, 1)) - U_ref <= 0,
                   -U + r + gamma * M.T * U_next == 0,
                   -K * L + L * M + S + xi * np.ones((1, n)) == 0,
                   xi + d - K * d >= 0,
                   Q * np.ones((A, 1)) - np.ones((n, 1)) == 0,
                   Q >= np.zeros((n, A)),
                   y >= np.zeros((m, 1)),
                   S >= np.zeros((m, n)),
                   K >= np.zeros((m, m)),
                   M >= np.zeros((n, n))
                   ]

    # Form objective.
    obj = cvxpy.Minimize(cvxpy.norm((Q - un_Q), 'fro'))
#    obj = cvxpy.Minimize(cvxpy.norm((M - un_M), 'fro'))
#    obj = cvxpy.Minimize(cvxpy.norm((U - un_U)))

    # Form and solve problem.
    prob = cvxpy.Problem(obj, constraints)
    prob.solve(solver = cvxpy.ECOS, verbose = False, max_iters = 2000, feastol = 1e-4, reltol = 1e-4, abstol = 1e-4)
#    prob.solve(solver = cvxpy.CVXOPT, verbose = True)

    return U.value, Q.value, M.value
Example #17
    def linear_constraint_Bob(self):

        # Permutation matrix (T1...Tn1)(T1...Tn2)(SS) -> (Tn2...T1)(T1...Tn1)(SS)
        order = np.arange(self.n1 + self.n2 + 2)

        maskA = order[:self.n1]
        maskB = np.flip(order[self.n1:self.n1 + self.n2])
        maskS = order[self.n1 + self.n2:]
        mask = np.concatenate((maskB, maskA, maskS))

        P = cp.Constant(nlg.permutation_matrix(order, mask,
                                               self.subs_TTSS_ext))

        # Create dimension tuple for the partial tracing
        sub_dim = (self.dimT,
                   self.dimT**(self.n1 + self.n2 - 1) * self.dimS**2)

        for q2 in range(self.dimQ2):
            for index_else in self.indices_but_A2Q2:
                indices_a1q1A2q2_ext = [
                    np.append(index_else, np.array([a2, q2]))
                    for a2 in range(self.dimA2)
                ]
                indices_a1q1A2Q2_ext = [
                    np.append(index_else, index_A2Q2)
                    for index_A2Q2 in self.indices_A2Q2
                ]

                lhs_variable = sum([
                    self.rho_variable[self.StI(index)]
                    for index in indices_a1q1A2q2_ext
                ])
                lhs = P @ lhs_variable @ P.T

                rhs_variable = sum([
                    self.rho_variable[self.StI(index)]
                    for index in indices_a1q1A2Q2_ext
                ])
                rhs_permuted = P @ rhs_variable @ P.T
                rhs_partial = nlg.partial_trace(rhs_permuted, sub_dim)
                rhs = self.probQ2[q2] * cp.kron(self.rhoT, rhs_partial)

                self.constraints.append(lhs - rhs == 0)
Example #18
def policy(G, R, L, d, U_next, gamma):
    [n,A]=R.shape
    [m,temp]=d.shape

    Q = cvxpy.Variable(n,A)
    M = cvxpy.Variable(n,n)
    S = cvxpy.Variable(m,n)
    K = cvxpy.Variable(m,m)
    U = cvxpy.Variable(n,1)
    y = cvxpy.Variable(m,1)
    r = cvxpy.Variable(n,1)
    xi = cvxpy.Variable(m,1)
    z = cvxpy.Variable(1,1)

    Kr = cvxpy.kron(np.ones((A,1)),np.eye(n))


    # Create the constraints.
    constraints = [-M + cvxpy.mul_elemwise(G, (np.ones((n, 1)) * cvxpy.vec(Q).T)) * Kr == 0,
                   -r + cvxpy.mul_elemwise(R,Q) * np.ones((A, 1)) == 0,
                   -L.T * y + z * np.ones((n, 1)) - U <= 0,
                   -U + r + gamma * M.T * U_next == 0,
                   -K * L + L * M + xi * np.ones((1, n)) <= 0,
                   xi + d - K * d >= 0,
                   Q * np.ones((A, 1)) - np.ones((n, 1)) == 0,
                   Q >= 0, #np.zeros((n, A)),
                   y >= 0, #np.zeros((m, 1)),
                   K >= 0, #np.zeros((m, m)),
                   ]
    

    # Form objective.
    obj = cvxpy.Minimize((d.T) * y - z)

    # Form and solve problem.
    prob = cvxpy.Problem(obj, constraints)
#    prob.solve(solver = cvxpy.MOSEK, verbose = True)
#    prob.solve(solver = cvxpy.CVXOPT, verbose = True)
    prob.solve(solver = cvxpy.ECOS, verbose = True, max_iters = 2000, feastol = 1e-4, reltol = 1e-4, abstol = 1e-4)
#    prob.solve(solver = cvxpy.SCS, verbose = False)

    return U.value, Q.value, M.value, ((d.T) * y - z).value
Example #19
    def _quantum_rel_entropy_problem(self, rho, key_map_povm, m, k):
        mat_size = rho.shape[0]
        opt_mat_sh = (mat_size**2, mat_size**2)
        # Define cvxpy variables
        M = [cvx.Variable(opt_mat_sh, hermitian=True) for _ in range(k)]
        tau = cvx.Variable()
        T = cvx.Variable(m)

        key_map_povm_ = [
            np.kron(povm, np.eye(self.dim_B)) for povm in key_map_povm
        ]
        cq_rho = np.sum(povm @ rho @ povm for povm in key_map_povm_)

        X = general_kron(rho, np.eye(mat_size))
        Y = cvx.kron(np.eye(mat_size), cvx.conj(cq_rho))
        M.insert(0, Y)  # M[0] = Y
        Z = M[-1]  # M[k] = Z

        # Constraints related to matrix geometric mean cone
        const_geo_mean_cone = []
        for i in range(k):
            M_matrix = cvx.bmat([[M[i], M[i + 1]], [M[i + 1], X]])
            const_geo_mean_cone.append((M_matrix >> 0))

        # Constraints related to operator relative entropy cone
        e = np.reshape(np.eye(mat_size), (-1, 1), order="C")
        s, w = self.leggauss_zero_to_one(m)

        const_rel_entropy = []
        eXe = e.T @ X @ e
        eX = e.T @ X
        Xe = X @ e
        for j in range(m):
            T_matrix = cvx.bmat([[eXe - s[j] * T[j] / w[j], eX],
                                 [Xe, X + s[j] * (Z - X)]])
            const_rel_entropy.append((T_matrix >> 0))
        const_rel_entropy.append((np.power(2, k) * cvx.sum(T) + tau >= 0))
        constraints = const_geo_mean_cone + const_rel_entropy

        obj = cvx.Minimize(tau)
        problem = cvx.Problem(obj, constraints)
        return problem
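leggauss_zero_to_one is a helper from the surrounding class; a plausible implementation (illustrative only, not taken from the source) maps Gauss-Legendre nodes and weights from [-1, 1] to [0, 1], which is what the quadrature-based relative-entropy approximation above expects:

import numpy as np

def leggauss_zero_to_one(m):
    # Standard affine change of interval for Gauss-Legendre quadrature.
    s, w = np.polynomial.legendre.leggauss(m)
    return (s + 1) / 2, w / 2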
Example #20
    def min_prob_outcome_a_dual(self) -> float:
        r"""
        Compute the minimal probability for calculating outcome "a".

        The dual problem for the minimal probability of "a" is given as:

        .. math::

            \begin{equation}
                \begin{aligned}
                    \text{maximize:} \quad & \text{Tr}(Y) \\
                    \text{subject to:} \quad & \pi \left(I_{\mathcal{Y}_1
                    \otimes \ldots \otimes \mathcal{Y}_n} \otimes Y \right)
                    \pi^* \leq Q_{a_1} \otimes \ldots \otimes Q_{a_n}, \\
                    & Y \in \text{Herm} \left(\mathcal{X} \otimes \ldots \otimes
                    \mathcal{X}_n \right)
                \end{aligned}
            \end{equation}

        :return: The optimal minimal probability for obtaining outcome "a".
        """
        y_var = cvxpy.Variable((2**self._num_reps, 2**self._num_reps),
                               hermitian=True)
        objective = cvxpy.Maximize(cvxpy.trace(cvxpy.real(y_var)))

        kron_var = cvxpy.kron(np.eye(2**self._num_reps), y_var)

        if self._num_reps == 1:
            u_var = cvxpy.multiply(cvxpy.multiply(self._pperm, kron_var),
                                   self._pperm.conj().T)
            constraints = [cvxpy.real(u_var) << self._q_a]
        else:
            constraints = [
                cvxpy.real(self._pperm @ kron_var @ self._pperm.conj().T) <<
                self._q_a
            ]
        problem = cvxpy.Problem(objective, constraints)

        return problem.solve()
Example #21
    def scalar_kronl(self, param):
        y = cp.Variable(shape=(1, 1))
        A_val = np.array([[1., 2.], [3., 4.]])
        L = np.array([[0.5, 1], [2, 3]])
        U = np.array([[10, 11], [12, 13]])
        if param:
            A = cp.Parameter(shape=(2, 2))
            A.value = A_val
        else:
            A = cp.Constant(A_val)
        krony = cp.kron(y, A)  # should be equal to y * A
        constraints = [U >= krony, krony >= L]

        objective = cp.Minimize(y)
        prob = cp.Problem(objective, constraints)
        prob.solve()
        self.assertItemsAlmostEqual(y.value, np.array([[np.max(L / A_val)]]))

        objective = cp.Maximize(y)
        prob = cp.Problem(objective, constraints)
        prob.solve()
        self.assertItemsAlmostEqual(y.value, np.array([[np.min(U / A_val)]]))
        pass
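The test above exercises the fact that the Kronecker product of a 1-by-1 block with a matrix is just scalar multiplication, so kron(y, A) equals y * A elementwise. The same identity in plain NumPy:

import numpy as np

A_val = np.array([[1., 2.], [3., 4.]])
assert np.allclose(np.kron(np.array([[2.0]]), A_val), 2.0 * A_val)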
Example #22
def diamonddist(A, B, mxBasis='gm', dimOrStateSpaceDims=None):
    """
    Returns the approximate diamond norm describing the difference between gate
    matrices A and B given by :

      D = ||A - B ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1

    Parameters
    ----------
    A, B : numpy array
        The *gate* matrices to use when computing the diamond norm.

    mxBasis : {"std","gm","pp"}, optional
        the basis of the gate matrices A and B : standard (matrix units),
        Gell-Mann, or Pauli-product, respectively.

    dimOrStateSpaceDims : int or list of ints, optional
        Structure of the density-matrix space, which further specifies the basis
        of gateMx (see BasisTools).

    Returns
    -------
    float
       Diamond norm
    """

    #currently cvxpy is only needed for this function, so don't import until here
    import cvxpy as _cvxpy

    # This SDP implementation is a modified version of Kevin's code

    #Compute the diamond norm

    #Uses the primal SDP from arXiv:1207.5726v2, Sec 3.2

    #Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
    #Subject to  [[ I otimes rho0, X],
    #            [X.dag, I otimes rho1]] >> 0
    #              rho0, rho1 are density matrices
    #              X is linear operator

    #Jamiolkowski representation of the process
    #  J(phi) = sum_ij Phi(Eij) otimes Eij

    #< A, B > = Tr(A.dag B)

    #def vec(matrix_in):
    #    # Stack the columns of a matrix to return a vector
    #    return _np.transpose(matrix_in).flatten()
    #
    #def unvec(vector_in):
    #    # Slice a vector into columns of a matrix
    #    d = int(_np.sqrt(vector_in.size))
    #    return _np.transpose(vector_in.reshape( (d,d) ))


    dim = A.shape[0]
    smallDim = int(_np.sqrt(dim))
    assert(dim == A.shape[1] == B.shape[0] == B.shape[1])

    #Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by density mx dimension
    JAstd = smallDim * _jam.jamiolkowski_iso(A, mxBasis, "std", dimOrStateSpaceDims)
    JBstd = smallDim * _jam.jamiolkowski_iso(B, mxBasis, "std", dimOrStateSpaceDims)

    #CHECK: Kevin's jamiolowski, which implements the un-normalized isomorphism:
    #  smallDim * _jam.jamiolkowski_iso(M, "std", "std")
    #def kevins_jamiolkowski(process, representation = 'superoperator'):
    #    # Return the Choi-Jamiolkowski representation of a quantum process
    #    # Add methods as necessary to accept different representations
    #    process = _np.array(process)
    #    if representation == 'superoperator':
    #        # Superoperator is the linear operator acting on vec(rho)
    #        dimension = int(_np.sqrt(process.shape[0]))
    #        print "dim = ",dimension
    #        jamiolkowski_matrix = _np.zeros([dimension**2, dimension**2], dtype='complex')
    #        for i in range(dimension**2):
    #            Ei_vec= _np.zeros(dimension**2)
    #            Ei_vec[i] = 1
    #            output = unvec(_np.dot(process,Ei_vec))
    #            tmp = _np.kron(output, unvec(Ei_vec))
    #            print "E%d = \n" % i,unvec(Ei_vec)
    #            #print "contrib =",_np.kron(output, unvec(Ei_vec))
    #            jamiolkowski_matrix += tmp
    #        return jamiolkowski_matrix
    #JAstd_kev = jamiolkowski(A)
    #JBstd_kev = jamiolkowski(B)
    #print "diff A = ",_np.linalg.norm(JAstd_kev/2.0-JAstd)
    #print "diff B = ",_np.linalg.norm(JBstd_kev/2.0-JBstd)

    #Kevin's function: def diamondnorm( jamiolkowski_matrix ):
    jamiolkowski_matrix = JBstd-JAstd

    # Here we define a bunch of auxiliary matrices because CVXPY doesn't use complex numbers

    K = jamiolkowski_matrix.real # J.real
    L = jamiolkowski_matrix.imag # J.imag

    Y = _cvxpy.Variable(dim, dim) # X.real
    Z = _cvxpy.Variable(dim, dim) # X.imag

    sig0 = _cvxpy.Variable(smallDim,smallDim) # rho0.real
    sig1 = _cvxpy.Variable(smallDim,smallDim) # rho1.real
    tau0 = _cvxpy.Variable(smallDim,smallDim) # rho0.imag
    tau1 = _cvxpy.Variable(smallDim,smallDim) # rho1.imag

    ident = _np.identity(smallDim, 'd')

    objective = _cvxpy.Maximize( _cvxpy.trace( K.T * Y + L.T * Z) )
    constraints = [ _cvxpy.bmat( [
                        [ _cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
                        [ Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
                        [ _cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
                        [ -Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]] ) >> 0,
                    _cvxpy.bmat( [[sig0, -tau0],
                           [tau0,  sig0]] ) >> 0,
                    _cvxpy.bmat( [[sig1, -tau1],
                           [tau1,  sig1]] ) >> 0,
                    sig0 == sig0.T,
                    sig1 == sig1.T,
                    tau0 == -tau0.T,
                    tau1 == -tau1.T,
                    _cvxpy.trace(sig0) == 1.,
                    _cvxpy.trace(sig1) == 1. ]

    prob = _cvxpy.Problem(objective, constraints)
    try:
        prob.solve(solver="CVXOPT")
#       prob.solve(solver="ECOS")
#       prob.solve(solver="SCS")#This always fails
    except:
        _warnings.warn("CVXOPT failed - diamonddist returning -2!")
        return -2
    return prob.value
Example #23
    def get_primal_problem(self, m=2, k=2):

        mat_size = np.prod(self.dims)  # size of rho_AB
        opt_mat_sh = (mat_size**2, mat_size**2)

        # Define cvxpy variables
        M = [cvx.Variable(opt_mat_sh, hermitian=True) for _ in range(k)]
        rho_AB = cvx.Variable((mat_size, mat_size), hermitian=True)
        tau = cvx.Variable()
        T = cvx.Variable(m)

        cq_rho_AB = np.sum(povm @ rho_AB @ povm for povm in self._key_map_povm)

        X = general_kron(rho_AB, np.eye(mat_size))
        Y = cvx.kron(np.eye(mat_size), cvx.conj(cq_rho_AB))
        M.insert(0, Y)  # M[0] = Y
        Z = M[-1]  # M[k] = Z

        # Constraints related to matrix geometric mean cone
        const_geo_mean_cone = []
        for i in range(k):
            M_matrix = cvx.bmat([[M[i], M[i + 1]], [M[i + 1], X]])
            const_geo_mean_cone.append((M_matrix >> 0))

        # Constraints related to operator relative entropy cone
        e = np.reshape(np.eye(mat_size), (-1, 1), order="C")
        s, w = self.leggauss_zero_to_one(m)

        const_rel_entropy = []
        eXe = e.T @ X @ e
        eX = e.T @ X
        Xe = X @ e
        for j in range(m):
            T_matrix = cvx.bmat([[eXe - s[j] * T[j] / w[j], eX],
                                 [Xe, X + s[j] * (Z - X)]])
            const_rel_entropy.append((T_matrix >> 0))
        const_rel_entropy.append((np.power(2, k) * cvx.sum(T) + tau >= 0))

        # Constraints related to the state rho_AB
        const_rho_AB = const_rho_AB_ub = const_rho_AB_lb = []
        if self.Gamma_exact is not None:
            const_rho_AB = [(cvx.trace(rho_AB * G) == g)
                            for g, G in zip(self.gamma, self.Gamma_exact)]
        if self.Gamma_inexact is not None:
            const_rho_AB_ub = [
                (cvx.real(cvx.trace(rho_AB * G)) <= g_ub)
                for g_ub, G in zip(self.gamma_ub, self.Gamma_inexact)
            ]
            const_rho_AB_lb = [
                (cvx.real(cvx.trace(rho_AB * G)) >= g_lb)
                for g_lb, G in zip(self.gamma_lb, self.Gamma_inexact)
            ]

        # Other constraints
        const_rho_normalized = (cvx.trace(rho_AB) == 1)
        const_rho_pos = (rho_AB >> 0)
        constraints = [const_rho_normalized, const_rho_pos] + \
            const_rho_AB + const_rho_AB_ub + const_rho_AB_lb + \
            const_geo_mean_cone + const_rel_entropy

        obj = cvx.Minimize(tau)
        problem = cvx.Problem(obj, constraints)
        return problem
Example #24
def diamonddist(A, B, mxBasis='gm', dimOrStateSpaceDims=None):
    """
    Returns the approximate diamond norm describing the difference between gate
    matrices A and B given by :

      D = ||A - B ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1

    Parameters
    ----------
    A, B : numpy array
        The *gate* matrices to use when computing the diamond norm.

    mxBasis : {"std","gm","pp"}, optional
        the basis of the gate matrices A and B : standard (matrix units),
        Gell-Mann, or Pauli-product, respectively.

    dimOrStateSpaceDims : int or list of ints, optional
        Structure of the density-matrix space, which further specifies the basis
        of gateMx (see BasisTools).

    Returns
    -------
    float
       Diamond norm
    """

    #currently cvxpy is only needed for this function, so don't import until here
    import cvxpy as _cvxpy

    # This SDP implementation is a modified version of Kevin's code

    #Compute the diamond norm

    #Uses the primal SDP from arXiv:1207.5726v2, Sec 3.2

    #Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
    #Subject to  [[ I otimes rho0, X],
    #            [X.dag, I otimes rho1]] >> 0
    #              rho0, rho1 are density matrices
    #              X is linear operator

    #Jamiolkowski representation of the process
    #  J(phi) = sum_ij Phi(Eij) otimes Eij

    #< A, B > = Tr(A.dag B)

    #def vec(matrix_in):
    #    # Stack the columns of a matrix to return a vector
    #    return _np.transpose(matrix_in).flatten()
    #
    #def unvec(vector_in):
    #    # Slice a vector into columns of a matrix
    #    d = int(_np.sqrt(vector_in.size))
    #    return _np.transpose(vector_in.reshape( (d,d) ))


    dim = A.shape[0]
    smallDim = int(_np.sqrt(dim))
    assert(dim == A.shape[1] == B.shape[0] == B.shape[1])

    #Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by density mx dimension
    JAstd = smallDim * _jam.jamiolkowski_iso(A, mxBasis, "std", dimOrStateSpaceDims)
    JBstd = smallDim * _jam.jamiolkowski_iso(B, mxBasis, "std", dimOrStateSpaceDims)

    #CHECK: Kevin's jamiolowski, which implements the un-normalized isomorphism:
    #  smallDim * _jam.jamiolkowski_iso(M, "std", "std")
    #def kevins_jamiolkowski(process, representation = 'superoperator'):
    #    # Return the Choi-Jamiolkowski representation of a quantum process
    #    # Add methods as necessary to accept different representations
    #    process = _np.array(process)
    #    if representation == 'superoperator':
    #        # Superoperator is the linear operator acting on vec(rho)
    #        dimension = int(_np.sqrt(process.shape[0]))
    #        print "dim = ",dimension
    #        jamiolkowski_matrix = _np.zeros([dimension**2, dimension**2], dtype='complex')
    #        for i in range(dimension**2):
    #            Ei_vec= _np.zeros(dimension**2)
    #            Ei_vec[i] = 1
    #            output = unvec(_np.dot(process,Ei_vec))
    #            tmp = _np.kron(output, unvec(Ei_vec))
    #            print "E%d = \n" % i,unvec(Ei_vec)
    #            #print "contrib =",_np.kron(output, unvec(Ei_vec))
    #            jamiolkowski_matrix += tmp
    #        return jamiolkowski_matrix
    #JAstd_kev = jamiolkowski(A)
    #JBstd_kev = jamiolkowski(B)
    #print "diff A = ",_np.linalg.norm(JAstd_kev/2.0-JAstd)
    #print "diff B = ",_np.linalg.norm(JBstd_kev/2.0-JBstd)

    #Kevin's function: def diamondnorm( jamiolkowski_matrix ):
    jamiolkowski_matrix = JBstd-JAstd

    # Here we define a bunch of auxiliary matrices because CVXPY doesn't use complex numbers

    K = jamiolkowski_matrix.real # J.real
    L = jamiolkowski_matrix.imag # J.imag

    Y = _cvxpy.Variable(dim, dim) # X.real
    Z = _cvxpy.Variable(dim, dim) # X.imag

    sig0 = _cvxpy.Variable(smallDim,smallDim) # rho0.real
    sig1 = _cvxpy.Variable(smallDim,smallDim) # rho1.real
    tau0 = _cvxpy.Variable(smallDim,smallDim) # rho0.imag
    tau1 = _cvxpy.Variable(smallDim,smallDim) # rho1.imag

    ident = _np.identity(smallDim, 'd')

    objective = _cvxpy.Maximize( _cvxpy.trace( K.T * Y + L.T * Z) )
    constraints = [ _cvxpy.bmat( [
                        [ _cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
                        [ Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
                        [ _cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
                        [ -Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]] ) >> 0,
                    _cvxpy.bmat( [[sig0, -tau0],
                           [tau0,  sig0]] ) >> 0,
                    _cvxpy.bmat( [[sig1, -tau1],
                           [tau1,  sig1]] ) >> 0,
                    sig0 == sig0.T,
                    sig1 == sig1.T,
                    tau0 == -tau0.T,
                    tau1 == -tau1.T,
                    _cvxpy.trace(sig0) == 1.,
                    _cvxpy.trace(sig1) == 1. ]

    prob = _cvxpy.Problem(objective, constraints)
#    try:
    prob.solve(solver="CVXOPT")
#        prob.solve(solver="ECOS")
#       prob.solve(solver="SCS")#This always fails
#    except:
#        return -1
    return prob.value
Example #25
  Constant([[-5, 2], [-3, 1]])),
 (cp.diag, (2, ), [[[-5, 2], [-3, 1]]], Constant([-5, 1])),
 (cp.diag, (2, 2), [[-5, 1]], Constant([[-5, 0], [0, 1]])),
 (cp.exp, (2, 2), [[[1, 0], [2, -1]]],
  Constant([[math.e, 1], [math.e**2, 1.0 / math.e]])),
 (cp.huber, (2, 2), [[[0.5, -1.5], [4, 0]]], Constant([[0.25, 2], [7, 0]])),
 (lambda x: cp.huber(x, 2.5), (2, 2), [[[0.5, -1.5], [4, 0]]],
  Constant([[0.25, 2.25], [13.75, 0]])),
 (cp.inv_pos, (2, 2), [[[1, 2], [3, 4]]],
  Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
 (lambda x: (x + Constant(0))**-1, (2, 2), [[[1, 2], [3, 4]]],
  Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
 (cp.kl_div, tuple(), [math.e, 1], Constant([1])),
 (cp.kl_div, tuple(), [math.e, math.e], Constant([0])),
 (cp.kl_div, (2, ), [[math.e, 1], 1], Constant([1, 0])),
 (lambda x: cp.kron(np.array([[1, 2], [3, 4]]), x), (4, 4),
  [np.array([[5, 6], [7, 8]])],
  Constant(np.kron(np.array([[1, 2], [3, 4]]), np.array([[5, 6], [7,
                                                                  8]])))),
 (cp.lambda_max, tuple(), [[[2, 0], [0, 1]]], Constant([2])),
 (cp.lambda_max, tuple(), [[[2, 0, 0], [0, 3, 0], [0, 0,
                                                   1]]], Constant([3])),
 (cp.lambda_max, tuple(), [[[5, 7], [7, -3]]], Constant([9.06225775])),
 (lambda x: cp.lambda_sum_largest(x, 2), tuple(), [[[1, 2, 3], [2, 4, 5],
                                                    [3, 5, 6]]],
  Constant([11.51572947])),
 (cp.log_sum_exp, tuple(), [[[5, 7], [0, -3]]], Constant([7.1277708268])),
 (log_sum_exp_axis_0, (1, 2), [[[5, 7, 1], [0, -3, 6]]],
  Constant([[7.12910890], [6.00259878]])),
 (log_sum_exp_axis_1, (3, ), [[[5, 7, 1], [0, -3, 6]]],
  Constant([5.00671535, 7.0000454, 6.0067153])),
Example #26
def diamond_norm(choi, **kwargs):
    r"""Return the diamond norm of the input quantum channel object.

    This function computes the completely-bounded trace-norm (often
    referred to as the diamond-norm) of the input quantum channel object
    using the semidefinite-program from reference [1].

    Args:
        choi(Choi or QuantumChannel): a quantum channel object or
                                      Choi-matrix array.
        kwargs: optional arguments to pass to CVXPY solver.

    Returns:
        float: The completely-bounded trace norm
               :math:`\|\mathcal{E}\|_{\diamond}`.

    Raises:
        QiskitError: if CVXPY package cannot be found.

    Additional Information:
        The input to this function is typically *not* a CPTP quantum
        channel, but rather the *difference* between two quantum channels
        :math:`\|\Delta\mathcal{E}\|_\diamond` where
        :math:`\Delta\mathcal{E} = \mathcal{E}_1 - \mathcal{E}_2`.

    Reference:
        J. Watrous. "Simpler semidefinite programs for completely bounded
        norms", arXiv:1207.5726 [quant-ph] (2012).

    .. note::

        This function requires the optional CVXPY package to be installed.
        Any additional kwargs will be passed to the ``cvxpy.solve``
        function. See the CVXPY documentation for information on available
        SDP solvers.
    """
    _cvxpy_check('`diamond_norm`')  # Check CVXPY is installed

    choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi'))

    def cvx_bmat(mat_r, mat_i):
        """Block matrix for embedding complex matrix in reals"""
        return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])

    # Dimension of input and output spaces
    dim_in = choi._input_dim
    dim_out = choi._output_dim
    size = dim_in * dim_out

    # SDP Variables to convert to real valued problem
    r0_r = cvxpy.Variable((dim_in, dim_in))
    r0_i = cvxpy.Variable((dim_in, dim_in))
    r0 = cvx_bmat(r0_r, r0_i)

    r1_r = cvxpy.Variable((dim_in, dim_in))
    r1_i = cvxpy.Variable((dim_in, dim_in))
    r1 = cvx_bmat(r1_r, r1_i)

    x_r = cvxpy.Variable((size, size))
    x_i = cvxpy.Variable((size, size))
    iden = sparse.eye(dim_out)

    # Watrous uses row-vec convention for his Choi matrix while we use
    # col-vec. It turns out row-vec convention is required for CVXPY too
    # since the cvxpy.kron function must have a constant as its first argument.
    c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r],
                      [x_r.T, cvxpy.kron(iden, r1_r)]])
    c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i],
                      [-x_i.T, cvxpy.kron(iden, r1_i)]])
    c = cvx_bmat(c_r, c_i)

    # Convert col-vec convention Choi-matrix to row-vec convention and
    # then take Transpose: Choi_C -> Choi_R.T
    choi_rt = np.transpose(
        np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),
        (3, 2, 1, 0)).reshape(choi.data.shape)
    choi_rt_r = choi_rt.real
    choi_rt_i = choi_rt.imag

    # Constraints
    cons = [
        r0 >> 0, r0_r == r0_r.T, r0_i == -r0_i.T,
        cvxpy.trace(r0_r) == 1, r1 >> 0, r1_r == r1_r.T, r1_i == -r1_i.T,
        cvxpy.trace(r1_r) == 1, c >> 0
    ]

    # Objective function
    obj = cvxpy.Maximize(
        cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))
    prob = cvxpy.Problem(obj, cons)
    sol = prob.solve(**kwargs)
    return sol
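cvx_bmat embeds a complex matrix M = R + iI as the real block matrix [[R, -I], [I, R]]; this works for the SDP because the embedding is symmetric whenever M is Hermitian and has the same eigenvalues as M, each with multiplicity two, so positive semidefiniteness is preserved. A quick NumPy check of that fact (not part of the function above):

import numpy as np

rng = np.random.default_rng(1)
M = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
M = (M + M.conj().T) / 2                         # Hermitian test matrix
emb = np.block([[M.real, -M.imag], [M.imag, M.real]])
assert np.allclose(np.linalg.eigvalsh(emb), np.repeat(np.linalg.eigvalsh(M), 2))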
Example #27
    (cp.huber, (2, 2), [[[0.5, -1.5], [4, 0]]],
     Constant([[0.25, 2], [7, 0]])),
    (lambda x: cp.huber(x, 2.5), (2, 2), [[[0.5, -1.5], [4, 0]]],
     Constant([[0.25, 2.25], [13.75, 0]])),
    (cp.inv_pos, (2, 2), [[[1, 2], [3, 4]]],
     Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
    (lambda x: (x + Constant(0))**-1, (2, 2), [[[1, 2], [3, 4]]],
     Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
    (cp.kl_div, tuple(), [math.e, 1], Constant([1])),
    (cp.kl_div, tuple(), [math.e, math.e], Constant([0])),
    (cp.kl_div, (2,), [[math.e, 1], 1], Constant([1, 0])),
    (cp.rel_entr, tuple(), [math.e, 1], Constant([math.e])),
    (cp.rel_entr, tuple(), [math.e, math.e], Constant([0])),
    (cp.rel_entr, (2,), [[math.e, 1], 1], Constant([math.e, 0])),
    # kron with variable in the right operand
    (lambda x: cp.kron(np.array([[1, 2], [3, 4]]), x), (4, 4),
     [np.array([[5, 6], [7, 8]])],
     Constant(np.kron(np.array([[1, 2], [3, 4]]), np.array([[5, 6], [7, 8]])))),
    (lambda x: cp.kron(np.array([[1, 2], [3, 4], [5, 6]]), x), (6, 4),
     [np.array([[5, 6], [7, 8]])],
     Constant(np.kron(np.array([[1, 2], [3, 4], [5, 6]]), np.array([[5, 6], [7, 8]])))),
    (lambda x: cp.kron(np.array([[1, 2], [3, 4]]), x), (6, 4),
     [np.array([[5, 6], [7, 8], [9, 10]])],
     Constant(np.kron(np.array([[1, 2], [3, 4]]), np.array([[5, 6], [7, 8], [9, 10]])))),

    # kron with variable in the left operand
    (lambda x: cp.kron(x, np.array([[1, 2], [3, 4]])), (4, 4),
     [np.array([[5, 6], [7, 8]])],
     Constant(np.kron(np.array([[5, 6], [7, 8]]), np.array([[1, 2], [3, 4]])))),
    (lambda x: cp.kron(x, np.array([[1, 2], [3, 4], [5, 6]])), (6, 4),
     [np.array([[5, 6], [7, 8]])],
Example #28
def channel_discrimination(J0,
                           J1,
                           dimA,
                           dimB,
                           p,
                           succ=False,
                           sdp=False,
                           dual=False,
                           display=False):
    '''
    Calculates the optimal error probability for quantum channel discrimination, with prior
    probability p for the channel with Choi representation J1.

    J0 and J1 are the Choi representations of the two channels. dimA and dimB are the input
    and output dimensions, respectively, of the channels.

    If succ=True, then this function returns the optimal success probability instead.
    If sdp=True, then this function calculates the optimal value (error or success 
    probability) using an SDP.
    '''

    if sdp:

        if not dual:

            # Need the following syspermute because the cvxpy kron function
            # requires a constant in the first argument
            J0 = syspermute(J0, [2, 1], [dimA, dimB])
            J1 = syspermute(J1, [2, 1], [dimA, dimB])

            Q0 = cvx.Variable((dimA * dimB, dimA * dimB), hermitian=True)
            Q1 = cvx.Variable((dimA * dimB, dimA * dimB), hermitian=True)
            rho = cvx.Variable((dimA, dimA), hermitian=True)

            c = [
                Q0 >> 0, Q1 >> 0, rho >> 0,
                cvx.real(cvx.trace(rho)) == 1,
                Q0 + Q1 == cvx.kron(eye(dimB), rho)
            ]

            obj = cvx.Minimize(
                cvx.real(p * cvx.trace(Q1 @ J0) +
                         (1 - p) * cvx.trace(Q0 @ J1)))
            prob = cvx.Problem(obj, constraints=c)

            prob.solve(verbose=display, eps=1e-7)

            p_err = prob.value

            if succ:
                return 1 - p_err
            else:
                return p_err

        elif dual:

            mu = cvx.Variable()
            W = cvx.Variable((dimA * dimB, dimA * dimB), hermitian=True)

            WA = numpy_to_cvxpy(
                partial_trace(cvxpy_to_numpy(W), [2], [dimA, dimB]))

            c = [W << p * J0, W << (1 - p) * J1, mu * eye(dimA) << WA]

            obj = cvx.Maximize(mu)
            prob = cvx.Problem(obj, constraints=c)

            prob.solve(verbose=display, eps=1e-7)

            p_err = prob.value

            if succ:
                return 1 - p_err
            else:
                return p_err

    else:
        p_err = (1 / 2) * (1 - diamond_norm(
            p * J0 - (1 - p) * J1, dimA, dimB, display=display))
        if succ:
            return 1 - p_err
        else:
            return p_err
Example #29
     Constant([[math.e, 1], [math.e**2, 1.0 / math.e]])),
    (cp.huber, (2, 2), [[[0.5, -1.5], [4, 0]]],
     Constant([[0.25, 2], [7, 0]])),
    (lambda x: cp.huber(x, 2.5), (2, 2), [[[0.5, -1.5], [4, 0]]],
     Constant([[0.25, 2.25], [13.75, 0]])),
    (cp.inv_pos, (2, 2), [[[1, 2], [3, 4]]],
     Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
    (lambda x: (x + Constant(0))**-1, (2, 2), [[[1, 2], [3, 4]]],
     Constant([[1, 1.0 / 2], [1.0 / 3, 1.0 / 4]])),
    (cp.kl_div, tuple(), [math.e, 1], Constant([1])),
    (cp.kl_div, tuple(), [math.e, math.e], Constant([0])),
    (cp.kl_div, (2,), [[math.e, 1], 1], Constant([1, 0])),
    (cp.rel_entr, tuple(), [math.e, 1], Constant([math.e])),
    (cp.rel_entr, tuple(), [math.e, math.e], Constant([0])),
    (cp.rel_entr, (2,), [[math.e, 1], 1], Constant([math.e, 0])),
    (lambda x: cp.kron(np.array([[1, 2], [3, 4]]), x), (4, 4), [np.array([[5, 6], [7, 8]])],
     Constant(np.kron(np.array([[1, 2], [3, 4]]), np.array([[5, 6], [7, 8]])))),
    (cp.lambda_max, tuple(), [[[2, 0], [0, 1]]], Constant([2])),
    (cp.lambda_max, tuple(), [[[2, 0, 0], [0, 3, 0], [0, 0, 1]]], Constant([3])),

    (cp.lambda_max, tuple(), [[[5, 7], [7, -3]]], Constant([9.06225775])),
    (lambda x: cp.lambda_sum_largest(x, 2), tuple(),
     [[[1, 2, 3], [2, 4, 5], [3, 5, 6]]], Constant([11.51572947])),
    (cp.log_sum_exp, tuple(), [[[5, 7], [0, -3]]], Constant([7.1277708268])),
    (log_sum_exp_axis_0, (1, 2),
     [[[5, 7, 1], [0, -3, 6]]], Constant([[7.12910890], [6.00259878]])),
    (log_sum_exp_axis_1, (3,),
     [[[5, 7, 1], [0, -3, 6]]], Constant([5.00671535, 7.0000454, 6.0067153])),
    (cp.logistic, (2, 2),
     [
        [[math.log(5), math.log(7)],
Example #30
    def _cvxpy_objective_and_constraints(arg, arg_test, fun, grad,
                                         grad_lips_constant,
                                         is_monotone_inc=False,
                                         is_monotone_dec=False, is_convex=False,
                                         is_concave=False):
        """
        Create the collection of linear constraints for the lower and upper
        bounds

        Sets up the optimization problem

        LB:  minimize     y_0
             subject to   y_i - g_0^\top (x_i - x_0)
                                           + L/2|x_i-x_0|^2 <= y_0
                                                |g_i - g_0| <= L |x_i-x_0|
                                    for each i\\in[1,..,N+1]

        UB:  maximize     y_0
             subject to   y_i - g_0^\top (x_i - x_0)
                                           - L/2|x_i-x_0|^2 <= y_0
                                                |g_i - g_0| <= L |x_i-x_0|
                                    for each i\\in[1,..,N+1]
        with decision variables y_0 and g_0
        """
        n_args = arg.shape[0]

        # Variables for the data points
        # Fun_test is the cvx variable denoting the possible values of the
        # interpolant at arg_test
        fun_test = cp.Variable((1,))
        grad_test = cp.Variable((1, 1), nonneg=is_monotone_inc,
                                nonpos=is_monotone_dec)

        # Constraints for existence
        rep_mat = np.ones((n_args, 1))
        # kron to repeat the elements and np.newaxis to retain the dimension
        # \nabla f(x_j) - \nabla f(x_0) for each j
        delta_grad = grad - cp.kron(rep_mat, grad_test)
        # x_j - x_0 for each j
        delta_arg = arg - cp.kron(rep_mat, arg_test)
        # L/2 ||x_j - x_0||^2 for each i and j
        delta_arg_norm = cp.Pnorm(delta_arg, p=2, axis=1)
        L_times_half_delta_arg_sqr = grad_lips_constant \
                                     * (delta_arg_norm ** 2)/2

        const_ub = [cp.abs(delta_grad) <= grad_lips_constant *cp.abs(delta_arg)]
        const_lb = [cp.abs(delta_grad) <= grad_lips_constant *cp.abs(delta_arg)]
        if is_convex:
            const_ub.append(fun >= fun_test + (delta_arg @ grad_test)[:, 0])
            const_lb.append(fun <= fun_test + (delta_arg @ grad_test)[:, 0]
                            + L_times_half_delta_arg_sqr)
        elif is_concave:
            const_ub.append(fun >= fun_test + (delta_arg @ grad_test)[:, 0]
                            - L_times_half_delta_arg_sqr)
            const_lb.append(fun <= fun_test + (delta_arg @ grad_test)[:, 0])
        else:
            const_ub.append(fun >= fun_test + (delta_arg@grad_test)[:, 0]
                            - L_times_half_delta_arg_sqr)
            const_lb.append(fun <= fun_test + (delta_arg @ grad_test)[:, 0]
                            + L_times_half_delta_arg_sqr)

        return const_lb, const_ub, fun_test
Example #31
constraints.append( sum([cp.trace(rho_TTSS[i]) for i in map(binarytoint,indices_A1Q1A2Q2)]) - 1 == 0 )

# 2b) positive semidefinite matrices
for i in map(binarytoint,indices_A1Q1A2Q2):
    constraints.append( rho_TTSS[i] >> 0 )
    
# 3) First linear constraint
for a2,q1,q2 in indices_A2Q1Q2:
    indices_A1q1a2q2 = [binarytoint([a,q1,a2,q2]) for a in range(dimA1)]
    indices_A1Q1a2q2 = [binarytoint([a,q,a2,q2]) for a,q in indices_A1Q1]
    
    lhs = sum([rho_TTSS[i] for i in indices_A1q1a2q2])
    
    rhs_variable = sum([rho_TTSS[i] for i in indices_A1Q1a2q2])
    rhs_partial = partial_trace(rhs_variable, [dimT, dim_TSS])
    rhs = cp.Constant(probQ1[q1]) * cp.kron(rhoT, rhs_partial)
    
    constraints.append( lhs - rhs == 0 )
    
# 4) Second linear constraint
P = permutation_matrix((0,1,2,3),(1,0,2,3),subs_TTSS)

for a1,q1,q2 in indices_A1Q1Q2:
    indices_a1q1A2q2 = [binarytoint([a1,q1,a,q2]) for a in range(dimA2)]
    indices_a1q1A2Q2 = [binarytoint([a1,q1,a,q]) for a,q in indices_A2Q2]
    
    lhs_variable = sum([rho_TTSS[i] for i in indices_a1q1A2q2])
    lhs = cp.matmul( cp.matmul(P,lhs_variable) , P )
    
    rhs_variable = sum([rho_TTSS[i] for i in indices_a1q1A2Q2])
    rhs_permuted = cp.matmul( cp.matmul(P,rhs_variable) , P )