def test_proj_to_tp():
    # Identity process is trace preserving, so no change
    state = vec(kraus2choi(np.eye(2)))
    assert np.allclose(state, proj_to_tp(state))

    # Bit flip process is trace preserving, so no change
    state = vec(kraus2choi(sigma_x))
    assert np.allclose(state, proj_to_tp(state))
Example 2
def _constraint_project(choi_mat, trace_preserving=True):
    """
    Projects the given Choi matrix onto the set of Completely Positive and either Trace-Preserving (TP) or
    Trace-Non-Increasing (TNI) maps.
    Uses Dykstra's algorithm with the stopping criterion presented in:

    [DYKALG] Dykstra’s algorithm and robust stopping criteria
             Birgin et al.,
             (Springer US, Boston, MA, 2009), pp. 828–833, ISBN 978-0-387-74759-0.
             https://doi.org/10.1007/978-0-387-74759-0_143

    This method is suggested in [PGD].

    :param choi_mat: A density matrix corresponding to the Choi representation estimate of a quantum process.
    :param trace_preserving: If True (default), project the estimate onto trace-preserving processes; if False,
        project onto trace-non-increasing processes.
    :return: The Choi representation of the CP and TP (or TNI) map closest to the given Choi matrix.
    """
    shape = choi_mat.shape
    old_CP_change = vec(np.zeros(shape))
    old_TP_change = vec(np.zeros(shape))
    last_CP_projection = vec(np.zeros(shape))
    last_state = vec(choi_mat)

    while True:
        # Dykstra's algorithm
        pre_CP = last_state - old_CP_change
        CP_projection = proj_to_cp(pre_CP)
        new_CP_change = CP_projection - pre_CP

        pre_TP = CP_projection - old_TP_change
        if trace_preserving:
            new_state = proj_to_tp(pre_TP)
        else:
            new_state = proj_to_tni(pre_TP)
        new_TP_change = new_state - pre_TP

        CP_change_change = new_CP_change - old_CP_change
        TP_change_change = new_TP_change - old_TP_change
        state_change = new_state - last_state

        # stopping criterion
        if np.linalg.norm(CP_change_change, ord=2) ** 2 + np.linalg.norm(TP_change_change, ord=2) ** 2 \
                + 2 * abs(np.dot(old_TP_change.conj().T, state_change)) \
                + 2 * abs(np.dot(old_CP_change.conj().T, (CP_projection - last_CP_projection))) < 1e-4:
            break

        # store results from this iteration
        old_CP_change = new_CP_change
        old_TP_change = new_TP_change
        last_CP_projection = CP_projection
        last_state = new_state

    return unvec(new_state)
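
The loop above is easier to follow on a toy problem. Below is a minimal, self-contained numpy sketch of Dykstra's algorithm on two simple convex sets in the plane (the line x + y = 1 and the non-negative orthant) rather than the CP/TP sets; the correction terms and the stopping criterion mirror the ones tracked in _constraint_project, and all names and tolerances here are illustrative only.

import numpy as np

def proj_line(x):
    # project onto the affine set {x : x[0] + x[1] = 1}
    return x - (x.sum() - 1) / 2

def proj_orthant(x):
    # project onto the non-negative orthant {x : x >= 0}
    return np.maximum(x, 0)

def dykstra_toy(x0, tol=1e-12, max_iter=10_000):
    last_x = np.asarray(x0, dtype=float)
    old_p = np.zeros_like(last_x)    # correction for the first set (cf. old_CP_change)
    old_q = np.zeros_like(last_x)    # correction for the second set (cf. old_TP_change)
    last_y = np.zeros_like(last_x)   # previous projection onto the first set

    for _ in range(max_iter):
        pre_a = last_x - old_p
        y = proj_line(pre_a)
        new_p = y - pre_a

        pre_b = y - old_q
        new_x = proj_orthant(pre_b)
        new_q = new_x - pre_b

        # same form as the stopping criterion used in _constraint_project
        crit = (np.linalg.norm(new_p - old_p) ** 2
                + np.linalg.norm(new_q - old_q) ** 2
                + 2 * abs(old_q @ (new_x - last_x))
                + 2 * abs(old_p @ (y - last_y)))
        if crit < tol:
            break

        old_p, old_q, last_y, last_x = new_p, new_q, y, new_x

    return new_x

print(dykstra_toy(np.array([-0.3, 2.0])))  # -> approx [0., 1.], the closest point in the intersection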
Example 3
def pgdb_process_estimate(results: List[ExperimentResult], qubits: List[int],
                          trace_preserving=True) -> np.ndarray:
    """
    Provide an estimate of the process via Projected Gradient Descent with Backtracking.

    [PGD] Maximum-likelihood quantum process tomography via projected gradient descent
          Knee et al.,
          Phys. Rev. A 98, 062336 (2018)
          https://dx.doi.org/10.1103/PhysRevA.98.062336
          https://arxiv.org/abs/1803.10062

    :param results: A tomographically complete list of ExperimentResults
    :param qubits: A list of qubits giving the tensor order of the resulting Choi matrix.
    :param trace_preserving: Whether to project the estimate to a trace-preserving process. If
        set to False, we ensure trace non-increasing.
    :return: an estimate of the process in the Choi matrix representation.
    """
    # construct the matrix A and vector n from the data for vectorized calculations of
    # the cost function and its gradient
    A, n = _extract_from_results(results, qubits[::-1])

    dim = 2 ** len(qubits)
    est = np.eye(dim ** 2, dim ** 2, dtype=complex) / dim  # initial estimate
    old_cost = _cost(A, n, est)  # initial cost, which we want to decrease
    mu = 3 / (2 * dim ** 2)  # inverse learning rate
    gamma = .3  # tolerance of letting the constrained update deviate from true gradient; larger is more demanding
    while True:
        gradient = _grad_cost(A, n, est)
        update = _constraint_project(est - gradient / mu, trace_preserving) - est

        # determine step size factor, alpha
        alpha = 1
        new_cost = _cost(A, n, est + alpha * update)
        change = gamma * alpha * np.dot(vec(update).conj().T, vec(gradient))
        while new_cost > old_cost + change:
            alpha = .5 * alpha
            change = .5 * change  # directly update change, corresponding to update of alpha
            new_cost = _cost(A, n, est + alpha * update)

            # small alpha stopgap
            if alpha < 1e-15:
                break

        # update estimate
        est += alpha * update
        if old_cost - new_cost < 1e-10:
            break
        # store current cost
        old_cost = new_cost

    return est
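
The projected-gradient-plus-backtracking structure of this loop can also be seen on a toy problem. The following self-contained sketch minimizes a quadratic over the unit box, with np.clip standing in for _constraint_project; mu, gamma, and the tolerances are chosen for illustration only.

import numpy as np

target = np.array([1.5, -0.4])            # unconstrained minimum, deliberately outside the box

def cost(x):
    return np.sum((x - target) ** 2)

def grad(x):
    return 2 * (x - target)

def project(x):
    return np.clip(x, 0.0, 1.0)           # stand-in for _constraint_project

x = np.array([0.5, 0.5])
old_cost = cost(x)
mu = 2.0                                   # inverse learning rate
gamma = 0.3                                # sufficient-decrease parameter, as above

while True:
    g = grad(x)
    update = project(x - g / mu) - x       # feasible descent direction

    # backtracking line search with the same Armijo-style sufficient-decrease test
    alpha = 1.0
    new_cost = cost(x + alpha * update)
    change = gamma * alpha * (update @ g)
    while new_cost > old_cost + change:
        alpha *= 0.5
        change *= 0.5
        new_cost = cost(x + alpha * update)
        if alpha < 1e-15:
            break

    x = x + alpha * update
    if old_cost - new_cost < 1e-12:
        break
    old_cost = new_cost

print(x)  # -> approx [1.0, 0.0], the projection of target onto the box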
Example 4
def linear_inv_state_estimate(results: List[ExperimentResult],
                              qubits: List[int]) -> np.ndarray:
    """
    Estimate a quantum state using linear inversion.

    This is the simplest state tomography post processing. To use this function,
    collect state tomography data with :py:func:`generate_state_tomography_experiment`
    and :py:func:`~pyquil.operator_estimation.measure_observables`.

    For more details on this post-processing technique,
    see https://en.wikipedia.org/wiki/Quantum_tomography#Linear_inversion or
    see section 3.4 of

    [WOOD] Initialization and characterization of open quantum systems
           C. Wood,
           PhD thesis from University of Waterloo, (2015).
           http://hdl.handle.net/10012/9557

    :param results: A tomographically complete list of results.
    :param qubits: All qubits that were tomographized. This specifies the order in
        which qubits will be kron'ed together.
    :return: A point estimate of the quantum state rho.
    """
    measurement_matrix = np.vstack([
        vec(lifted_pauli(result.setting.out_operator, qubits=qubits)).T.conj()
        for result in results
    ])
    expectations = np.array([result.expectation for result in results])
    rho = pinv(measurement_matrix) @ expectations
    return unvec(rho)
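
For a concrete picture of what linear_inv_state_estimate does, here is a single-qubit sketch in plain numpy that bypasses the ExperimentResult plumbing: it stacks vec(P)^dagger rows for the Pauli observables and applies the pseudo-inverse to ideal (noise-free) expectation values. The reshape-based vectorization below is only a local stand-in for the module's vec; the identity Tr[P rho] = vec(P)^dagger vec(rho) holds for any consistent stacking convention.

import numpy as np
from numpy.linalg import pinv

# single-qubit Pauli observables; I, X, Y, Z form a tomographically complete set
I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
observables = [I2, X, Y, Z]

# the "true" state we pretend to have measured
rho_true = np.array([[0.7, 0.3], [0.3, 0.3]], dtype=complex)

# ideal (noise-free) expectation values <P> = Tr[P rho]
expectations = np.array([np.trace(P @ rho_true).real for P in observables])

# stack vec(P)^dagger as rows, mirroring linear_inv_state_estimate
measurement_matrix = np.vstack([P.reshape(-1, 1).conj().T for P in observables])

rho_est = (pinv(measurement_matrix) @ expectations).reshape(2, 2)
print(np.allclose(rho_est, rho_true))  # -> True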
Example 5
def _extract_from_results(results: List[ExperimentResult], qubits: List[int]):
    """
    Construct the matrix A such that the probabilities p_ij of outcomes n_ij given an estimate E
    can be cast in a vectorized form.

    Specifically::

        p = vec(p_ij) = A x vec(E)

    This yields convenient vectorized calculations of the cost and its gradient, in terms of A, n,
    and E.
    """
    A = []
    n = []
    grand_total_shots = 0

    for result in results:
        in_state_matrix = lifted_state_operator(result.setting.in_state, qubits=qubits)
        operator = lifted_pauli(result.setting.out_operator, qubits=qubits)
        proj_plus = (np.eye(2 ** len(qubits)) + operator) / 2
        proj_minus = (np.eye(2 ** len(qubits)) - operator) / 2

        # Constructing A per eq. (22)
        # TODO: figure out if we can avoid re-splitting into Pi+ and Pi- counts
        A += [
            # vec() turns into a column vector; transpose to a row vector; index into the
            # 1 row to avoid an extra tensor dimension when we call np.asarray(A).
            vec(np.kron(in_state_matrix, proj_plus.T)).T[0],
            vec(np.kron(in_state_matrix, proj_minus.T)).T[0],
        ]

        expected_plus_ones = (1 + result.expectation) / 2
        n += [
            result.total_counts * expected_plus_ones,
            result.total_counts * (1 - expected_plus_ones)
        ]
        grand_total_shots += result.total_counts

    n_qubits = len(qubits)
    dimension = 2 ** n_qubits
    A = np.asarray(A) / dimension ** 2
    n = np.asarray(n)[:, np.newaxis] / grand_total_shots
    return A, n
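
The construction above leans on the identity vec(B)^T vec(E) = Tr[B^T E], so that each row of A dotted into vec(E) is a trace expression in the Choi estimate. Here is a quick numpy check of that identity using random stand-in matrices; the row-stacking vec_ defined below is only a local stand-in for the module's vec, and the identity holds for any consistent stacking convention.

import numpy as np

def vec_(m):
    # local row-stacking vectorization, a stand-in for the module's vec
    return m.reshape(-1, 1)

rng = np.random.default_rng(0)
dim = 2
rho = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))    # stand-in input state
pi = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))     # stand-in POVM element
E = rng.normal(size=(dim ** 2, dim ** 2)) + 1j * rng.normal(size=(dim ** 2, dim ** 2))  # stand-in Choi estimate

# a row of A, built as in _extract_from_results (before the 1/dim**2 normalization)
row = vec_(np.kron(rho, pi.T)).T[0]

# row . vec(E) reproduces a trace: vec(B)^T vec(E) == Tr[B^T E]
lhs = row @ vec_(E)[:, 0]
rhs = np.trace(np.kron(rho, pi.T).T @ E)
print(np.allclose(lhs, rhs))  # -> True

# and kron(rho, pi.T).T == kron(rho.T, pi), so each entry of A @ vec(E) is Tr[kron(rho.T, pi) @ E]
print(np.allclose(np.kron(rho, pi.T).T, np.kron(rho.T, pi)))  # -> True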
Example 6
def proj_to_cp(choi_vec):
    """
    Projects the vectorized Choi representation of a process onto the nearest vectorized Choi matrix in the space of
    completely positive maps. Equation 9 of [PGD].
    :param choi_vec: vectorized density matrix or Choi representation of a process
    :return: closest vectorized choi matrix in the space of completely positive maps
    """
    matrix = unvec(choi_vec)
    hermitian = (matrix + matrix.conj().T) / 2  # enforce Hermiticity
    d, v = np.linalg.eigh(hermitian)
    d[d < 0] = 0  # enforce completely positive by removing negative eigenvalues
    D = np.diag(d)
    return vec(v @ D @ v.conj().T)
def test_proj_to_cp():
    state = vec(np.array([[1., 0], [0, 1.]]))
    assert np.allclose(state, proj_to_cp(state))

    state = vec(np.array([[1.5, 0], [0, 10]]))
    assert np.allclose(state, proj_to_cp(state))

    state = vec(np.array([[-1, 0], [0, 1.]]))
    cp_state = vec(np.array([[0, 0], [0, 1.]]))
    assert np.allclose(cp_state, proj_to_cp(state))

    state = vec(np.array([[0, 1], [1, 0]]))
    cp_state = vec(np.array([[.5, .5], [.5, .5]]))
    assert np.allclose(cp_state, proj_to_cp(state))

    state = vec(np.array([[0, -1j], [1j, 0]]))
    cp_state = vec(np.array([[.5, -.5j], [.5j, .5]]))
    assert np.allclose(cp_state, proj_to_cp(state))
Example 8
def _grad_cost(A, n, estimate, eps=1e-6):
    """
    Computes the gradient of the cost, leveraging the vectorized calculation given in the
    appendix of [PGD]

    :param A: a matrix constructed from the input states and POVM elements (eq. 22) that aids
        in calculating the model probabilities p.
    :param n: vectorized form of the observed counts n_ij
    :param estimate: the current model Choi representation of an estimated process for which we
        compute the gradient.
    :return: Gradient of the cost of the estimate given the data, n
    """
    p = A @ vec(estimate)
    # see appendix on "stalling"
    p = np.clip(p, a_min=eps, a_max=None)
    eta = n / p
    return unvec(-A.conj().T @ eta)
Example 9
def proj_to_tp(choi_vec):
    """
    Projects the vectorized Choi representation of a process onto the closest process in the space of trace-preserving
    maps. Equation 13 of [PGD].
    :param choi_vec: vectorized Choi representation of a process
    :return: The vectorized Choi representation of the projected TP process
    """
    dim = int(np.sqrt(np.sqrt(choi_vec.size)))
    b = vec(np.eye(dim, dim))
    # construct M, which acts as partial trace over output Hilbert space
    M = np.zeros((dim ** 2, dim ** 4))
    for i in range(dim):
        e = np.zeros((dim, 1))
        e[i] = 1
        B = np.kron(np.eye(dim, dim), e.T)
        M = M + np.kron(B, B)
    return choi_vec + 1 / dim * (M.conj().T @ b - M.conj().T @ M @ choi_vec)
Example 10
def _cost(A, n, estimate, eps=1e-6):
    """
    Computes the cost (negative log likelihood) of the estimated process using the vectorized
    version of equation 4 of [PGD].

    See the appendix of [PGD].

    :param A: a matrix constructed from the input states and POVM elements (eq. 22) that aids
        in calculating the model probabilities p.
    :param n: vectorized form of the observed counts n_ij
    :param estimate: the current model Choi representation of an estimated process for which we
        report the cost.
    :return: Cost of the estimate given the data, n
    """
    p = A @ vec(estimate)  # vectorized form of the probabilities of outcomes, p_ij
    # see appendix on "stalling"
    p = np.clip(p, a_min=eps, a_max=None)
    return - n.T @ np.log(p)
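
Plugging in toy numbers makes the cost easy to sanity-check by hand; the sketch below uses made-up probabilities and frequencies (not real tomography data) and shows the clipping step that guards against log(0).

import numpy as np

p = np.array([[0.75], [0.25]])   # toy model probabilities, standing in for A @ vec(estimate)
n = np.array([[0.8], [0.2]])     # toy observed frequencies (counts / grand total of shots)

eps = 1e-6
p = np.clip(p, a_min=eps, a_max=None)   # floor tiny probabilities to avoid log(0) ("stalling")
cost = - n.T @ np.log(p)
print(cost.item())  # ~0.507; the cost shrinks as the model probabilities approach the observed frequencies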
Example 11
def proj_to_tni(choi_vec):
    """
    Projects the vectorized Choi matrix of a process onto the space of trace-non-increasing maps. Equation 33 of [PGD].
    :param choi_vec: vectorized Choi representation of a process
    :return: The vectorized Choi representation of the projected TNI process
    """
    dim = int(np.sqrt(np.sqrt(choi_vec.size)))

    # trace out the output Hilbert space
    pt = partial_trace(unvec(choi_vec), dims=[dim, dim], keep=[0])

    hermitian = (pt + pt.conj().T) / 2  # enforce Hermiticity
    d, v = np.linalg.eigh(hermitian)
    d[d > 1] = 1  # clip eigenvalues so the partial trace is <= identity, i.e. trace non-increasing
    D = np.diag(d)
    projection = v @ D @ v.conj().T

    trace_increasing_part = np.kron((pt - projection) / dim, np.eye(dim))

    return choi_vec - vec(trace_increasing_part)
def test_proj_to_tni():
    state = np.array([[0., 0., 0., 0.], [0., 1.01, 1.01, 0.], [0., 1., 1., 0.], [0., 0., 0., 0.]])
    trace_non_increasing = unvec(proj_to_tni(vec(state)))
    pt = partial_trace(trace_non_increasing, dims=[2, 2], keep=[0])
    assert np.allclose(pt, np.eye(2))