# Example 1
def pgdb_process_estimate(results: List[ExperimentResult],
                          qubits: List[int],
                          trace_preserving: bool = True) -> np.ndarray:
    """
    Provide an estimate of the process via Projected Gradient Descent with Backtracking.

    [PGD] Maximum-likelihood quantum process tomography via projected gradient descent
          Knee et al.,
          Phys. Rev. A 98, 062336 (2018)
          https://dx.doi.org/10.1103/PhysRevA.98.062336
          https://arxiv.org/abs/1803.10062

    :param results: A tomographically complete list of ExperimentResults
    :param qubits: A list of qubits giving the tensor order of the resulting Choi matrix.
    :param trace_preserving: Whether to project the estimate to a trace-preserving process. If
        set to False, the estimate is only constrained to be trace non-increasing.
    :return: an estimate of the process in the Choi matrix representation.
    """
    # construct the matrix A and vector n from the data for vectorized calculations of
    # the cost function and its gradient
    A, n = _extract_from_results(results, qubits[::-1])

    dim = 2**len(qubits)
    est = np.eye(dim**2, dim**2, dtype=complex) / dim  # initial estimate
    old_cost = _cost(A, n, est)  # initial cost, which we want to decrease
    mu = 3 / (2 * dim**2)  # inverse learning rate
    gamma = .3  # tolerance for letting the constrained update deviate from the true gradient; larger is more demanding
    while True:
        gradient = _grad_cost(A, n, est)
        update = proj_choi_to_physical(est - gradient / mu,
                                       trace_preserving) - est

        # determine step size factor, alpha
        alpha = 1
        new_cost = _cost(A, n, est + alpha * update)
        change = gamma * alpha * np.dot(vec(update).conj().T, vec(gradient))
        while new_cost > old_cost + change:
            alpha = .5 * alpha
            change = .5 * change  # directly update change, corresponding to update of alpha
            new_cost = _cost(A, n, est + alpha * update)

            # small alpha stopgap
            if alpha < 1e-15:
                break

        # update estimate
        est += alpha * update
        if old_cost - new_cost < 1e-10:
            break
        # store current cost
        old_cost = new_cost

    return est
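
# All of the snippets here lean on vec/unvec helpers that are not shown. Below is a minimal
# sketch of them, assuming the column-stacking convention under which
# vec(X).T @ vec(Y) == trace(X.T @ Y); the library's own helpers may differ.
import numpy as np


def vec(matrix: np.ndarray) -> np.ndarray:
    """Column-stack a d x d matrix into a (d*d, 1) column vector.

    For A = [[a, b], [c, d]] this returns (a, c, b, d)^T.
    """
    return np.asarray(matrix).T.reshape((-1, 1))


def unvec(vector: np.ndarray) -> np.ndarray:
    """Invert vec(): reshape a length-d*d column back into a d x d matrix."""
    dim = int(np.sqrt(vector.size))
    return vector.reshape((dim, dim)).T
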
# Example 2
def linear_inv_state_estimate(results: List[ExperimentResult],
                              qubits: List[int]) -> np.ndarray:
    """
    Estimate a quantum state using linear inversion.

    This is the simplest state tomography post-processing. To use this function,
    collect state tomography data with :py:func:`generate_state_tomography_experiment`
    and :py:func:`~pyquil.operator_estimation.measure_observables`.

    For more details on this post-processing technique,
    see https://en.wikipedia.org/wiki/Quantum_tomography#Linear_inversion or
    see section 3.4 of

    [WOOD] Initialization and characterization of open quantum systems
           C. Wood,
           PhD thesis, University of Waterloo (2015).
           http://hdl.handle.net/10012/9557

    :param results: A tomographically complete list of results.
    :param qubits: All qubits that were tomographized. This specifies the order in
        which qubits will be kron'ed together.
    :return: A point estimate of the quantum state rho.
    """
    measurement_matrix = np.vstack([
        vec(pauli2matrix(result.setting.out_operator, qubits=qubits)).T.conj()
        for result in results
    ])
    expectations = np.array([result.expectation for result in results])
    rho = pinv(measurement_matrix) @ expectations
    return unvec(rho)
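
# A self-contained, single-qubit illustration of the linear-inversion idea, using exact
# (noiseless) Pauli expectations in place of ExperimentResults and the vec/unvec sketched
# above. This is a toy consistency check, not the library's code path.
import numpy as np
from numpy.linalg import pinv

I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

rho_true = np.array([[0.7, 0.2 - 0.1j],
                     [0.2 + 0.1j, 0.3]])

# each row of the measurement matrix is vec(P)^dagger for a measured observable P
paulis = [I, X, Y, Z]
measurement_matrix = np.vstack([vec(P).conj().T for P in paulis])
expectations = np.array([np.trace(P @ rho_true).real for P in paulis])

rho_est = unvec(pinv(measurement_matrix) @ expectations[:, np.newaxis])
assert np.allclose(rho_est, rho_true)
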
# Example 3
def _extract_from_results(results: List[ExperimentResult], qubits: List[int]):
    """
    Construct the matrix A and vector n such that the model probabilities p_ij of the observed
    outcomes n_ij, given an estimate E, can be computed in vectorized form.

    Specifically::

        p = vec(p_ij) = A x vec(E)

    This yields convenient vectorized calculations of the cost and its gradient, in terms of A, n,
    and E.
    """
    A = []
    n = []
    grand_total_shots = 0

    for result in results:
        # 'lift' the result's ExperimentSetting input TensorProductState to the corresponding
        # matrix. This is simply the density matrix of the state that was prepared.
        in_state_matrix = state2matrix(result.setting.in_state, qubits=qubits)
        # 'lift' the result's ExperimentSetting output PauliTerm to the corresponding matrix.
        operator = pauli2matrix(result.setting.out_operator, qubits=qubits)
        proj_plus = (np.eye(2 ** len(qubits)) + operator) / 2
        proj_minus = (np.eye(2 ** len(qubits)) - operator) / 2

        # Constructing A per eq. (A1) of [PGD]
        # TODO: figure out if we can avoid re-splitting into Pi+ and Pi- counts
        A += [
            # vec() turns the matrix into a column vector; transpose it to a row vector; index
            # into the single row to avoid an extra tensor dimension when we call np.asarray(A).
            vec(np.kron(in_state_matrix, proj_plus.T)).T[0],
            vec(np.kron(in_state_matrix, proj_minus.T)).T[0],
        ]

        expected_plus_ones = (1 + result.expectation) / 2
        n += [
            result.total_counts * expected_plus_ones,
            result.total_counts * (1 - expected_plus_ones)
        ]
        grand_total_shots += result.total_counts

    n_qubits = len(qubits)
    dimension = 2 ** n_qubits
    A = np.asarray(A) / dimension ** 2
    n = np.asarray(n)[:, np.newaxis] / grand_total_shots
    return A, n
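
# Why each row of A is a transposed vec: for the column-stacking vec sketched earlier,
# trace(K.T @ E) == vec(K).T @ vec(E), so stacking one row per (state, projector) pair turns
# all of the p_ij traces into the single product p = A @ vec(E). A quick numeric check on
# synthetic matrices:
import numpy as np

rng = np.random.default_rng(7)
K = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
E = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
assert np.isclose(np.trace(K.T @ E), (vec(K).T @ vec(E)).item())
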
# Example 4
def _grad_cost(A, n, estimate, eps=1e-6):
    """
    Computes the gradient of the cost, leveraging the vectorized equation 6 of [PGD] given in the
    appendix.

    :param A: a matrix constructed from the input states and POVM elements (eq. A1) that aids
        in calculating the model probabilities p.
    :param n: vectorized form of the observed counts n_ij
    :param estimate: the current model Choi representation of an estimated process for which we
        compute the gradient.
    :param eps: small positive floor on the model probabilities, guarding against division by
        zero (see the appendix of [PGD] on "stalling").
    :return: Gradient of the cost of the estimate given the data n
    """
    p = A @ vec(estimate)
    # see appendix on "stalling"
    p = np.clip(p, a_min=eps, a_max=None)
    eta = n / p
    return unvec(-A.conj().T @ eta)
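
# A small numeric illustration of the "stalling" guard: without the floor on p, a model
# probability of exactly zero would make eta = n / p (and hence the gradient) blow up.
# The numbers below are made up purely for illustration.
import numpy as np

p_raw = np.array([[0.5], [0.5], [0.0]])
n_obs = np.array([[0.4], [0.4], [0.2]])
p_safe = np.clip(p_raw, a_min=1e-6, a_max=None)
eta = n_obs / p_safe  # finite everywhere, whereas n_obs / p_raw would contain inf
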
# Example 5
def _cost(A, n, estimate, eps=1e-6):
    """
    Computes the cost (negative log-likelihood) of the estimated process using the vectorized
    version of equation 3 of [PGD].

    See the appendix of [PGD].

    :param A: a matrix constructed from the input states and POVM elements (eq. A1) that aids
        in calculating the model probabilities p.
    :param n: vectorized form of the observed counts n_ij
    :param estimate: the current model Choi representation of an estimated process for which we
        report the cost.
    :param eps: small positive floor on the model probabilities, guarding against taking the
        log of zero (see the appendix of [PGD] on "stalling").
    :return: Cost of the estimate given the data n
    """
    p = A @ vec(estimate)  # vectorized form of the probabilities of outcomes, p_ij
    # see appendix on "stalling"
    p = np.clip(p, a_min=eps, a_max=None)
    return - n.T @ np.log(p)
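
# A hedged finite-difference check that _grad_cost is consistent with _cost. The A, n, and
# estimate below are synthetic stand-ins (rows of A built the same way as in
# _extract_from_results, with vec/unvec as sketched earlier), not real tomography data.
import numpy as np

rng = np.random.default_rng(42)
d = 2  # one qubit, so the Choi estimate is a 4 x 4 matrix

# Hermitian, strictly positive "estimate", so the model probabilities are real and positive
M = rng.normal(size=(d**2, d**2)) + 1j * rng.normal(size=(d**2, d**2))
est = M @ M.conj().T + np.eye(d**2)

rho = np.array([[0.5, 0.25], [0.25, 0.5]])  # an example input state
proj = np.array([[1.0, 0.0], [0.0, 0.0]])   # an example POVM projector
A = np.vstack([vec(np.kron(rho, proj.T)).T[0],
               vec(np.kron(rho, (np.eye(2) - proj).T)).T[0]]) / d**2
n = np.array([[0.6], [0.4]])

H = rng.normal(size=(d**2, d**2)) + 1j * rng.normal(size=(d**2, d**2))
direction = (H + H.conj().T) / 2  # Hermitian perturbation of the estimate

t = 1e-6
numeric = (_cost(A, n, est + t * direction) - _cost(A, n, est - t * direction)) / (2 * t)
analytic = np.real(vec(direction).conj().T @ vec(_grad_cost(A, n, est)))
assert np.allclose(numeric, analytic, atol=1e-4)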