def test_initial_guess_in_minimize_one_norm():
    """Checks that a reasonable initial guess yields an exact representation
    and that a wildly wrong guess makes the optimization fail.
    """
    # NOTE(review): a test with this same name is defined again later in the
    # file and shadows this one at collection time — confirm which version
    # of the test should be kept.
    for noise_level in (0.7, 0.9):
        kraus_ops = global_depolarizing_kraus(noise_level, num_qubits=1)
        noise_super = kraus_to_super(kraus_ops)
        ideal_matrix = kraus_to_super(channel(H))
        basis_matrices = [
            noise_super @ kraus_to_super(channel(gate)) @ ideal_matrix
            for gate in (I, X, Y, Z, H)
        ]
        coeffs = minimize_one_norm(
            ideal_matrix,
            basis_matrices,
            initial_guess=[1.0] * 5,
        )
        reconstruction = sum(
            c * mat for c, mat in zip(coeffs, basis_matrices)
        )
        assert np.allclose(ideal_matrix, reconstruction)

        # With a very bad guess it should fail
        with raises(RuntimeError, match="optimal representation failed"):
            minimize_one_norm(
                ideal_matrix,
                basis_matrices,
                initial_guess=[-1.0e9, 1.0e9, -1.0e9, +1.0e9, -1.0e9],
            )
def test_initial_guess_in_minimize_one_norm():
    """Checks that a sensible initial guess yields an exact representation
    and that a guess of the wrong length raises a ``ValueError``.
    """
    for noise_level in (0.7, 0.9):
        noise_super = kraus_to_super(
            global_depolarizing_kraus(noise_level, num_qubits=1)
        )
        ideal_matrix = kraus_to_super(kraus(H))
        basis_matrices = [
            noise_super @ kraus_to_super(kraus(gate)) @ ideal_matrix
            for gate in (I, X, Y, Z, H)
        ]
        coeffs = minimize_one_norm(
            ideal_matrix,
            basis_matrices,
            initial_guess=[1.0] * 5,
        )
        reconstruction = sum(
            c * mat for c, mat in zip(coeffs, basis_matrices)
        )
        assert np.allclose(ideal_matrix, reconstruction)

        # Test bad argument: guess length does not match the basis size
        with raises(ValueError, match="shapes"):
            minimize_one_norm(
                ideal_matrix,
                basis_matrices,
                initial_guess=[1],
            )
def test_minimize_one_norm_with_depolarized_superoperators():
    """Verifies the optimized quasi-probability representation of H under
    depolarizing noise, and compares its one-norm to the analytic optimum.
    """
    for noise_level in (0.01, 0.02, 0.03):
        noise_super = kraus_to_super(
            global_depolarizing_kraus(noise_level, num_qubits=1)
        )
        ideal_matrix = kraus_to_super(channel(H))
        basis_matrices = [
            noise_super @ kraus_to_super(channel(gate)) @ ideal_matrix
            for gate in (I, X, Y, Z, H)
        ]
        coeffs = minimize_one_norm(ideal_matrix, basis_matrices)
        reconstruction = sum(
            c * mat for c, mat in zip(coeffs, basis_matrices)
        )
        assert np.allclose(ideal_matrix, reconstruction)

        # Optimal analytic result by Takagi (arXiv:2006.12509)
        eps = 4.0 / 3.0 * noise_level
        expected_norm = (1.0 + 0.5 * eps) / (1.0 - eps)
        assert np.isclose(np.linalg.norm(coeffs, 1), expected_norm)
def find_optimal_representation(
    ideal_operation: QPROGRAM,
    noisy_basis: NoisyBasis,
    tol: float = 1.0e-8,
    initial_guess: Optional[np.ndarray] = None,
) -> OperationRepresentation:
    r"""Returns the ``OperationRepresentation`` of the input ideal operation
    which minimizes the one-norm of the associated quasi-probability
    distribution.

    More precisely, it solves the following optimization problem:

    .. math::
        \min_{{\eta_\alpha}} = \sum_\alpha |\eta_\alpha|,
        \text{ such that }
        \mathcal G = \sum_\alpha \eta_\alpha \mathcal O_\alpha,

    where :math:`\{\mathcal O_j\}` is the input basis of noisy operations.

    Args:
        ideal_operation: The ideal operation to represent.
        noisy_basis: The ``NoisyBasis`` in which the ``ideal_operation``
            should be represented. It must contain ``NoisyOperation`` objects
            which are initialized with a numerical superoperator matrix.
        tol: The error tolerance for each matrix element
            of the represented operation.
        initial_guess: Optional initial guess for the coefficients
            :math:`\{ \eta_\alpha \}``.

    Returns:
        The optimal OperationRepresentation.

    Raises:
        ValueError: If any element of ``noisy_basis`` lacks a numerical
            superoperator (channel) matrix.
    """
    # Convert to a Cirq circuit and build the ideal superoperator matrix.
    ideal_cirq_circuit, _ = convert_to_mitiq(ideal_operation)
    ideal_matrix = kraus_to_super(
        cast(List[np.ndarray], kraus(ideal_cirq_circuit))
    )
    basis_set = noisy_basis.elements
    try:
        basis_matrices = [noisy_op.channel_matrix for noisy_op in basis_set]
    except ValueError as err:
        # Re-raise with a more actionable message when the basis elements
        # were not initialized with numerical superoperator matrices.
        if str(err) == "The channel matrix is unknown.":
            raise ValueError(
                "The input noisy_basis should contain NoisyOperation objects"
                " which are initialized with a numerical superoperator matrix."
            )
        else:
            raise err  # pragma: no cover

    # Run numerical optimization problem
    quasi_prob_dist = minimize_one_norm(
        ideal_matrix,
        basis_matrices,
        tol=tol,
        initial_guess=initial_guess,
    )

    basis_expansion = {op: eta for op, eta in zip(basis_set, quasi_prob_dist)}
    return OperationRepresentation(ideal_operation, basis_expansion)
def test_super_to_choi():
    """Compares ``super_to_choi`` against the Choi state obtained directly
    from a Cirq operation list (amplitude damping followed by Pauli Y).
    """
    for noise_level in (0, 0.3, 1):
        damping_super = kraus_to_super(
            amplitude_damping_kraus(noise_level, 1)
        )
        # Apply Pauli Y to get some complex numbers
        y_kraus = channel(Y)[0]
        super_op = np.kron(y_kraus, y_kraus.conj()) @ damping_super
        choi_state = super_to_choi(super_op)

        # expected result
        q = LineQubit(0)
        choi_expected = _operation_to_choi(
            [AmplitudeDampingChannel(noise_level)(q), Y(q)]
        )
        assert np.allclose(choi_state, choi_expected)
def test_minimize_one_norm_tolerance():
    """Checks that tightening ``tol`` strictly reduces the worst-case
    reconstruction error while strictly increasing the one-norm objective.
    """
    noise_super = kraus_to_super(
        global_depolarizing_kraus(noise_level=0.1, num_qubits=1)
    )
    ideal_matrix = kraus_to_super(channel(H))
    basis_matrices = [
        noise_super @ kraus_to_super(channel(gate)) @ ideal_matrix
        for gate in (I, X, Y, Z)
    ]

    last_norm = 0.0
    last_error = 1.0
    for tol in (1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8):
        coeffs = minimize_one_norm(ideal_matrix, basis_matrices, tol)
        reconstruction = sum(
            c * mat for c, mat in zip(coeffs, basis_matrices)
        )
        worst_case_error = np.max(abs(ideal_matrix - reconstruction))
        one_norm = np.linalg.norm(coeffs, 1)

        # Reducing "tol" should decrease the worst case error
        # and should also increase the objective function
        assert worst_case_error < last_error
        assert one_norm > last_norm
        last_error = worst_case_error
        last_norm = one_norm
def test_minimize_one_norm_with_amp_damp_superoperators():
    """Verifies the optimized representation of H under amplitude damping
    (with a reset channel in the basis) against the analytic one-norm.
    """
    for noise_level in (0.01, 0.02, 0.03):
        noise_super = kraus_to_super(
            amplitude_damping_kraus(noise_level, num_qubits=1)
        )
        ideal_matrix = kraus_to_super(channel(H))
        basis_matrices = [
            noise_super @ kraus_to_super(channel(gate)) @ ideal_matrix
            for gate in (I, Z)
        ]
        # Append reset channel
        basis_matrices.append(kraus_to_super(channel(ResetChannel())))

        coeffs = minimize_one_norm(ideal_matrix, basis_matrices, tol=1.0e-6)
        reconstruction = sum(
            c * mat for c, mat in zip(coeffs, basis_matrices)
        )
        assert np.allclose(ideal_matrix, reconstruction)

        # Optimal analytic result by Takagi (arXiv:2006.12509)
        expected_norm = (1.0 + noise_level) / (1.0 - noise_level)
        assert np.isclose(np.linalg.norm(coeffs, 1), expected_norm)
def test_kraus_to_super():
    """Tests the function on random channels acting on random states.
    Channels and states are non-physical, but this is irrelevant for
    the test.
    """
    for num_qubits in range(1, 6):
        dim = 2**num_qubits
        random_kraus_ops = [
            np.random.rand(dim, dim) + 1.0j * np.random.rand(dim, dim)
            for _ in range(7)
        ]
        super_op = kraus_to_super(random_kraus_ops)

        state = np.random.rand(dim, dim) + 1.0j * np.random.rand(dim, dim)
        # Evolve the state both ways and compare.
        via_kraus = sum(
            k @ state @ k.conj().T for k in random_kraus_ops
        )
        via_super = vector_to_matrix(super_op @ matrix_to_vector(state))
        assert np.allclose(via_kraus, via_super)
def test_kraus_to_choi():
    """Checks ``kraus_to_choi`` against the composition of
    ``kraus_to_super`` and ``super_to_choi`` on random Kraus operators.
    """
    for dim in (2, 4, 8, 16):
        kraus_ops = [np.random.rand(dim, dim) for _ in range(7)]
        expected_choi = super_to_choi(kraus_to_super(kraus_ops))
        assert np.allclose(kraus_to_choi(kraus_ops), expected_choi)