def generate_marginal(self, qubit_set):
    """
    Construct the reduced density matrix (marginal) of ``self.rho`` on the
    qubits in ``qubit_set``.

    The marginal is built by expanding rho in the Pauli operator basis:
    for every Pauli string P on the marginal qubits, the coefficient
    Tr[P rho] / 2**k (k = number of marginal qubits) multiplies the
    corresponding k-qubit basis matrix, and the results are summed.

    :param qubit_set: iterable of qubit indices to keep; its length fixes
        the marginal's rank (matrix is 2**len(qubit_set) square).
        NOTE(review): assumes indices are valid qubits of self.rho — confirm.
    :return: 2**k x 2**k complex numpy array, the reduced density matrix.
    """
    # All Pauli label strings (e.g. ('I','X'), ...) of length len(qubit_set)
    pauli_label_basis = list(product(pauli_labels, repeat=len(qubit_set)))
    marginal_rank = len(qubit_set)
    marginal = np.zeros((2**marginal_rank, 2**marginal_rank), dtype=complex)
    # get set of matrices serving as the operator basis: each Pauli label
    # tuple is realized on qubits 0..k-1 of the *marginal* space
    marginal_basis = {}
    for ops in pauli_label_basis:
        pauli_group_op = sI(0)
        for qubit_idx, p_op in enumerate(ops):
            pauli_group_op *= pauli_basis[p_op](qubit_idx)
        marginal_basis[ops] = tensor_up(PauliSum([pauli_group_op]), marginal_rank)

    for ops in pauli_label_basis:
        # Same Pauli string, but now placed on the *original* qubit indices
        # so it can be traced against the full-space rho
        pauli_group_op = sI(0)
        for qubit_idx, p_op in zip(qubit_set, ops):
            pauli_group_op *= pauli_basis[p_op](qubit_idx)
        p_op_full_space = tensor_up(PauliSum([pauli_group_op]), self.num_qubits)
        # 1/2**k normalization of the Pauli expansion coefficient
        p_op_full_space /= 2**marginal_rank  # TODO: Normalize pauli coefficients...Did we do it properly?
        # coefficient of this basis element: Tr[P rho] (with the 1/2**k folded in)
        basis_coeff = np.trace(p_op_full_space.dot(self.rho))
        marginal += basis_coeff * marginal_basis[ops]
    return marginal
def _single_projector_generator(ket_op, bra_op, index): """ Generate the pauli sum terms corresponding to |ket_op><brak_op| :param ket_op: single qubit computational basis state :param bra_op: single qubit computational basis state :param index: qubit index to assign to the projector :return: pauli sum of single qubit projection operator :rtype: PauliSum """ if not isinstance(ket_op, int): raise TypeError("ket_op needs to be an integer") if not isinstance(bra_op, int): raise TypeError("ket_op needs to be an integer") if ket_op not in [0, 1] or bra_op not in [0, 1]: raise ValueError("bra and ket op needs to be either 0 or 1") if ket_op == 0 and bra_op == 0: return 0.5 * (sZ(index) + sI(index)) elif ket_op == 0 and bra_op == 1: return 0.5 * (sX(index) + 1j * sY(index)) elif ket_op == 1 and bra_op == 0: return 0.5 * (sX(index) - 1j * sY(index)) else: return 0.5 * (sI(index) - sZ(index))
def test_single_projector():
    """
    Check the single-qubit projector generator: input validation plus the
    four |i><j| operators on various qubit indices.
    """
    # non-integer arguments must be rejected
    for bad_ket, bad_bra in [('a', 0), (0, 'a'), (0, 0.0), (0.0, 0)]:
        with pytest.raises(TypeError):
            _single_projector_generator(bad_ket, bad_bra, 5)

    # integer arguments outside {0, 1} must be rejected
    for bad_ket, bad_bra in [(5, 0), (1, 4)]:
        with pytest.raises(ValueError):
            _single_projector_generator(bad_ket, bad_bra, 5)

    # |0><0| on qubit 0
    assert _single_projector_generator(0, 0, 0) == 0.5 * (sZ(0) + sI(0))
    # |1><1| on qubit 5
    assert _single_projector_generator(1, 1, 5) == 0.5 * (sI(5) - sZ(5))
    # |0><1| (lowering) on qubit 2
    assert _single_projector_generator(0, 1, 2) == 0.5 * (sX(2) + 1j * sY(2))
    # |1><0| (raising) on qubit 2
    assert _single_projector_generator(1, 0, 2) == 0.5 * (sX(2) - 1j * sY(2))
def test_identity(forest):
    """A scaled identity observable reports its coefficient as the expectation."""
    quantum_computer = get_qc('2q-qvm')
    experiment = TomographyExperiment(
        [ExperimentSetting(sI(), 0.123 * sI(0))],
        program=Program(X(0)),
        qubits=[0])
    first_result = next(iter(measure_observables(quantum_computer, experiment)))
    assert first_result.expectation == 0.123
def test_measure_observables_many_progs(forest):
    """Measure the full two-qubit Pauli basis over several random programs,
    checking sampled expectations against the wavefunction simulator."""
    paulis_q0 = [sI(0), sX(0), sY(0), sZ(0)]
    paulis_q1 = [sI(1), sX(1), sY(1), sZ(1)]
    settings = [ExperimentSetting(sI(), a * b)
                for a, b in itertools.product(paulis_q0, paulis_q1)]

    qc = get_qc('2q-qvm')
    qc.qam.random_seed = 51
    for program in _random_2q_programs():
        experiment = TomographyExperiment(settings, program=program, qubits=[0, 1])
        assert len(experiment) == 4 * 4
        grouped = group_experiments(experiment)
        # can get all the terms with I for free in this case
        assert len(grouped) == 3 * 3

        simulator = WavefunctionSimulator()
        exact = {setting: simulator.expectation(grouped.program,
                                                PauliSum([setting.out_operator]))
                 for setting in settings}
        for result in measure_observables(qc, grouped, n_shots=1_000):
            np.testing.assert_allclose(exact[result.setting],
                                       result.expectation, atol=0.1)
def test_max_weight_operator_misc():
    """Exercise _max_weight_operator on compatible and incompatible term sets."""
    xx = sX(0) * sX(1)
    z0, z1, z0z1 = sZ(0), sZ(1), sZ(0) * sZ(1)

    # term sets with no shared diagonal basis yield None
    for incompatible in ([sX(0), sY(0) * sZ(2)], [xx, z1], [z0z1, xx]):
        assert _max_weight_operator(incompatible) is None

    # term sets that do share one yield a covering operator
    compatible_sets = [
        [sZ(0), sZ(0) * sZ(1)],
        [sX(5), sZ(4)],
        [z1, z0],
        [z0z1, z0],
        [z0z1, z1],
        [z0z1, sI(1)],
        [z0z1, sI(2)],
        [z0z1, sX(5) * sZ(7)],
    ]
    for compatible in compatible_sets:
        assert _max_weight_operator(compatible) is not None

    xxxx_terms = (sX(1) * sX(2) + sX(2) + sX(3) * sX(4) + sX(4) +
                  sX(1) * sX(3) * sX(4) + sX(1) * sX(4) +
                  sX(1) * sX(2) * sX(3))
    assert _max_weight_operator(xxxx_terms.terms) == sX(1) * sX(2) * sX(3) * sX(4)

    zzzz_terms = (sZ(1) * sZ(2) + sZ(3) * sZ(4) + sZ(1) * sZ(3) +
                  sZ(1) * sZ(3) * sZ(4))
    assert _max_weight_operator(zzzz_terms.terms) == sZ(1) * sZ(2) * sZ(3) * sZ(4)

    mixed_terms = [sZ(0), sX(1) * sZ(0), sY(2) * sX(1), sZ(5) * sI(3)]
    assert _max_weight_operator(mixed_terms) == sZ(5) * sY(2) * sX(1) * sZ(0)
def test_term_equality():
    """PauliTerm equality semantics, including the int-comparison TypeError."""
    with pytest.raises(TypeError):
        sI(0) != 0
    assert sI(0) == sI(0)
    single = PauliTerm('X', 10, 1 + 1.j)
    doubled = PauliTerm('X', 10, 1 + 1.j) + PauliTerm('X', 10, 1 + 1.j)
    assert single == PauliTerm('X', 10, 1 + 1.j)
    # a doubled term is unequal to the single term, checked both ways
    assert doubled != single
    assert single != doubled
def build_cost(penalty, num_cities, weights, connections):
    """Assemble the traveling-salesman cost operator from four constraint families:
    (a) each city in one slot, (b) each slot holds one city, (c) consecutive slots
    joined by existing connections, (d) connection weights scaled by the penalty."""
    cities = range(num_cities)
    total = 0

    # constraint (a): every city occupies exactly one tour slot
    for city in cities:
        term = sI()
        for slot in cities:
            term -= D(city, slot)
        total += term ** 2

    # constraint (b): every tour slot is occupied by exactly one city
    for slot in cities:
        term = sI()
        for city in cities:
            term -= D(city, slot)
        total += term ** 2

    # constraint (c): consecutive slots must be joined by an existing connection
    for slot in range(num_cities - 1):
        term = sI()
        for a in cities:
            for b in cities:
                if connections[a, b]:
                    term -= D(a, slot) * D(b, slot + 1)
        total += term

    # constraint (d): weight each used connection, scaled by the penalty
    for slot in range(num_cities - 1):
        term = sI()
        for a in cities:
            for b in cities:
                if connections[a, b]:
                    term -= D(a, slot) * D(b, slot + 1) * weights[a, b]
        total += term * penalty

    return total
def test_experiment_no_in():
    """Settings with a trivial in-operator should round-trip through str()."""
    for out_op in _generate_random_paulis(n_qubits=4, n_terms=7):
        setting = ExperimentSetting(sI(), out_op)
        recovered = ExperimentSetting.from_str(str(setting))
        assert setting == recovered
        assert recovered.in_operator == sI()
        assert recovered.out_operator == out_op
def test_is_identity():
    """is_identity accepts scaled identities, rejects zero and cancelling sums."""
    scaled_identity = -1.5j * sI(2)
    xz = 1.5 * sX(1) * sZ(2)
    assert is_identity(scaled_identity)
    # a sum whose non-identity parts cancel still reduces to the identity
    assert is_identity(xz + (-1 * xz) + sI(0))
    # the zero operator is not the identity
    assert not is_identity(0 * scaled_identity)
    assert not is_identity(xz + (-1 * xz))
def _generate_random_pauli(n_qubits, n_terms):
    """Build a random Pauli operator of n_terms terms on n_qubits qubits,
    each with a coefficient drawn uniformly from [-1, 1), seeded with sI(0)."""
    single_qubit_ops = [sI, sX, sY, sZ]
    # one row of operator choices per term, one column per qubit
    choice_grid = np.random.randint(len(single_qubit_ops),
                                    size=(n_terms, n_qubits))
    result = sI(0)
    for choices in choice_grid:
        term = sI(0)
        for qubit, choice in enumerate(choices):
            term *= single_qubit_ops[choice](qubit)
        result += np.random.uniform(-1, 1) * term
    return result
def test_sum_power():
    """Powers of PauliSums on fixed integer qubit indices."""
    normalized_sum = (1.0 / np.sqrt(2)) * (sY(0) - sX(0))
    # squaring a normalized X/Y combination gives the identity
    assert normalized_sum ** 2 == PauliSum([sI(0)])
    with pytest.raises(ValueError):
        _ = normalized_sum ** -1

    identity_sum = sI(0) + sI(1)
    assert identity_sum ** 0 == sI(0)
    # large exponents should not blow up
    identity_sum ** 400
def test_sum_power():
    """Powers of PauliSums built on QubitPlaceholders."""
    qubits = QubitPlaceholder.register(8)
    normalized_sum = (1.0 / np.sqrt(2)) * (sY(qubits[0]) - sX(qubits[0]))
    # squaring a normalized X/Y combination gives the identity
    assert normalized_sum ** 2 == PauliSum([sI(qubits[0])])
    with pytest.raises(ValueError):
        _ = normalized_sum ** -1

    identity_sum = sI(qubits[0]) + sI(qubits[1])
    assert identity_sum ** 0 == sI(qubits[0])
    # large exponents should not blow up
    identity_sum ** 400
def test_term_powers():
    """Single-qubit Pauli powers on placeholder qubits: P**2 == I, P**3 == P."""
    for placeholder in QubitPlaceholder.register(2):
        for term in (sI(placeholder), sX(placeholder),
                     sY(placeholder), sZ(placeholder)):
            assert term ** 0 == sI(placeholder)
            assert term ** 1 == term
            assert term ** 2 == sI(placeholder)
            assert term ** 3 == term
        # negative exponents are rejected
        with pytest.raises(ValueError):
            sI(placeholder) ** -1
def test_term_equality():
    """Equality of placeholder-based PauliTerms and their sums."""
    q0, q10 = QubitPlaceholder.register(2)
    with pytest.raises(TypeError):
        sI(q0) != 0
    assert sI(q0) == sI(q0)
    assert PauliTerm('X', q10, 1 + 1.j) == PauliTerm('X', q10, 1 + 1.j)
    doubled = PauliTerm('X', q10, 1 + 1.j) + PauliTerm('X', q10, 1 + 1.j)
    # a doubled term differs from the single term, checked both ways
    assert doubled != PauliTerm('X', q10, 1 + 1.j)
    assert PauliTerm('X', q10, 1 + 1.j) != doubled
def test_term_powers():
    """Single-qubit Pauli powers on integer qubit ids, plus a large-power check."""
    for qubit in range(2):
        for term in (sI(qubit), sX(qubit), sY(qubit), sZ(qubit)):
            assert term ** 0 == sI(qubit)
            assert term ** 1 == term
            assert term ** 2 == sI(qubit)
            assert term ** 3 == term
        # negative exponents are rejected
        with pytest.raises(ValueError):
            sI(qubit) ** -1
    # Test to make sure large powers can be computed
    (PauliTerm('X', 0, 2) * PauliTerm('Y', 0, 2)) ** 400
def test_check_trivial_commutation_operation():
    """Sanity checks for check_trivial_commutation."""
    # the identity commutes with everything
    assert check_trivial_commutation([sI(0)], sX(1))
    assert check_trivial_commutation([sX(0) * sZ(1), sY(2)], sI(0))
    # overlapping anti-commuting supports must report False
    assert not check_trivial_commutation([sX(0), sX(1)], sZ(0) * sZ(1))
    # operators on disjoint qubits commute trivially
    assert check_trivial_commutation([sX(5) * sX(6)], sZ(4))
def test_all_ops_belong_to_tpb():
    """_all_qubits_diagonal_in_tpb on grouped settings and bare operators."""
    grouped_settings = [
        [ExperimentSetting(sI(), sX(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sX(1))],
        [ExperimentSetting(sI(), sZ(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sZ(1))],
    ]
    for group in grouped_settings:
        for first, second in itertools.combinations(group, 2):
            assert _all_qubits_diagonal_in_tpb(first.in_operator,
                                               second.in_operator)
            assert _all_qubits_diagonal_in_tpb(first.out_operator,
                                               second.out_operator)

    assert _all_qubits_diagonal_in_tpb(sZ(0), sZ(0) * sZ(1))
    assert _all_qubits_diagonal_in_tpb(sX(5), sZ(4))
    assert not _all_qubits_diagonal_in_tpb(sX(0), sY(0) * sZ(2))
    # commuting operators are not necessarily diagonal in the same TPB
    assert not _all_qubits_diagonal_in_tpb(sX(1) * sZ(0), sZ(1) * sX(0))
def test_experiment_suite_pre_grouped():
    """Pre-grouped settings must be preserved verbatim by TomographyExperiment."""
    grouped_settings = [
        [ExperimentSetting(sI(), sX(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sX(1))],
        [ExperimentSetting(sI(), sZ(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sZ(1))],
    ]
    experiment = TomographyExperiment(settings=grouped_settings,
                                      program=Program(X(0), Y(1)),
                                      qubits=[0, 1])
    assert len(experiment) == 2  # number of groups
    for expected_group, actual_group in zip(grouped_settings, experiment):
        for expected, actual in zip(expected_group, actual_group):
            assert expected == actual
    # the program renders on the first line of str()
    first_line = str(experiment).splitlines()[0]
    assert first_line == 'X 0; Y 1'
def test_diagonal_basis_commutes():
    """diagonal_basis_commutes on overlapping and disjoint Pauli terms."""
    xx = sX(0) * sX(1)
    z0, z1, z0z1 = sZ(0), sZ(1), sZ(0) * sZ(1)

    # X terms clash with Z terms on shared qubits
    assert not diagonal_basis_commutes(xx, z1)
    assert not diagonal_basis_commutes(z0z1, xx)

    assert diagonal_basis_commutes(z1, z0)
    # Z0Z1 shares a diagonal basis with sub-terms, identities, and disjoint terms
    for other in (z0, z1, sI(1), sI(2), sX(5) * sY(7)):
        assert diagonal_basis_commutes(z0z1, other)
def test_group_experiments(grouping_method):
    """Grouping should merge four single-qubit settings into two groups."""
    settings = [
        # cf above, I removed the inner nesting. Still grouped visually
        ExperimentSetting(TensorProductState(), sX(0) * sI(1)),
        ExperimentSetting(TensorProductState(), sI(0) * sX(1)),
        ExperimentSetting(TensorProductState(), sZ(0) * sI(1)),
        ExperimentSetting(TensorProductState(), sI(0) * sZ(1)),
    ]
    experiment = Experiment(settings, Program())
    grouped = group_settings(experiment, method=grouping_method)
    assert len(experiment) == 4
    assert len(grouped) == 2
def test_append():
    """Appending a new setting to an existing experiment should succeed."""
    grouped_settings = [
        [ExperimentSetting(sI(), sX(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sX(1))],
        [ExperimentSetting(sI(), sZ(0) * sI(1)),
         ExperimentSetting(sI(), sI(0) * sZ(1))],
    ]
    experiment = TomographyExperiment(settings=grouped_settings,
                                      program=Program(X(0), Y(1)),
                                      qubits=[0, 1])
    experiment.append(ExperimentSetting(sI(), sY(0) * sX(1)))
    # the experiment still renders after the append
    assert len(str(experiment)) > 0
def test_pauli_sum():
    """Products of (X + iY)/2-style sums with X reduce to 0.5*I plus/minus 0.5*Z0."""
    q_plus = 0.5 * PauliTerm('X', 0) + 0.5j * PauliTerm('Y', 0)

    # (sum, expected Z-term string) for left- and right-multiplication
    cases = [
        (q_plus * PauliSum([PauliTerm('X', 0)]), '(0.5+0j)*Z0'),
        (q_plus * PauliTerm('X', 0), '(0.5+0j)*Z0'),
        (PauliTerm('X', 0) * q_plus, '(-0.5+0j)*Z0'),
    ]
    for the_sum, z_string in cases:
        strings = [str(term) for term in the_sum.terms]
        assert '(0.5+0j)*I' in strings
        assert z_string in strings
        assert len(strings) == 2
        assert len(the_sum.terms) == 2

    with pytest.raises(ValueError):
        _ = PauliSum(sI(0))
    with pytest.raises(ValueError):
        _ = PauliSum([1, 1, 1, 1])
    with pytest.raises(ValueError):
        _ = (PauliTerm('X', 0) * q_plus) * []
def benchmarker():
    """
    Fixture: return a benchmarker connection, or skip the test when the local
    benchmarker endpoint (quilc) is not reachable.

    :return: the benchmarker connection on success; skips otherwise.
    """
    try:
        bm = get_benchmarker()
        # probe the endpoint with a trivial request so an unreachable
        # server fails fast here rather than mid-test
        bm.apply_clifford_to_pauli(Program(I(0)), sI(0))
    except RequestException as e:
        return pytest.skip("This test requires a running local benchmarker endpoint (ie quilc): {}"
                           .format(e))
    # bug fix: the connection was previously dropped, so dependent tests
    # received None instead of the benchmarker
    return bm
def test_no_complex_coeffs(forest):
    """Imaginary observable coefficients must be rejected by measure_observables."""
    quantum_computer = get_qc('2q-qvm')
    experiment = TomographyExperiment(
        [ExperimentSetting(sI(), 1.j * sY(0))],
        program=Program(X(0)),
        qubits=[0])
    with pytest.raises(ValueError):
        _ = list(measure_observables(quantum_computer, experiment))
def test_for_negative_probabilities():
    """Density-simulator sampling must never yield negative probabilities,
    whether rho starts pure or mixed."""
    # trivial program to do state tomography on
    prog = Program(I(0))
    settings = [ExperimentSetting(zeros_state([0]), pauli)
                for pauli in [sI(0), sX(0), sY(0), sZ(0)]]
    experiment_1q = TomographyExperiment(settings=settings, program=prog)

    # a quantum computer backed by the reference density simulator
    device = NxDevice(nx.complete_graph(1))
    qc_density = QuantumComputer(
        name="testy!",
        qam=PyQVM(n_qubits=1, quantum_simulator_type=ReferenceDensitySimulator),
        device=device,
        compiler=DummyCompiler(),
    )

    pure_state = np.array([[1.0, 0.0], [0.0, 0.0]])
    mixed_state = np.array([[0.9, 0.0], [0.0, 0.1]])
    for initial_density in (pure_state, mixed_state):
        qc_density.qam.wf_simulator.density = initial_density
        try:
            list(measure_observables(qc=qc_density,
                                     tomo_experiment=experiment_1q,
                                     n_shots=3000))
        except ValueError as e:
            # the error is from np.random.choice by way of self.rs.choice
            # in ReferenceDensitySimulator
            assert str(e) != "probabilities are not non-negative"
def test_measure_observables(forest):
    """Measure all 16 two-qubit Paulis on a Bell state; only stabilizer-like
    Z terms should have large expectations."""
    settings = [ExperimentSetting(sI(), a * b)
                for a, b in itertools.product([sI(0), sX(0), sY(0), sZ(0)],
                                              [sI(1), sX(1), sY(1), sZ(1)])]
    experiment = TomographyExperiment(settings,
                                      program=Program(X(0), CNOT(0, 1)),
                                      qubits=[0, 1])
    assert len(experiment) == 4 * 4
    grouped = group_experiments(experiment)
    # can get all the terms with I for free in this case
    assert len(grouped) == 3 * 3

    qc = get_qc('2q-qvm')
    large_expectation_ops = [sI(), sZ(0), sZ(1), sZ(0) * sZ(1)]
    for result in measure_observables(qc, grouped, n_shots=10_000):
        if result.setting.out_operator in large_expectation_ops:
            assert np.abs(result.expectation) > 0.9
        else:
            assert np.abs(result.expectation) < 0.1
def test_pauli_sum():
    """PauliSum arithmetic with QubitPlaceholder-indexed terms."""
    q = QubitPlaceholder.register(8)
    q_plus = 0.5 * PauliTerm('X', q[0]) + 0.5j * PauliTerm('Y', q[0])

    # left- and right-multiplication by X all reduce to two terms with 0.5*I
    products = [
        q_plus * PauliSum([PauliTerm('X', q[0])]),
        q_plus * PauliTerm('X', q[0]),
        PauliTerm('X', q[0]) * q_plus,
    ]
    for the_sum in products:
        strings = [str(term) for term in the_sum.terms]
        assert '(0.5+0j)*I' in strings
        assert len(strings) == 2
        assert len(the_sum.terms) == 2

    with pytest.raises(ValueError):
        _ = PauliSum(sI(q[0]))
    with pytest.raises(ValueError):
        _ = PauliSum([1, 1, 1, 1])
    with pytest.raises(ValueError):
        _ = (PauliTerm('X', q[0]) * q_plus) * []
def test_marginal():
    """
    All 2-marginals of the Heisenberg ground state should have unit trace.

    The Heisenberg spin model on a 4-qubit ring is
    H = sum_{<ij>} X_i X_j + Y_i Y_j + Z_i Z_j.
    """
    n_qubits = 4
    hamiltonian = sI(0) * 0.0
    for site in range(n_qubits):
        neighbor = (site + 1) % n_qubits
        hamiltonian += sX(site) * sX(neighbor)
        hamiltonian += sY(site) * sY(neighbor)
        hamiltonian += sZ(site) * sZ(neighbor)

    hamiltonian_matrix = tensor_up(hamiltonian, n_qubits)
    eigenvalues, eigenvectors = np.linalg.eigh(hamiltonian_matrix)
    # project onto the ground state (lowest eigenvalue column)
    ground = eigenvectors[:, [0]]
    rho = ground.dot(np.conj(ground).T)

    marginals = MarginalGenerator(rho).construct_p_marginals(2)
    for marginal in marginals.values():
        assert np.isclose(marginal.trace(), 1.0)
def test_experiment_deser(tmpdir):
    """Experiments should survive a JSON round trip unchanged."""
    settings = [
        [ExperimentSetting(TensorProductState(), sX(0) * sI(1)),
         ExperimentSetting(TensorProductState(), sI(0) * sX(1))],
        [ExperimentSetting(TensorProductState(), sZ(0) * sI(1)),
         ExperimentSetting(TensorProductState(), sI(0) * sZ(1))],
    ]
    experiment = Experiment(settings=settings, program=Program(X(0), Y(1)))
    path = f"{tmpdir}/suite.json"
    to_json(path, experiment)
    roundtripped = read_json(path)
    assert experiment == roundtripped