def test_random_progs(n_qubits, prog_length):
    for repeat_i in range(10):
        prog = _generate_random_program(n_qubits=n_qubits, length=prog_length)
        u1 = program_unitary(prog, n_qubits=n_qubits)
        u2 = program_unitary(basic_compile(prog), n_qubits=n_qubits)

        assert_all_close_up_to_global_phase(u1, u2, atol=1e-12)
def test_basic_compile_defgate():
    p = Program()
    p.inst(RX(pi, 0))
    p.defgate("test", [[0, 1], [1, 0]])
    p.inst(("test", 2))
    p.inst(RZ(pi / 2, 0))
    assert p == basic_compile(p)
def test_generate_single_depth(qvm):
    qvm.qam.random_seed = 5
    expected_outcomes = [0., .5, 1.0, .5]
    for depth in [0, 1, 2, 3, 4]:
        for exp_type in ['X', 'Y']:
            exp = rpe.generate_single_depth_experiment(RZ(np.pi / 2, 0), depth, exp_type)
            idx = ((depth - 1) if exp_type == 'Y' else depth) % 4
            expected = expected_outcomes[idx]
            executable = qvm.compiler.native_quil_to_executable(
                basic_compile(exp.wrap_in_numshots_loop(5000)))
            result = np.average(qvm.run(executable))
            assert np.allclose(expected, result, atol=.005)
def compiled_parametric_graph_state(graph, focal_node, compiler: QPUCompiler, n_shots=1000):
    """
    Construct a program to create and measure a graph state, and compile it to an ISA.

    Hackily implement a parameterized program by compiling a program with a particular angle,
    finding where that angle appears in the results, and replacing it with ``"{angle}"`` so
    the resulting compiled program can be run many times by using python's str.format method.

    :param graph: A networkx graph defining the graph state.
    :param focal_node: The node of the graph to measure.
    :param compiler: The compiler to do the compiling.
    :param n_shots: The number of shots to take when measuring the graph state.
    :return: An executable that constructs and measures a graph state.
    """
    program = create_graph_state(graph)
    measure_prog, c_addrs = measure_graph_state(graph, focal_node)
    program += measure_prog
    program.wrap_in_numshots_loop(n_shots)
    nq_program = basic_compile(program)
    executable = compiler.native_quil_to_executable(nq_program)
    return executable
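# Usage sketch (illustrative, not part of the original module). The graph shape,
# the "9q-square-qvm" lattice name, and the use of a QVM-backed compiler are
# placeholder assumptions; any object exposing ``native_quil_to_executable``
# should work in place of ``qc.compiler``.
#
# import networkx as nx
# from pyquil import get_qc
#
# graph = nx.path_graph(3)                  # 3-qubit linear graph state
# qc = get_qc("9q-square-qvm")
# exe = compiled_parametric_graph_state(graph, focal_node=1,
#                                       compiler=qc.compiler, n_shots=500)
# bitstrings = qc.run(exe)                  # one row of measurement results per shot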
def quil_to_native_quil(self, program: Program):
    return basic_compile(program)
def run(qc: QuantumComputer, exp: Program, n_trials: int) -> np.ndarray:
    exp.wrap_in_numshots_loop(n_trials)
    executable = qc.compiler.native_quil_to_executable(basic_compile(exp))
    return qc.run(executable)
def estimate_pauli_sum(pauli_terms,
                       basis_transform_dict,
                       program,
                       variance_bound,
                       quantum_resource,
                       commutation_check=True,
                       symmetrize=True,
                       rand_samples=16):
    r"""
    Estimate the mean of a sum of pauli terms to within a set variance.

    The sample variance is calculated by

    .. math::
        \begin{align}
        \mathrm{Var}[\hat{\langle H \rangle}] = \sum_{i, j} h_{i} h_{j}
        \mathrm{Cov}(\hat{\langle P_{i} \rangle}, \hat{\langle P_{j} \rangle})
        \end{align}

    The expectation value of each Pauli operator (term and coefficient) is also
    returned.  It can be accessed through the named-tuple field
    ``pauli_expectations``.

    :param pauli_terms: list of pauli terms to measure simultaneously, or a
                        PauliSum object.
    :param basis_transform_dict: basis transform dictionary where the key is the
                                 qubit index and the value is the basis to rotate
                                 into. Valid bases are [I, X, Y, Z].
    :param program: program generating a state to sample from.  The program is
                    deep copied to ensure no mutation of gates or program is
                    perceived by the user.
    :param variance_bound: Bound on the variance of the estimator for the
                           PauliSum. Remember this is the SQUARE of the standard
                           error!
    :param quantum_resource: quantum abstract machine object.
    :param Bool commutation_check: Optional flag toggling a safety check ensuring
                                   all terms in ``pauli_terms`` commute with each
                                   other.
    :param Bool symmetrize: Optional flag toggling symmetrization of readout.
    :param Int rand_samples: number of random realizations for readout
                             symmetrization.
    :return: estimated expected value, expected value of each Pauli term in the
             sum, covariance matrix, variance of the estimator, and the number of
             shots taken.  The object returned is a named tuple with field names
             as follows: expected_value, pauli_expectations, covariance,
             variance, n_shots.
             ``expected_value == coeff_vec.dot(pauli_expectations)``
    :rtype: EstimationResult
    """
    if not isinstance(pauli_terms, (list, PauliSum)):
        raise TypeError("pauli_terms needs to be a list or a PauliSum")

    if isinstance(pauli_terms, PauliSum):
        pauli_terms = pauli_terms.terms

    # check if each term commutes with everything
    if commutation_check:
        if len(commuting_sets(sum(pauli_terms))) != 1:
            raise CommutationError("Not all terms commute in the expected way")

    program = program.copy()
    pauli_for_rotations = PauliTerm.from_list([
        (value, key) for key, value in basis_transform_dict.items()
    ])
    program += get_rotation_program(pauli_for_rotations)

    qubits = sorted(list(basis_transform_dict.keys()))
    if symmetrize:
        theta = program.declare("ro_symmetrize", "REAL", len(qubits))
        for (idx, q) in enumerate(qubits):
            program += [RZ(np.pi / 2, q), RY(theta[idx], q), RZ(-np.pi / 2, q)]

    ro = program.declare("ro", "BIT", memory_size=len(qubits))
    for num, qubit in enumerate(qubits):
        program.inst(MEASURE(qubit, ro[num]))

    coeff_vec = np.array(
        list(map(lambda x: x.coefficient, pauli_terms))).reshape((-1, 1))

    # upper bound on samples given by IV of arXiv:1801.03524
    num_sample_ubound = 10 * int(
        np.ceil(np.sum(np.abs(coeff_vec))**2 / variance_bound))
    if num_sample_ubound <= 2:
        raise ValueError(
            "Something happened with our calculation of the max sample")

    if symmetrize:
        if min(STANDARD_NUMSHOTS, num_sample_ubound) // rand_samples == 0:
            raise ValueError(
                f"The number of shots must be larger than {rand_samples}.")

        program = program.wrap_in_numshots_loop(
            min(STANDARD_NUMSHOTS, num_sample_ubound) // rand_samples)
    else:
        program = program.wrap_in_numshots_loop(
            min(STANDARD_NUMSHOTS, num_sample_ubound))

    binary = quantum_resource.compiler.native_quil_to_executable(
        basic_compile(program))

    results = None
    sample_variance = np.infty
    number_of_samples = 0
    tresults = np.zeros((0, len(qubits)))
    while (sample_variance > variance_bound
           and number_of_samples < num_sample_ubound):
        if symmetrize:
            # for some number of times sample random bit string
            for r in range(rand_samples):
                rand_flips = np.random.randint(low=0, high=2, size=len(qubits))
                temp_results = quantum_resource.run(
                    binary, memory_map={'ro_symmetrize': np.pi * rand_flips})
                tresults = np.vstack((tresults, rand_flips ^ temp_results))
        else:
            tresults = quantum_resource.run(binary)

        number_of_samples += len(tresults)
        parity_results = get_parity(pauli_terms, tresults)

        # Note: easy improvement would be to update mean and variance on the fly
        # instead of storing all these results.
        if results is None:
            results = parity_results
        else:
            results = np.hstack((results, parity_results))

        # calculate the expected values....
        covariance_mat = np.cov(results, ddof=1)
        sample_variance = coeff_vec.T.dot(covariance_mat).dot(coeff_vec) / (
            results.shape[1] - 1)

    return EstimationResult(
        expected_value=coeff_vec.T.dot(np.mean(results, axis=1)),
        pauli_expectations=np.multiply(coeff_vec.flatten(),
                                       np.mean(results, axis=1).flatten()),
        covariance=covariance_mat,
        variance=sample_variance,
        n_shots=results.shape[1])
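# Usage sketch (illustrative, not part of the original module). It estimates
# <Z0 + Z1> on the |00> state prepared by an empty program; the "2q-qvm"
# endpoint and the variance target are placeholder assumptions. Both terms are
# diagonal, so they trivially commute and no basis rotation is applied.
#
# from pyquil import Program, get_qc
# from pyquil.paulis import sZ
#
# qc = get_qc("2q-qvm")
# terms = [sZ(0), sZ(1)]                         # measure Z on qubits 0 and 1
# basis = {0: 'Z', 1: 'Z'}                       # already in the Z basis
# result = estimate_pauli_sum(terms, basis, Program(), variance_bound=1e-4,
#                             quantum_resource=qc, symmetrize=False)
# print(result.expected_value)                   # expected to be close to 2.0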