Example #1
def compiled_parametric_graph_state(compiler: QPUCompiler,
                                    graph: nx.Graph,
                                    focal_node: int,
                                    num_shots: int = 1000):
    """
    Construct a program to create and measure a graph state, map it to qubits using ``addressing``,
    and compile to an ISA.

    Hackily implement a parameterized program by compiling a program with a particular angle,
    finding where that angle appears in the results, and replacing it with ``"{angle}"`` so
    the resulting compiled program can be run many times by using python's str.format method.

    :param graph: A networkx graph defining the graph state
    :param focal_node: The node of the graph to measure
    :param compiler: The compiler to do the compiling.
    :param num_shots: The number of shots to take when measuring the graph state.
    :return: an executable that constructs and measures a graph state.
    """
    program = create_graph_state(graph)
    measure_prog, c_addrs = measure_graph_state(graph, focal_node)
    program += measure_prog
    program.wrap_in_numshots_loop(num_shots)
    nq_program = basic_compile(program)
    executable = compiler.native_quil_to_executable(nq_program)
    return executable
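For context, here is a sketch of how the ``str.format`` hack described in the docstring might be used by a caller. The sentinel angle, access to the compiled Quil text, and the re-parsing step are illustrative assumptions, not part of the example above.

from pyquil.quil import Program

# Hypothetical caller-side use of the "{angle}" substitution hack.
sentinel = 0.123                       # recognizable angle baked into the program
quil_text = str(nq_program)            # Quil text of the compiled program (assumed accessible)
template = quil_text.replace(str(sentinel), "{angle}")

for angle in (0.0, 0.5, 1.0):
    concrete = Program(template.format(angle=angle))
    # ... hand `concrete` to the compiler / QPU as usual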
Example #2
def test_random_progs(n_qubits, prog_length):
    # basic_compile should preserve each random program's unitary up to a global phase
    for repeat_i in range(10):
        prog = _generate_random_program(n_qubits=n_qubits, length=prog_length)
        u1 = program_unitary(prog, n_qubits=n_qubits)
        u2 = program_unitary(basic_compile(prog), n_qubits=n_qubits)

        assert_all_close_up_to_global_phase(u1, u2, atol=1e-12)
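The comparison helper used above is not shown on this page; here is a plausible, hypothetical re-implementation of a global-phase-insensitive check, for reference only (it is not necessarily the library's actual helper).

import numpy as np

def assert_all_close_up_to_global_phase(u1, u2, atol=1e-12):
    # Align on the largest-magnitude entry of u2 (nonzero for a unitary),
    # divide out the relative global phase, then compare elementwise.
    idx = np.unravel_index(np.argmax(np.abs(u2)), u2.shape)
    phase = u1[idx] / u2[idx]
    np.testing.assert_allclose(u1, phase * u2, atol=atol)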
Example #3
def test_basic_compile_defgate():
    # already-native gates (RX, RZ) and user DEFGATEs should pass through unchanged
    p = Program()
    p.inst(RX(pi, 0))
    p.defgate("test", [[0, 1], [1, 0]])
    p.inst(("test", 2))
    p.inst(RZ(pi / 2, 0))

    assert p == basic_compile(p)
Example #4
def _run_rpe_program(qc: QuantumComputer, program: Program,
                     measure_qubits: Sequence[Sequence[int]],
                     num_shots: int) -> np.ndarray:
    """
    Simple helper to run a program with the appropriate number of shots and return the result.

    Note that the program is first compiled with basic_compile.

    :param qc: quantum computer to run program on
    :param program: program to run
    :param measure_qubits: all of the qubits to be measured after the program is run
    :param num_shots: number of shots of results to collect for the program
    :return: the results for all of the measure_qubits after running the program
    """
    prog = Program() + program  # make a copy of program
    meas_qubits = [qubit for qubits in measure_qubits for qubit in qubits]
    ro_bit = prog.declare("ro", "BIT", len(meas_qubits))
    for idx, q in enumerate(meas_qubits):
        prog.measure(q, ro_bit[idx])
    prog.wrap_in_numshots_loop(num_shots)
    executable = qc.compiler.native_quil_to_executable(basic_compile(prog))
    return qc.run(executable)
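A hedged usage sketch for the helper above; the quantum computer, program, and shot count are illustrative assumptions, and a running QVM is required.

import numpy as np
from pyquil import get_qc, Program
from pyquil.gates import RZ

qc = get_qc("2q-qvm")                                   # simulated backend
prog = Program(RZ(np.pi / 4, 0))                        # toy rotation program
results = _run_rpe_program(qc, prog, measure_qubits=[[0]], num_shots=500)
# `results` has shape (500, 1): one measured bit per shot for qubit 0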
Example #5
 def quil_to_native_quil(self, program: Program):
     return basic_compile(program)
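The two-line method above presumably belongs to a small compiler wrapper of the kind suggested by the CAUTION note in the next example. A hypothetical sketch of such a wrapper follows; the class name, the wrapped compiler, and the pass-through method are assumptions, and the module's own imports (Program, basic_compile) are reused.

class BasicCompileOnlyCompiler:
    """Stand-in compiler: quil_to_native_quil only applies basic_compile,
    so qubit indices are never re-mapped by a full quilc pass."""

    def __init__(self, wrapped_compiler):
        self._wrapped = wrapped_compiler                # e.g. qc.compiler

    def quil_to_native_quil(self, program: Program):
        return basic_compile(program)

    def native_quil_to_executable(self, native_quil: Program):
        return self._wrapped.native_quil_to_executable(native_quil)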
Example #6
def generate_experiment_programs(obs_expt: ObservablesExperiment, active_reset: bool = False,
                                 use_basic_compile: bool = True) \
        -> Tuple[List[Program], List[List[int]]]:
    """
    Generate the programs necessary to estimate the observables in an ObservablesExperiment.

    Grouping of settings to be run in parallel, e.g. by a call to group_settings, should be
    done before this method is called.

    Note that this method does not compile anything into native gates by default. The flag
    `use_basic_compile` can be set to run a basic compilation routine that replaces some gates
    with native gates but otherwise performs no optimizations and no qubit re-indexing.

    .. CAUTION::
        One must be careful with compilation of the output programs before the appropriate MEASURE
        instructions are added, because compilation may re-index the qubits so that
        the output list of `measure_qubits` no longer accurately indexes the qubits that
        should be measured. Manually replacing a QuantumComputer compiler's quil_to_native_quil
        command with basic_compile may be an appropriate approach to circumvent this issue.

    :param obs_expt: a single ObservablesExperiment to be translated to a series of programs that,
        when run serially, can be used to estimate each of obs_expt's observables.
    :param active_reset: whether or not to begin the program by actively resetting. If true,
        execution of each of the returned programs in a loop on the QPU will generally be faster.
    :param use_basic_compile: whether or not to call basic_compile on the programs after they are
        created. To run on a QPU it is necessary that programs use only native quil gates. See
        the warning above about setting use_basic_compile to false.
    :return: a list of programs along with a corresponding list of the groups of qubits that are
        measured by that program. The returned programs may be run on a qc after measurement
        instructions are added for the corresponding group of qubits in meas_qubits, or by a call
        to `qc.run_symmetrized_readout` -- see :func:`estimate_observables` for possible usage.
    """
    # Outer loop over a collection of grouped settings whose observables can be estimated simultaneously.
    programs = []
    meas_qubits = []
    for settings in obs_expt:

        # Prepare a state according to the amalgam of all setting.in_state
        total_prog = Program()
        if active_reset:
            total_prog += RESET()
        max_weight_in_state = _max_weight_state(setting.in_state for setting in settings)
        if max_weight_in_state is None:
            raise ValueError('Input states are not compatible. Re-group the experiment settings '
                             'so that groups of parallel settings have compatible input states.')
        for oneq_state in max_weight_in_state.states:
            total_prog += _one_q_state_prep(oneq_state)

        # Add in the program
        total_prog += obs_expt.program

        # Prepare for measurement state according to setting.observable
        max_weight_out_op = _max_weight_operator(setting.observable for setting in settings)
        if max_weight_out_op is None:
            raise ValueError('Observables not compatible. Re-group the experiment settings '
                             'so that groups of parallel settings have compatible observables.')
        for qubit, op_str in max_weight_out_op:
            total_prog += _local_pauli_eig_meas(op_str, qubit)

        if use_basic_compile:
            programs.append(basic_compile(total_prog))
        else:
            programs.append(total_prog)

        meas_qubits.append(max_weight_out_op.get_qubits())
    return programs, meas_qubits
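A hedged follow-up sketch showing how the returned programs and qubit groups might be used; `expt`, `qc`, and the shot count are assumptions.

from pyquil.gates import MEASURE

programs, meas_qubits = generate_experiment_programs(expt)      # `expt` assumed already grouped
for prog, qubits in zip(programs, meas_qubits):
    ro = prog.declare("ro", "BIT", len(qubits))
    for idx, q in enumerate(qubits):
        prog += MEASURE(q, ro[idx])
    prog.wrap_in_numshots_loop(1000)
    # native_quil_to_executable does not re-index qubits, so `qubits` stays valid
    executable = qc.compiler.native_quil_to_executable(prog)
    results = qc.run(executable)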
Example #7
def estimate_pauli_sum(pauli_terms,
                       basis_transform_dict,
                       program,
                       variance_bound,
                       quantum_resource,
                       commutation_check=True,
                       symmetrize=True,
                       rand_samples=16):
    r"""
    Estimate the mean of a sum of Pauli terms to a set variance.

    The sample variance is calculated by

    .. math::
        \begin{align}
        \mathrm{Var}[\hat{\langle H \rangle}] = \sum_{i, j}h_{i}h_{j}
        \mathrm{Cov}(\hat{\langle P_{i} \rangle}, \hat{\langle P_{j} \rangle})
        \end{align}

    The expectation value of each Pauli operator (term and coefficient) is
    also returned.  It can be accessed through the named-tuple field
    ``pauli_expectations``.

    :param pauli_terms: list of pauli terms to measure simultaneously or a
                        PauliSum object
    :param basis_transform_dict: basis transform dictionary where the key is
                                 the qubit index and the value is the basis to
                                 rotate into. Valid basis is [I, X, Y, Z].
    :param program: program generating a state to sample from.  The program
                    is deep copied to ensure no mutation of gates or program
                    is perceived by the user.
    :param variance_bound:  Bound on the variance of the estimator for the
                            PauliSum. Remember this is the SQUARE of the
                            standard error!
    :param quantum_resource: quantum abstract machine object
    :param Bool commutation_check: Optional flag toggling a safety check
                                   ensuring all terms in `pauli_terms`
                                   commute with each other
    :param Bool symmetrize: Optional flag toggling symmetrization of readout
    :param Int rand_samples: number of random realizations for readout symmetrization
    :return: estimated expected value, expected value of each Pauli term in
             the sum, covariance matrix, variance of the estimator, and the
             number of shots taken.  The object returned is a named tuple with
             field names as follows: expected_value, pauli_expectations,
             covariance, variance, n_shots.
             ``expected_value == coeff_vec.dot(pauli_expectations)``
    :rtype: EstimationResult
    """
    if not isinstance(pauli_terms, (list, PauliSum)):
        raise TypeError("pauli_terms needs to be a list or a PauliSum")

    if isinstance(pauli_terms, PauliSum):
        pauli_terms = pauli_terms.terms

    # check if each term commutes with everything
    if commutation_check:
        if len(commuting_sets(sum(pauli_terms))) != 1:
            raise CommutationError("Not all terms commute in the expected way")

    program = program.copy()
    pauli_for_rotations = PauliTerm.from_list([
        (value, key) for key, value in basis_transform_dict.items()
    ])

    program += get_rotation_program(pauli_for_rotations)

    qubits = sorted(list(basis_transform_dict.keys()))
    if symmetrize:
        theta = program.declare("ro_symmetrize", "REAL", len(qubits))
        for (idx, q) in enumerate(qubits):
            program += [RZ(np.pi / 2, q), RY(theta[idx], q), RZ(-np.pi / 2, q)]

    ro = program.declare("ro", "BIT", memory_size=len(qubits))
    for num, qubit in enumerate(qubits):
        program.inst(MEASURE(qubit, ro[num]))

    coeff_vec = np.array(list(map(lambda x: x.coefficient,
                                  pauli_terms))).reshape((-1, 1))

    # upper bound on samples given by Sec. IV of arXiv:1801.03524
    num_sample_ubound = 10 * int(
        np.ceil(np.sum(np.abs(coeff_vec))**2 / variance_bound))
    if num_sample_ubound <= 2:
        raise ValueError(
            "Something happened with our calculation of the max sample")

    if symmetrize:
        if min(STANDARD_NUMSHOTS, num_sample_ubound) // rand_samples == 0:
            raise ValueError(
                f"The number of shots must be larger than {rand_samples}.")

        program = program.wrap_in_numshots_loop(
            min(STANDARD_NUMSHOTS, num_sample_ubound) // rand_samples)
    else:
        program = program.wrap_in_numshots_loop(
            min(STANDARD_NUMSHOTS, num_sample_ubound))

    binary = quantum_resource.compiler.native_quil_to_executable(
        basic_compile(program))

    results = None
    sample_variance = np.inf
    number_of_samples = 0
    tresults = np.zeros((0, len(qubits)))
    while (sample_variance > variance_bound
           and number_of_samples < num_sample_ubound):
        if symmetrize:
            # for some number of times sample random bit string
            for r in range(rand_samples):
                rand_flips = np.random.randint(low=0, high=2, size=len(qubits))
                temp_results = quantum_resource.run(
                    binary, memory_map={'ro_symmetrize': np.pi * rand_flips})
                tresults = np.vstack((tresults, rand_flips ^ temp_results))
        else:
            tresults = quantum_resource.run(binary)

        number_of_samples += len(tresults)
        parity_results = get_parity(pauli_terms, tresults)

        # Note: easy improvement would be to update mean and variance on the fly
        # instead of storing all these results.
        if results is None:
            results = parity_results
        else:
            results = np.hstack((results, parity_results))

        # calculate the expected values....
        covariance_mat = np.cov(results, ddof=1)
        sample_variance = coeff_vec.T.dot(covariance_mat).dot(coeff_vec) / (
            results.shape[1] - 1)

    return EstimationResult(
        expected_value=coeff_vec.T.dot(np.mean(results, axis=1)),
        pauli_expectations=np.multiply(coeff_vec.flatten(),
                                       np.mean(results, axis=1).flatten()),
        covariance=covariance_mat,
        variance=sample_variance,
        n_shots=results.shape[1])
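Finally, a hedged usage sketch for estimate_pauli_sum; the state-preparation program, operator, and backend are illustrative assumptions, and the module's own imports plus a running QVM/quilc are required.

from pyquil import get_qc, Program
from pyquil.gates import H
from pyquil.paulis import sZ

qc = get_qc("2q-qvm")                             # simulated 2-qubit backend
state_prep = Program(H(0), H(1))                  # prepare |++>
pauli_sum = 0.5 * sZ(0) * sZ(1) + 0.3 * sZ(0)     # two commuting terms
basis = {0: "Z", 1: "Z"}                          # shared measurement basis

result = estimate_pauli_sum(pauli_sum, basis, state_prep,
                            variance_bound=1e-2, quantum_resource=qc)
print(result.expected_value, result.variance, result.n_shots)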