Example 1
 def test_quantum_concurrent_op_mode(self):
     """Test getter and setter behavior for quantum_concurrent_op_mode."""
     # The mode starts out True; flipping it through the setter must be
     # observable through the getter.
     self.assertTrue(quantum_context.get_quantum_concurrent_op_mode())
     quantum_context.set_quantum_concurrent_op_mode(False)
     self.assertFalse(quantum_context.get_quantum_concurrent_op_mode())
Example 2
def get_unitary_op(quantum_concurrent=None):
    """Get an op that calculates the unitary matrix for the given circuits.

    >>> unitary_op = tfq.get_unitary_op()
    >>> qubit = cirq.GridQubit(0, 0)
    >>> symbol = sympy.Symbol('alpha')
    >>> my_circuit = cirq.Circuit(cirq.H(qubit) ** symbol)
    >>> tensor_circuit = tfq.convert_to_tensor([my_circuit])
    >>> unitary_op(tensor_circuit, ['alpha'], [[0.2]])
    <tf.RaggedTensor [
        [[(0.9720+0.0860j), (0.0675-0.2078j)],
         [(0.0675-0.2078j), (0.8369+0.5017j)]]]>


    Args:
        quantum_concurrent: Optional Python `bool`. True indicates that the
            returned op should not block graph level parallelism on itself when
            executing. False indicates that graph level parallelism on itself
            should be blocked. Defaults to the value returned by
            `tfq.get_quantum_concurrent_op_mode` at call time, which defaults
            to True (no blocking). This flag is only needed for advanced users
            when using TFQ for very large simulations, or when running on a
            real chip.

    Returns:
        A `callable` with the following signature:
        ```op(programs, symbol_names, symbol_values)```
        programs: `tf.Tensor` of strings with shape [batch_size] containing
            the string representations of the circuits to be executed.
        symbol_names: `tf.Tensor` of strings with shape [n_params], which
            is used to specify the order in which the values in
            `symbol_values` should be placed inside of the circuits in
            `programs`.
        symbol_values: `tf.Tensor` of real numbers with shape
            [batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
            dictated by `symbol_names`.
        Returns:
            `tf.Tensor` with shape
                [batch_size, <ragged 2**max_qubits>, <ragged 2**max_qubits>]
                that holds the unitary matrix for each circuit (after resolving
                the corresponding parameters in).
    """
    if quantum_concurrent is None:
        # Resolve the global mode when this factory is *called*, not when the
        # module is imported. An eagerly evaluated default argument would be
        # frozen at definition time and silently ignore any later calls to
        # `set_quantum_concurrent_op_mode`.
        quantum_concurrent = quantum_context.get_quantum_concurrent_op_mode()

    # Validate the flag, consistent with the sibling get_*_op factories.
    _check_quantum_concurrent(quantum_concurrent)

    def _unitary(programs, symbol_names, symbol_values):
        # Shared core: compute the (padded) unitaries and re-raggedify them.
        return tfq_utility_ops.padded_to_ragged2d(
            OP_MODULE.tfq_calculate_unitary(
                programs, symbol_names, tf.cast(symbol_values, tf.float32)))

    if quantum_concurrent is True:
        # Do not block graph level parallelism.
        return _unitary

    # Block graph level parallelism: serialize execution on the global lock.
    return lambda programs, symbol_names, symbol_values: \
        quantum_context._GLOBAL_OP_LOCK.execute(
            lambda: _unitary(programs, symbol_names, symbol_values))
Example 3
def get_expectation_op(backend=None, *, quantum_concurrent=None):
    """Get a TensorFlow op that will calculate batches of expectation values.

    This function produces a non-differentiable TF op that will calculate
    batches of expectation values given tensor batches of `cirq.Circuit`s,
    parameter values, and `cirq.PauliSum` operators to measure.


    >>> # Simulate circuits with C++.
    >>> my_op = tfq.get_expectation_op()
    >>> # Prepare some inputs.
    >>> qubit = cirq.GridQubit(0, 0)
    >>> my_symbol = sympy.Symbol('alpha')
    >>> my_circuit_tensor = tfq.convert_to_tensor([
    ...     cirq.Circuit(cirq.H(qubit) ** my_symbol)
    ... ])
    >>> my_values = np.array([[0.123]])
    >>> my_paulis = tfq.convert_to_tensor([[
    ...     3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit)
    ... ]])
    >>> # This op can now be run with:
    >>> output = my_op(
    ...     my_circuit_tensor, ['alpha'], my_values, my_paulis)
    >>> output
    tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32)


    In order to make the op differentiable, a `tfq.differentiator` object is
    needed. see `tfq.differentiators` for more details. Below is a simple
    example of how to make my_op from the above code block differentiable:

    >>> diff = tfq.differentiators.ForwardDifference()
    >>> my_differentiable_op = diff.generate_differentiable_op(
    ...     analytic_op=my_op
    ... )


    Args:
        backend: Optional Python `object` that specifies what backend this op
            should use when evaluating circuits. Can be any
            `cirq.SimulatesFinalState`. If not provided the default C++
            analytical expectation calculation op is returned.
        quantum_concurrent: Optional Python `bool`. True indicates that the
            returned op should not block graph level parallelism on itself when
            executing. False indicates that graph level parallelism on itself
            should be blocked. Defaults to the value returned by
            `tfq.get_quantum_concurrent_op_mode` at call time, which defaults
            to True (no blocking). This flag is only needed for advanced users
            when using TFQ for very large simulations, or when running on a
            real chip.

    Returns:
        A `callable` with the following signature:

        ```op(programs, symbol_names, symbol_values, pauli_sums)```

        programs: `tf.Tensor` of strings with shape [batch_size] containing
            the string representations of the circuits to be executed.
        symbol_names: `tf.Tensor` of strings with shape [n_params], which
            is used to specify the order in which the values in
            `symbol_values` should be placed inside of the circuits in
            `programs`.
        symbol_values: `tf.Tensor` of real numbers with shape
            [batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
            dictated by `symbol_names`.
        pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
            containing the string representation of the operators that will
            be used on all of the circuits in the expectation calculations.

        Returns:
            `tf.Tensor` with shape [batch_size, n_ops] that holds the
                expectation value for each circuit with each op applied to it
                (after resolving the corresponding parameters in).
    """

    # TODO (mbbrough): investigate how the above docstring renders.
    if quantum_concurrent is None:
        # Resolve the global mode at call time rather than at import time so
        # later calls to `set_quantum_concurrent_op_mode` take effect.
        quantum_concurrent = quantum_context.get_quantum_concurrent_op_mode()
    _check_quantum_concurrent(quantum_concurrent)

    op = None
    if backend is None:
        op = TFQWavefunctionSimulator.expectation

    if isinstance(backend, cirq.SimulatesFinalState):
        op = cirq_ops._get_cirq_analytical_expectation(backend)

    if op is not None:
        if quantum_concurrent is True:
            # Return an op that does not block graph level parallelism.
            return lambda programs, symbol_names, symbol_values, pauli_sums: \
                op(programs, symbol_names, symbol_values, pauli_sums)

        # Return an op that does block graph level parallelism, serializing
        # execution on the process-wide lock (qualified for consistency with
        # get_unitary_op).
        return lambda programs, symbol_names, symbol_values, pauli_sums: \
            quantum_context._GLOBAL_OP_LOCK.execute(lambda: op(
                programs, symbol_names, symbol_values, pauli_sums))

    if isinstance(backend, (cirq.SimulatesSamples, cirq.Sampler)):
        # The suggested replacement lives in the tfq namespace, not tf.
        raise NotImplementedError("Sample-based expectation is not supported."
                                  " Use "
                                  "tfq.get_sampled_expectation_op() instead.")

    raise TypeError(
        "Backend {} is invalid. Expected a Cirq.SimulatesFinalState"
        " or None.".format(backend))
Example 4
def get_state_op(backend=None, *, quantum_concurrent=None):
    """Get a TensorFlow op that produces states from given quantum circuits.

    This function produces a non-differentiable op that will calculate
    batches of state tensors given tensor batches of `cirq.Circuit`s and
    parameter values.


    >>> # Simulate circuits with cirq.
    >>> my_op = tfq.get_state_op(backend=cirq.DensityMatrixSimulator())
    >>> # Simulate circuits with C++.
    >>> my_second_op = tfq.get_state_op()
    >>> # Prepare some inputs.
    >>> qubit = cirq.GridQubit(0, 0)
    >>> my_symbol = sympy.Symbol('alpha')
    >>> my_circuit_tensor = tfq.convert_to_tensor([
    ...     cirq.Circuit(cirq.Y(qubit) ** my_symbol)
    ... ])
    >>> my_values = np.array([[0.5]])
    >>> # This op can now be run to calculate the state.
    >>> output = my_second_op(my_circuit_tensor, ['alpha'], my_values)
    >>> output
    <tf.RaggedTensor [[(0.5+0.5j), (0.5+0.5j)]]>


    Args:
        backend: Optional Python `object` that specifies what backend this op
            should use when evaluating circuits. Can be any
            `cirq.SimulatesFinalState`. If not provided, the default C++
            wavefunction simulator will be used.
        quantum_concurrent: Optional Python `bool`. True indicates that the
            returned op should not block graph level parallelism on itself when
            executing. False indicates that graph level parallelism on itself
            should be blocked. Defaults to the value returned by
            `tfq.get_quantum_concurrent_op_mode` at call time, which defaults
            to True (no blocking). This flag is only needed for advanced users
            when using TFQ for very large simulations, or when running on a
            real chip.

    Returns:
        A `callable` with the following signature:

        ```op(programs, symbol_names, symbol_values)```

        programs: `tf.Tensor` of strings with shape [batch_size] containing
            the string representations of the circuits to be executed.
        symbol_names: `tf.Tensor` of strings with shape [n_params], which
            is used to specify the order in which the values in
            `symbol_values` should be placed inside of the circuits in
            `programs`.
        symbol_values: `tf.Tensor` of real numbers with shape
            [batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
            dictated by `symbol_names`.

        Returns:
            `tf.Tensor` with shape [batch_size, <ragged> size of state] that
            contains the state information of the circuit.
    """

    # TODO (mbbrough): investigate how the above docstring renders.
    if quantum_concurrent is None:
        # Resolve the global mode at call time rather than at import time so
        # later calls to `set_quantum_concurrent_op_mode` take effect.
        quantum_concurrent = quantum_context.get_quantum_concurrent_op_mode()
    _check_quantum_concurrent(quantum_concurrent)

    op = None
    if backend is None:
        op = TFQWavefunctionSimulator.state

    if isinstance(backend, (cirq.SimulatesFinalState)):
        op = cirq_ops._get_cirq_simulate_state(backend)

    if op is not None:
        if quantum_concurrent is True:
            # Return an op that does not block graph level parallelism.
            return lambda programs, symbol_names, symbol_values: \
                tfq_utility_ops.padded_to_ragged(
                    op(programs, symbol_names, symbol_values))

        # Return an op that does block graph level parallelism, serializing
        # execution on the process-wide lock (qualified for consistency with
        # get_unitary_op).
        return lambda programs, symbol_names, symbol_values: \
            quantum_context._GLOBAL_OP_LOCK.execute(
                lambda: tfq_utility_ops.padded_to_ragged(
                    op(programs, symbol_names, symbol_values)))

    raise TypeError(
        "Backend {} is invalid. Expected a Cirq.SimulatesFinalState"
        " or None.".format(backend))