Example #1
    def test_gradients(self, op, obs, mocker, tol):
        """Tests that the gradients of circuits match between the
        finite difference and analytic methods."""
        args = np.linspace(0.2, 0.5, op.num_params)

        with ReversibleTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(0.543, wires=0)
            qml.CNOT(wires=[0, 1])

            op(*args, wires=range(op.num_wires))

            qml.Rot(1.3, -2.3, 0.5, wires=[0])
            qml.RZ(-0.5, wires=0)
            qml.RY(0.5, wires=1)
            qml.CNOT(wires=[0, 1])

            qml.expval(obs(wires=0))
            qml.expval(qml.PauliZ(wires=1))

        dev = qml.device("default.qubit", wires=2)
        res = tape.execute(dev)

        tape._update_gradient_info()
        tape.trainable_params = set(range(1, 1 + op.num_params))

        # check that every parameter is analytic
        for i in range(op.num_params):
            assert tape._par_info[1 + i]["grad_method"][0] == "A"

        grad_F = tape.jacobian(dev, method="numeric")

        spy = mocker.spy(ReversibleTape, "analytic_pd")
        spy_execute = mocker.spy(tape, "execute_device")
        grad_A = tape.jacobian(dev, method="analytic")
        spy.assert_called()

        # check that the execute device method has only been called
        # once, for all parameters.
        spy_execute.assert_called_once()

        assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)
Example #2
def test_integration(returns):
    """Integration tests that compare to default.qubit for a large circuit containing parametrized
    operations"""
    dev_def = qml.device("default.qubit", wires=range(4))
    dev_lightning = qml.device("lightning.qubit", wires=range(4))

    def circuit(params):
        circuit_ansatz(params, wires=range(4))
        return qml.expval(returns), qml.expval(qml.PauliY(1))

    n_params = 30
    params = np.linspace(0, 10, n_params)

    qnode_def = qml.QNode(circuit, dev_def)
    qnode_lightning = qml.QNode(circuit, dev_lightning, diff_method="adjoint")

    j_def = qml.jacobian(qnode_def)(params)
    j_lightning = qml.jacobian(qnode_lightning)(params)

    assert np.allclose(j_def, j_lightning)
Example #3
def test_all_operations(mocker):
    """Test that a batch dimension can be added to all operations"""
    dev = qml.device("default.qubit", wires=3)

    @functools.partial(qml.batch_params, all_operations=True)
    @qml.qnode(dev)
    def circuit(x, weights):
        qml.RX(x, wires=0)
        qml.RY([0.2, 0.3, 0.3], wires=1)
        qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
        return qml.probs(wires=[0, 2])

    batch_size = 3
    x = np.linspace(0.1, 0.5, batch_size, requires_grad=True)
    weights = np.ones((batch_size, 10, 3, 3), requires_grad=False)

    spy = mocker.spy(circuit.device, "batch_execute")
    res = circuit(x, weights)
    assert res.shape == (batch_size, 1, 4)
    assert len(spy.call_args[0][0]) == batch_size
Example #4
    def test_rmsprop_optimizer_multivar(self, tol):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        rms_opt = RMSPropOptimizer(stepsize, decay=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                rms_opt.reset()

                x_vec = x_vals[jdx:jdx + 2]
                x_onestep = rms_opt.step(f, x_vec)
                past_grads = (1 - gamma) * gradf(x_vec)[0] * gradf(x_vec)[0]
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = rms_opt.step(f, x_onestep)
                past_grads = (
                    (1 - gamma) * gamma * gradf(x_vec)[0] * gradf(x_vec)[0]
                    + (1 - gamma) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #5
def test_autograd(diff_method, tol):
    """Test derivatives when using autograd"""
    dev = qml.device("default.qubit", wires=2)

    @qml.batch_params
    @qml.beta.qnode(dev, diff_method=diff_method)
    def circuit(x):
        qml.RX(x, wires=0)
        qml.RY(0.1, wires=1)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))

    def cost(x):
        return np.sum(circuit(x))

    batch_size = 3
    x = np.linspace(0.1, 0.5, batch_size, requires_grad=True)

    res = qml.grad(cost)(x)
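    # analytically, circuit(x) = sin(0.1) * cos(x), so the gradient of the
    # summed cost with respect to each batch entry is -sin(0.1) * sin(x)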
    expected = -np.sin(0.1) * np.sin(x)
    assert np.allclose(res, expected, atol=tol, rtol=0)
Example #6
    def test_nesterovmomentum_optimizer_multivar(self, tol):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multivariate functions."""
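        # NesterovMomentumOptimizer evaluates the gradient at the "look-ahead"
        # point x - stepsize * gamma * a_{t-1} before applying the usual
        # momentum update; the two-step target below reproduces this by hand.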
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                nesmom_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = nesmom_opt.step(f, x_vec)
                x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = nesmom_opt.step(f, x_onestep)
                momentum_term = gamma * gradf(x_vec)[0]
                shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
                x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #7
def test_simple_circuit(mocker):
    """Test that batching works for a simple circuit"""
    dev = qml.device("default.qubit", wires=3)

    @qml.batch_params
    @qml.qnode(dev)
    def circuit(data, x, weights):
        qml.templates.AmplitudeEmbedding(data, wires=[0, 1, 2], normalize=True)
        qml.RX(x, wires=0)
        qml.RY(0.2, wires=1)
        qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
        return qml.probs(wires=[0, 2])

    batch_size = 3
    data = np.random.random((batch_size, 8))
    x = np.linspace(0.1, 0.5, batch_size, requires_grad=True)
    weights = np.ones((batch_size, 10, 3, 3), requires_grad=True)

    spy = mocker.spy(circuit.device, "batch_execute")
    res = circuit(data, x, weights)
    assert res.shape == (batch_size, 1, 4)
    assert len(spy.call_args[0][0]) == batch_size
Example #8
    def test_gradients_gaussian_circuit(self, op, obs, tol):
        """Tests that the gradients of circuits of gaussian gates match between the
        finite difference and analytic methods."""
        tol = 1e-2

        args = np.linspace(0.2, 0.5, op.num_params)

        with qml.tape.JacobianTape() as tape:
            qml.Displacement(0.5, 0, wires=0)
            op(*args, wires=range(op.num_wires))
            qml.Beamsplitter(1.3, -2.3, wires=[0, 1])
            qml.Displacement(-0.5, 0.1, wires=0)
            qml.Squeezing(0.5, -1.5, wires=0)
            qml.Rotation(-1.1, wires=0)
            qml.var(obs(wires=0))

        dev = qml.device("default.gaussian", wires=2)
        res = tape.execute(dev)

        tape.trainable_params = set(range(2, 2 + op.num_params))

        # jacobians must match
        tapes, fn = qml.gradients.finite_diff(tape)
        grad_F = fn(dev.batch_execute(tapes))

        tapes, fn = param_shift_cv(tape, dev)
        grad_A = fn(dev.batch_execute(tapes))

        tapes, fn = param_shift_cv(tape, dev, force_order2=True)
        grad_A2 = fn(dev.batch_execute(tapes))

        assert np.allclose(grad_A2, grad_F, atol=tol, rtol=0)
        assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)

        # check that every parameter is analytic
        if obs != qml.NumberOperator:
            for i in range(op.num_params):
                assert tape._par_info[2 + i]["grad_method"][0] == "A"
Example #9
    def test_gradient_descent_optimizer_multivar_multidim(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions and with higher dimensional inputs."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[
                1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x]),
        ]
        grad_mvar_mdim_funcs = [
            lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                 [-np.sin(x[1, 0]), 1.0]]), ),
            lambda x: (np.array([
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2),
                ],
                [0.0, 0.0],
            ]), ),
            lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs):
            for jdx in range(len(x_vals[:-3])):
                x_vec = x_vals[jdx:jdx + 4]
                x_vec_multidim = np.reshape(x_vec, (2, 2))
                x_new = sgd_opt.step(f, x_vec_multidim)
                x_correct = x_vec_multidim - gradf(x_vec_multidim)[0] * stepsize
                x_new_flat = x_new.flatten()
                x_correct_flat = x_correct.flatten()
                assert np.allclose(x_new_flat, x_correct_flat, atol=tol)
Example #10
class TestAdjointJacobian:
    """Tests for the adjoint_jacobian method"""
    @pytest.fixture
    def dev(self):
        return qml.device("default.qubit", wires=2)

    def test_not_expval(self, dev):
        """Test if a QuantumFunctionError is raised for a tape with measurements that are not
        expectation values"""

        with qml.tape.JacobianTape() as tape:
            qml.RX(0.1, wires=0)
            qml.var(qml.PauliZ(0))

        with pytest.raises(qml.QuantumFunctionError,
                           match="Adjoint differentiation method does"):
            dev.adjoint_jacobian(tape)

    def test_unsupported_op(self, dev):
        """Test if a QuantumFunctionError is raised for an unsupported operation, i.e.,
        multi-parameter operations that are not qml.Rot"""

        with qml.tape.JacobianTape() as tape:
            qml.CRot(0.1, 0.2, 0.3, wires=[0, 1])
            qml.expval(qml.PauliZ(0))

        with pytest.raises(qml.QuantumFunctionError,
                           match="The CRot operation is not"):
            dev.adjoint_jacobian(tape)

    @pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
    @pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ])
    def test_pauli_rotation_gradient(self, G, theta, tol, dev):
        """Tests that the automatic gradients of Pauli rotations are correct."""

        with qml.tape.JacobianTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            G(theta, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1}

        calculated_val = dev.adjoint_jacobian(tape)

        # compare to finite differences
        numeric_val = tape.jacobian(dev, method="numeric")
        assert np.allclose(calculated_val, numeric_val, atol=tol, rtol=0)

    @pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
    def test_Rot_gradient(self, theta, tol, dev):
        """Tests that the device gradient of an arbitrary Euler-angle-parameterized gate is
        correct."""
        params = np.array([theta, theta**3, np.sqrt(2) * theta])

        with qml.tape.JacobianTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            qml.Rot(*params, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        calculated_val = dev.adjoint_jacobian(tape)

        # compare to finite differences
        numeric_val = tape.jacobian(dev, method="numeric")
        assert np.allclose(calculated_val, numeric_val, atol=tol, rtol=0)

    @pytest.mark.parametrize("par", [1, -2, 1.623, -0.051, 0])  # integers, floats, zero
    def test_ry_gradient(self, par, tol, dev):
        """Test that the gradient of the RY gate matches the exact analytic formula."""

        with qml.tape.JacobianTape() as tape:
            qml.RY(par, wires=[0])
            qml.expval(qml.PauliX(0))

        tape.trainable_params = {0}

        # gradients
        exact = np.cos(par)
        grad_F = tape.jacobian(dev, method="numeric")
        grad_A = dev.adjoint_jacobian(tape)

        # different methods must agree
        assert np.allclose(grad_F, exact, atol=tol, rtol=0)
        assert np.allclose(grad_A, exact, atol=tol, rtol=0)

    def test_rx_gradient(self, tol, dev):
        """Test that the gradient of the RX gate matches the known formula."""
        a = 0.7418

        with qml.tape.JacobianTape() as tape:
            qml.RX(a, wires=0)
            qml.expval(qml.PauliZ(0))

        # circuit jacobians
        dev_jacobian = dev.adjoint_jacobian(tape)
        expected_jacobian = -np.sin(a)
        assert np.allclose(dev_jacobian, expected_jacobian, atol=tol, rtol=0)

    def test_multiple_rx_gradient(self, tol):
        """Tests that the gradient of multiple RX gates in a circuit yields the correct result."""
        dev = qml.device("default.qubit", wires=3)
        params = np.array([np.pi, np.pi / 2, np.pi / 3])

        with qml.tape.JacobianTape() as tape:
            qml.RX(params[0], wires=0)
            qml.RX(params[1], wires=1)
            qml.RX(params[2], wires=2)

            for idx in range(3):
                qml.expval(qml.PauliZ(idx))

        # circuit jacobians
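        # each RX acts on its own wire, so d<Z_i>/d(theta_j) = -sin(theta_i) * delta_ij,
        # i.e. the Jacobian is diagonal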
        dev_jacobian = dev.adjoint_jacobian(tape)
        expected_jacobian = -np.diag(np.sin(params))
        assert np.allclose(dev_jacobian, expected_jacobian, atol=tol, rtol=0)

    qubit_ops = [getattr(qml, name) for name in qml.ops._qubit__ops__]
    ops = {
        qml.RX, qml.RY, qml.RZ, qml.PhaseShift, qml.CRX, qml.CRY, qml.CRZ,
        qml.Rot
    }

    @pytest.mark.parametrize("obs", [qml.PauliX, qml.PauliY])
    @pytest.mark.parametrize("op", ops)
    def test_gradients(self, op, obs, tol, dev):
        """Tests that the gradients of circuits match between the finite difference and device
        methods."""
        args = np.linspace(0.2, 0.5, op.num_params)

        with qml.tape.JacobianTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(0.543, wires=0)
            qml.CNOT(wires=[0, 1])

            op(*args, wires=range(op.num_wires))

            qml.Rot(1.3, -2.3, 0.5, wires=[0])
            qml.RZ(-0.5, wires=0)
            qml.RY(0.5, wires=1).inv()
            qml.CNOT(wires=[0, 1])

            qml.expval(obs(wires=0))
            qml.expval(qml.PauliZ(wires=1))

        tape.execute(dev)

        tape.trainable_params = set(range(1, 1 + op.num_params))

        grad_F = tape.jacobian(dev, method="numeric")
        grad_D = dev.adjoint_jacobian(tape)

        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)

    def test_gradient_gate_with_multiple_parameters(self, tol, dev):
        """Tests that gates with multiple free parameters yield correct gradients."""
        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.JacobianTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        grad_D = dev.adjoint_jacobian(tape)
        grad_F = tape.jacobian(dev, method="numeric")

        # gradient has the correct shape and every element is nonzero
        assert grad_D.shape == (1, 3)
        assert np.count_nonzero(grad_D) == 3
        # the different methods agree
        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)
Example #11
plt.show()

##############################################################################
# Cost function surface for circuit ansatz
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Now, we plot the cost function surface for later comparison with the surface generated
# by learning the circuit structure.

from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure(figsize=(6, 4))
ax = fig.gca(projection="3d")

X = np.linspace(-4.0, 4.0, 40)
Y = np.linspace(-4.0, 4.0, 40)
xx, yy = np.meshgrid(X, Y)
Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)

ax.set_xlabel(r"$\theta_1$")
ax.set_ylabel(r"$\theta_2$")
ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))

plt.show()

##############################################################################
# It is apparent that, based on the circuit structure
# chosen above, the cost function does not depend on the angle parameter :math:`\theta_2`
# for the rotation gate :math:`R_y`. As we will show in the following sections, this independence is not true
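
##############################################################################
# As a quick numerical check of this claim (a small sketch added here, assuming
# the ``cost`` function defined above), sweeping :math:`\theta_2` at a fixed
# :math:`\theta_1` should leave the cost value unchanged.

theta_1_fixed = 0.5
costs_along_theta_2 = [cost([theta_1_fixed, t2]) for t2 in np.linspace(-4.0, 4.0, 5)]
print(np.allclose(costs_along_theta_2, costs_along_theta_2[0]))  # expected: True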
Example #12
    acc_train = accuracy(Y_train, predictions_train)
    acc_val = accuracy(Y_val, predictions_val)

    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost(var, features, Y), acc_train, acc_val))

##############################################################################
# We can plot the continuous output of the variational classifier for the
# first two dimensions of the Iris data set.

plt.figure()
cm = plt.cm.RdBu

# make data for decision regions
xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

# preprocess grid points like data inputs above
padding = 0.3 * np.ones((len(X_grid), 1))
X_grid = np.c_[np.c_[X_grid, padding],
               np.zeros((len(X_grid), 1))]  # pad each input
normalization = np.sqrt(np.sum(X_grid**2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array([get_angles(x) for x in X_grid
                          ])  # angles for state preparation are new features
predictions_grid = [
    variational_classifier(var, angles=f) for f in features_grid
]
Z = np.reshape(predictions_grid, xx.shape)
Example #13
import itertools as it

import numpy as onp
import pytest

import pennylane as qml
from pennylane import numpy as np
from pennylane.utils import _flatten
from pennylane.optimize import (GradientDescentOptimizer, MomentumOptimizer,
                                NesterovMomentumOptimizer, AdagradOptimizer,
                                RMSPropOptimizer, AdamOptimizer,
                                RotoselectOptimizer, RotosolveOptimizer)

pytestmark = pytest.mark.usefixtures("tape_mode")

x_vals = np.linspace(-10, 10, 16, endpoint=False)

# Hyperparameters for optimizers
stepsize = 0.1
gamma = 0.5
delta = 0.8

# function arguments in various formats
mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]

# functions and their gradients
Example #14
The parameter-shift rule is *exact*, i.e., the formula for the gradient doesn't involve any approximations.
For quantum hardware, we can only take a finite number of samples, so we can never determine a circuit's
expectation values *exactly*. However, the parameter-shift rule provides the guarantee that it is an
`unbiased estimator <https://en.wikipedia.org/wiki/Bias_of_an_estimator>`_, meaning that if we could take an
infinite number of samples, the estimate would converge to the correct gradient value.

Let's jump into some code and take a look at the parameter-shift rule in action.
"""

import pennylane as qml
import matplotlib.pyplot as plt
from pennylane import numpy as np
from scipy.linalg import expm

np.random.seed(143)
angles = np.linspace(0, 2 * np.pi, 50)
dev = qml.device('default.qubit', wires=2)

##############################################################################
# We will consider a very simple circuit, containing just a single-qubit
# rotation about the x-axis, followed by a measurement along the z-axis.


@qml.qnode(dev)
def rotation_circuit(theta):
    qml.RX(theta, wires=0)
    return qml.expval(qml.PauliZ(0))


##############################################################################
# We will examine the gradient with respect to the parameter :math:`\theta`.
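

##############################################################################
# A minimal sketch of the rule in action (the helper ``param_shift_gradient``
# below is added purely for illustration): for this single rotation, the
# two-term parameter-shift rule reads
#
#     d<Z>/d(theta) = [f(theta + pi/2) - f(theta - pi/2)] / 2,
#
# which we can compare against the exact gradient -sin(theta).


def param_shift_gradient(theta, shift=np.pi / 2):
    """Two-term parameter-shift estimate of the gradient of rotation_circuit."""
    return (rotation_circuit(theta + shift) - rotation_circuit(theta - shift)) / 2


print(param_shift_gradient(0.7), -np.sin(0.7))  # the two values should agree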
Example #15
    # Define the variational parameters
    params = 2 * np.pi * (np.random.rand(sum(param_size)) - 0.5)
    
    # Define the Hamiltonian operators
    operators, coeffs = Hamiltonian()
    
    # Define the QNodeCollection
    qnodes = qml.map(circuit, operators, dev, measure="expval")

    # Evaluate the QNodeCollection
    def HNode(params):
        return np.dot(coeffs, qnodes(params, size=SIZE, layers=LAYERS))

    # Define the lattice
    X = np.linspace(-np.pi, np.pi, 100)
    Y = np.linspace(-np.pi, np.pi, 100)
    X, Y = np.meshgrid(X, Y)
    
    Op, OpG = HNode, qml.grad(HNode)
    Op_mat, OpG_mat = np.zeros(shape=(100, 100)), np.zeros(shape=(sum(param_size), 100, 100))

    for i in range(100):
        for j in range(100):
            params[args.coord[0]] = X[i, j]
            params[args.coord[1]] = Y[i, j]
            
            Op_mat[i, j] = Op(params)
            OpG_mat[:, i, j] = OpG(params)[0]

    # fig = plt.figure()
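
    # A minimal plotting sketch (an assumption about what follows, since the
    # original example is truncated here): visualize the scanned landscape Op_mat.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    contours = ax.contourf(X, Y, Op_mat, levels=50, cmap="viridis")
    fig.colorbar(contours)
    ax.set_xlabel("parameter {}".format(args.coord[0]))
    ax.set_ylabel("parameter {}".format(args.coord[1]))
    plt.show()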
Example #16
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now we want to see how our protocol compares to the standard Ramsey interferometry protocol.
# The probe state in this case is a tensor product of three separate :math:`|+\rangle` states
# while the encoded state is measured in the :math:`|+\rangle / |-\rangle` basis.
# We can recreate the standard schemes with specific weights for our setup.

Ramsey_weights = np.zeros_like(weights)
Ramsey_weights[1:6:2] = np.pi / 2
Ramsey_weights[15:20:2] = np.pi / 2
print("Cost for standard Ramsey sensing = {:6.4f}".format(
    opt_cost(Ramsey_weights)))

##############################################################################
# We can now make a plot to compare the noise scaling of the above probes.
gammas = np.linspace(0, 0.75, 21)
comparison_costs = {
    "optimized": [],
    "standard": [],
}

for gamma in gammas:
    comparison_costs["optimized"].append(cost(weights, phi, gamma, J, W))
    comparison_costs["standard"].append(cost(Ramsey_weights, phi, gamma, J, W))

import matplotlib.pyplot as plt

plt.semilogy(gammas, comparison_costs["optimized"], label="Optimized")
plt.semilogy(gammas, comparison_costs["standard"], label="Standard")
plt.xlabel(r"$\gamma$")
plt.ylabel("Weighted Cramér-Rao bound")
Example #17
plt.style.use("seaborn")
plt.plot(gd_cost_history, "b", label="Gradient descent")
plt.plot(qngd_cost_history, "g", label="Quantum natural gradient descent")

plt.ylabel("Cost function value")
plt.xlabel("Optimization steps")
plt.legend()
plt.show()

##############################################################################
# Or we can visualize the optimization path in the parameter space using a contour plot.
# Energies at different grid points have been pre-computed, and they can be downloaded by
# clicking :download:`here<../demonstrations/vqe_qng/param_landscape.npy>`.

# Discretize the parameter space
theta0 = np.linspace(0.0, 2.0 * np.pi, 100)
theta1 = np.linspace(0.0, 2.0 * np.pi, 100)

# Load energy value at each point in parameter space
parameter_landscape = np.load("vqe_qng/param_landscape.npy")

# Plot energy landscape
fig, axes = plt.subplots(figsize=(6, 6))
cmap = plt.cm.get_cmap("coolwarm")
contour_plot = plt.contourf(theta0, theta1, parameter_landscape, cmap=cmap)
plt.xlabel(r"$\theta_0$")
plt.ylabel(r"$\theta_1$")

# Plot optimization path for gradient descent. Plot every 10th point.
gd_color = "g"
plt.plot(
Example #18
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the qubit parameter-shift QubitParamShiftTape"""
import pytest
from pennylane import numpy as np

import pennylane as qml
from pennylane.tape.interfaces.autograd import AutogradInterface
from pennylane.tape import JacobianTape, ReversibleTape, QNode, qnode
from pennylane.tape.measure import MeasurementProcess

thetas = np.linspace(-2 * np.pi, 2 * np.pi, 8)


class TestReversibleTape:
    """Unit tests for the reversible tape"""
    def test_diff_circuit_construction(self, mocker):
        """Test that the diff circuit is correctly constructed"""
        dev = qml.device("default.qubit", wires=2)

        with ReversibleTape() as tape:
            qml.PauliX(wires=0)
            qml.RX(0.542, wires=0)
            qml.RY(0.542, wires=0)
            qml.expval(qml.PauliZ(0))

        spy = mocker.spy(dev, "execute")
Example #19
import sys

import pytest
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit import extensions as ex
from qiskit.circuit import Parameter
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators import Operator

import pennylane as qml
from pennylane import numpy as np
from pennylane_qiskit.converter import load, load_qasm, load_qasm_from_file, map_wires
from pennylane.wires import Wires


THETA = np.linspace(0.11, 3, 5)
PHI = np.linspace(0.32, 3, 5)
VARPHI = np.linspace(0.02, 3, 5)


class TestConverter:
    """Tests the converter function that allows converting QuantumCircuit objects
    to Pennylane templates."""

    def test_quantum_circuit_init_by_specifying_rotation_in_circuit(self, recorder):
        """Tests the load method for a QuantumCircuit initialized using separately defined
        quantum and classical registers."""

        angle = 0.5

        qr = QuantumRegister(1)
Example #20
# Try out the quantum circuit

# In[54]:


print(quantum_circ(inits, x=[0.1, 0.3]))
print(quantum_circ(inits, x=[0.2, 0.2]))


# Plot the normalized Rosenbrock function

# In[55]:


XX = np.linspace(-2, 2., 30)   # evenly spaced grid in x
YY = np.linspace(-2, 4., 30)   # evenly spaced grid in y

#Create the mesh grid(s) for all X/Y combos.
XX, YY = np.meshgrid(XX, YY)

#Rosenbrock function w/ two parameters using numpy Arrays and normalized
ZZ = (1.-XX)**2 + 100.*(YY-XX*XX)**2
ZZ = ZZ/np.max(ZZ)

# plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(30, 35)
ax.dist = 13
surf = ax.plot_surface(XX, YY, ZZ, rstride=1, cstride=1, 
         cmap='coolwarm', edgecolor='none',antialiased=True)  #Try coolwarm vs jet
Example #21
# from PennyLane core.
# All other ops are provided by Cirq.
available_ops = [op for op in dir(cirq_ops) if not op.startswith('_')]
print("\n".join(available_ops))

##############################################################################
# PennyLane operations and external framework-specific operations can be
# interwoven freely in circuits that use that plugin's device
# for execution.
# In this case, the Cirq-provided channels can be used with
# Cirq's mixed-state simulator.
#
# We'll use the ``BitFlip`` channel, which has the effect of
# randomly flipping the qubits in the computational basis.

noise_vals = np.linspace(0, 1, 25)

CHSH_vals = []
noisy_expvals = []

for p in noise_vals:
    # we overwrite the bell_pair() subcircuit to add
    # extra noisy channels after the entangled state is created
    def bell_pair():
        qml.Hadamard(wires=0)
        qml.CNOT(wires=[0, 1])
        cirq_ops.BitFlip(p, wires=0)
        cirq_ops.BitFlip(p, wires=1)

    # measuring the circuits will now use the new noisy bell_pair() function
    expvals = circuits()
Example #22
class TestMomentumOptimizer:
    """Test the Momentum optimizer"""
    @pytest.mark.parametrize(
        "grad,args",
        [
            ([40, -4, 12, -17, 400], [0, 30, 6, -7, 800]),
            ([0.00033, 0.45e-5, 0.0], [1.3, -0.5, 8e3]),
            ([43], [0.8]),
        ],
    )
    def test_apply_grad(self, grad, args, tol):
        """
        Test that the gradient can be applied correctly to a set of parameters
        and that momentum accumulation works correctly.
        """
        stepsize, gamma = 0.1, 0.5
        sgd_opt = MomentumOptimizer(stepsize, momentum=gamma)
        grad, args = np.array(grad), np.array(args, requires_grad=True)

        a1 = stepsize * grad
        expected = args - a1
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected)

        # Simulate a new step
        grad = grad + args
        args = expected

        a2 = gamma * a1 + stepsize * grad
        expected = args - a2
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected, atol=tol)

    @pytest.mark.parametrize("x_start", np.linspace(-10,
                                                    10,
                                                    16,
                                                    endpoint=False))
    def test_momentum_optimizer_univar(self, x_start, tol):
        """Tests that momentum optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma = 0.1, 0.5
        mom_opt = MomentumOptimizer(stepsize, momentum=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            mom_opt.reset()

            x_onestep = mom_opt.step(f, x_start)
            x_onestep_target = x_start - gradf(x_start)[0] * stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = mom_opt.step(f, x_onestep)
            momentum_term = gamma * gradf(x_start)[0]
            x_twosteps_target = x_onestep - (gradf(x_onestep)[0] +
                                             momentum_term) * stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)

    def test_momentum_optimizer_multivar(self, tol):
        """Tests that momentum optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        mom_opt = MomentumOptimizer(stepsize, momentum=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                mom_opt.reset()

                x_vec = x_vals[jdx:jdx + 2]
                x_onestep = mom_opt.step(f, x_vec)
                x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = mom_opt.step(f, x_onestep)
                momentum_term = gamma * gradf(x_vec)[0]
                x_twosteps_target = x_onestep - (gradf(x_onestep)[0] +
                                                 momentum_term) * stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #23
class TestRMSPropOptimizer:
    """Test the RMSProp (root mean square propagation) optimizer"""
    @pytest.mark.parametrize(
        "grad,args",
        [
            ([40, -4, 12, -17, 400], [0, 30, 6, -7, 800]),
            ([0.00033, 0.45e-5, 0.0], [1.3, -0.5, 8e3]),
            ([43], [0.8]),
        ],
    )
    def test_apply_grad(self, grad, args, tol):
        """
        Test that the gradient can be applied correctly to a set of parameters
        and that accumulation works correctly.
        """
        stepsize, gamma, eps = 0.1, 0.5, 1e-8
        sgd_opt = RMSPropOptimizer(stepsize, decay=gamma, eps=eps)
        grad, args = np.array(grad), np.array(args, requires_grad=True)

        a1 = (1 - gamma) * grad**2
        expected = args - stepsize / np.sqrt(a1 + eps) * grad
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected, atol=tol)

        # Simulate a new step
        grad = grad + args
        args = expected

        a2 = gamma * a1 + (1 - gamma) * grad**2
        expected = args - stepsize / np.sqrt(a2 + eps) * grad
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected, atol=tol)

    @pytest.mark.parametrize("x_start", np.linspace(-10,
                                                    10,
                                                    16,
                                                    endpoint=False))
    def test_rmsprop_optimizer_univar(self, x_start, tol):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma = 0.1, 0.5
        rms_opt = RMSPropOptimizer(stepsize, decay=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            rms_opt.reset()

            x_onestep = rms_opt.step(f, x_start)
            past_grads = (1 - gamma) * gradf(x_start)[0] * gradf(x_start)[0]
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = rms_opt.step(f, x_onestep)
            past_grads = (
                (1 - gamma) * gamma * gradf(x_start)[0] * gradf(x_start)[0]
                + (1 - gamma) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
            )
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)

    def test_rmsprop_optimizer_multivar(self, tol):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        rms_opt = RMSPropOptimizer(stepsize, decay=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                rms_opt.reset()

                x_vec = x_vals[jdx:jdx + 2]
                x_onestep = rms_opt.step(f, x_vec)
                past_grads = (1 - gamma) * gradf(x_vec)[0] * gradf(x_vec)[0]
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = rms_opt.step(f, x_onestep)
                past_grads = (
                    (1 - gamma) * gamma * gradf(x_vec)[0] * gradf(x_vec)[0]
                    + (1 - gamma) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #24
#    Iter:   491 | Cost: 0.0119758
#    Iter:   492 | Cost: 0.0119697
#    Iter:   493 | Cost: 0.0119637
#    Iter:   494 | Cost: 0.0119578
#    Iter:   495 | Cost: 0.0119520
#    Iter:   496 | Cost: 0.0119462
#    Iter:   497 | Cost: 0.0119405
#    Iter:   498 | Cost: 0.0119349
#    Iter:   499 | Cost: 0.0119293
#    Iter:   500 | Cost: 0.0119238
#
#
# Finally, we collect the predictions of the trained model for 50 values
# in the range :math:`[-1,1]`:

x_pred = np.linspace(-1, 1, 50)
predictions = [quantum_neural_net(var, x=x_) for x_ in x_pred]

##############################################################################
# and plot the shape of the function that the model has “learned” from
# the noisy data (green dots).

plt.figure()
plt.scatter(X, Y)
plt.scatter(x_pred, predictions, color="green")
plt.xlabel("x")
plt.ylabel("f(x)")
plt.tick_params(axis="both", which="major")
plt.tick_params(axis="both", which="minor")
plt.show()
Example #25
class TestQPUBasic(BaseTest):
    """Unit tests for the QPU (as a QVM)."""

    # pylint: disable=protected-access

    def test_warnings_raised_parametric_compilation_and_operator_estimation(
            self):
        """Test that a warning is raised if parameter compilation and operator estimation are both turned on."""
        device = np.random.choice(TEST_QPU_LATTICES)
        with pytest.warns(Warning,
                          match="Operator estimation is being turned off."):
            dev = qml.device(
                "forest.qpu",
                device=device,
                shots=1000,
                load_qc=False,
                parametric_compilation=True,
            )

    def test_no_readout_correction(self):
        """Test the QPU plugin with no readout correction"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout=None,
            calibrate_readout=None,
            parametric_compilation=False,
        )
        qubit = 0  # just run program on the first qubit

        @qml.qnode(dev_qpu)
        def circuit_Xpl():
            qml.RY(np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliX(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Xmi():
            qml.RY(-np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliX(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Ypl():
            qml.RX(-np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliY(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Ymi():
            qml.RX(np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliY(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Zpl():
            qml.RX(0.0, wires=qubit)
            return qml.expval(qml.PauliZ(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Zmi():
            qml.RX(np.pi, wires=qubit)
            return qml.expval(qml.PauliZ(qubit))

        num_expts = 10
        results_unavged = np.zeros((num_expts, 6))

        for i in range(num_expts):
            results_unavged[i, :] = [
                circuit_Xpl(),
                circuit_Ypl(),
                circuit_Zpl(),
                circuit_Xmi(),
                circuit_Ymi(),
                circuit_Zmi(),
            ]

        results = np.mean(results_unavged, axis=0)

        assert np.allclose(results[:3], 0.8, atol=2e-2)
        assert np.allclose(results[3:], -0.5, atol=2e-2)

    def test_readout_correction(self):
        """Test the QPU plugin with readout correction"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            timeout=100,
        )
        qubit = 0  # just run program on the first qubit

        @qml.qnode(dev_qpu)
        def circuit_Xpl():
            qml.RY(np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliX(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Xmi():
            qml.RY(-np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliX(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Ypl():
            qml.RX(-np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliY(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Ymi():
            qml.RX(np.pi / 2, wires=qubit)
            return qml.expval(qml.PauliY(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Zpl():
            qml.RX(0.0, wires=qubit)
            return qml.expval(qml.PauliZ(qubit))

        @qml.qnode(dev_qpu)
        def circuit_Zmi():
            qml.RX(np.pi, wires=qubit)
            return qml.expval(qml.PauliZ(qubit))

        num_expts = 10
        results_unavged = np.zeros((num_expts, 6))

        for i in range(num_expts):
            results_unavged[i, :] = [
                circuit_Xpl(),
                circuit_Ypl(),
                circuit_Zpl(),
                circuit_Xmi(),
                circuit_Ymi(),
                circuit_Zmi(),
            ]

        results = np.mean(results_unavged, axis=0)

        assert np.allclose(results[:3], 1.0, atol=2e-2)
        assert np.allclose(results[3:], -1.0, atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    def test_multi_qub_no_readout_errors(self):
        """Test the QPU plugin with no readout errors or correction"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            symmetrize_readout=None,
            calibrate_readout=None,
        )

        @qml.qnode(dev_qpu)
        def circuit():
            qml.RY(np.pi / 2, wires=0)
            qml.RY(np.pi / 3, wires=1)
            return qml.expval(qml.PauliX(0) @ qml.PauliZ(1))

        num_expts = 50
        result = 0.0
        for _ in range(num_expts):
            result += circuit()
        result /= num_expts

        assert np.isclose(result, 0.5, atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    def test_multi_qub_readout_errors(self):
        """Test the QPU plugin with readout errors"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device("forest.qpu",
                             device=device,
                             load_qc=False,
                             shots=10_000,
                             readout_error=[0.9, 0.75],
                             symmetrize_readout=None,
                             calibrate_readout=None,
                             parametric_compilation=False)

        @qml.qnode(dev_qpu)
        def circuit():
            qml.RY(np.pi / 2, wires=0)
            qml.RY(np.pi / 3, wires=1)
            return qml.expval(qml.PauliX(0) @ qml.PauliZ(1))

        result = circuit()

        assert np.isclose(result, 0.38, atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    def test_multi_qub_readout_correction(self):
        """Test the QPU plugin with readout errors and correction"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device("forest.qpu",
                             device=device,
                             load_qc=False,
                             shots=10_000,
                             readout_error=[0.9, 0.75],
                             symmetrize_readout='exhaustive',
                             calibrate_readout='plus-eig',
                             parametric_compilation=False)

        @qml.qnode(dev_qpu)
        def circuit():
            qml.RY(np.pi / 2, wires=0)
            qml.RY(np.pi / 3, wires=1)
            return qml.expval(qml.PauliX(0) @ qml.PauliZ(1))

        result = circuit()

        assert np.isclose(result, 0.5, atol=3e-2)

    @flaky(max_runs=5, min_passes=3)
    def test_2q_gate(self):
        """Test that the two qubit gate with the PauliZ observable works correctly.

        As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added.
        """

        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            shots=QVM_SHOTS,
        )

        @qml.qnode(dev_qpu)
        def circuit():
            qml.RY(np.pi / 2, wires=[0])
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        assert np.allclose(circuit(), 0.0, atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    def test_2q_gate_pauliz_identity_tensor(self):
        """Test that the PauliZ tensor Identity observable works correctly.

        As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added.
        """
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            shots=QVM_SHOTS,
        )

        @qml.qnode(dev_qpu)
        def circuit():
            qml.RY(np.pi / 2, wires=[0])
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.Identity(1))

        assert np.allclose(circuit(), 0.0, atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    @pytest.mark.parametrize("a", np.linspace(-0.5, 2, 6))
    def test_2q_gate_pauliz_pauliz_tensor(self, a):
        """Test that the PauliZ tensor PauliZ observable works correctly.

        As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added.
        """
        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            shots=QVM_SHOTS,
        )

        @qml.qnode(dev_qpu)
        def circuit(x):
            qml.RY(x, wires=[0])
            qml.Hadamard(wires=1)
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.Identity(1))

        assert np.allclose(circuit(a), np.cos(a), atol=2e-2)
        # Check that repeated calling of the QNode works correctly
        assert np.allclose(circuit(a), np.cos(a), atol=2e-2)

    @flaky(max_runs=5, min_passes=3)
    @pytest.mark.parametrize("a", np.linspace(-np.pi / 2, 0, 3))
    @pytest.mark.parametrize("b", np.linspace(0, np.pi / 2, 3))
    def test_2q_circuit_pauliz_pauliz_tensor(self, a, b):
        """Test that the PauliZ tensor PauliZ observable works correctly, when parametric compilation
        is turned off.

        As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added.
        """

        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            shots=QVM_SHOTS,
        )

        @qml.qnode(dev_qpu)
        def circuit(x, y):
            qml.RY(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

        analytic_value = (np.cos(a / 2)**2 * np.cos(b / 2)**2 +
                          np.cos(b / 2)**2 * np.sin(a / 2)**2 -
                          np.cos(a / 2)**2 * np.sin(b / 2)**2 -
                          np.sin(a / 2)**2 * np.sin(b / 2)**2)
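        # for this circuit the expression above simplifies to cos(b)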

        assert np.allclose(circuit(a, b), analytic_value, atol=2e-2)
        # Check that repeated calling of the QNode works correctly
        assert np.allclose(circuit(a, b), analytic_value, atol=2e-2)

    @pytest.mark.parametrize("a", np.linspace(-np.pi / 2, 0, 3))
    @pytest.mark.parametrize("b", np.linspace(0, np.pi / 2, 3))
    def test_2q_gate_pauliz_pauliz_tensor_parametric_compilation_off(
            self, a, b):
        """Test that the PauliZ tensor PauliZ observable works correctly, when parametric compilation
        is turned off.

        As the results coming from the qvm are stochastic, a constraint of 3 out of 5 runs was added.
        """

        device = np.random.choice(TEST_QPU_LATTICES)
        dev_qpu = qml.device(
            "forest.qpu",
            device=device,
            load_qc=False,
            readout_error=[0.9, 0.75],
            symmetrize_readout="exhaustive",
            calibrate_readout="plus-eig",
            shots=QVM_SHOTS // 20,
            parametric_compilation=False,
        )

        @qml.qnode(dev_qpu)
        def circuit(x, y):
            qml.RY(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

        analytic_value = (np.cos(a / 2)**2 * np.cos(b / 2)**2 +
                          np.cos(b / 2)**2 * np.sin(a / 2)**2 -
                          np.cos(a / 2)**2 * np.sin(b / 2)**2 -
                          np.sin(a / 2)**2 * np.sin(b / 2)**2)

        expt = np.mean([circuit(a, b) for _ in range(20)])
        theory = analytic_value

        assert np.allclose(expt, theory, atol=2e-2)

    def test_timeout_set_correctly(self, shots):
        """Test that the timeout attrbiute for the QuantumComputer stored by the QVMDevice
        is set correctly when passing a value as keyword argument"""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev = plf.QVMDevice(device=device, shots=shots, timeout=100)
        assert dev.qc.compiler.client.timeout == 100

    def test_timeout_default(self, shots):
        """Test that the timeout attrbiute for the QuantumComputer stored by the QVMDevice
        is set to default when no specific value is being passed."""
        device = np.random.choice(TEST_QPU_LATTICES)
        dev = plf.QVMDevice(device=device, shots=shots)
        qc = pyquil.get_qc(device, as_qvm=True)

        # Check that the timeouts are equal (it has not been changed as a side effect of
        # instantiation)
        assert dev.qc.compiler.client.timeout == qc.compiler.client.timeout
Example #26
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.plugin.DefaultGaussian` device.
"""
# pylint: disable=protected-access,cell-var-from-loop

from scipy.linalg import block_diag
import pytest

import pennylane
from pennylane import numpy as np
import numpy.testing as np_testing
from pennylane.ops import cv
from pennylane.wires import Wires

s_vals = np.linspace(-3, 3, 13)
phis = np.linspace(-2 * np.pi, 2 * np.pi, 11)
mags = np.linspace(0.0, 1.0, 7)


class TestCV:
    """Tests the continuous variable based operations."""

    @pytest.mark.parametrize("phi", phis)
    def test_rotation_heisenberg(self, phi):
        """ops: Tests the Heisenberg representation of the Rotation gate."""
        matrix = cv.Rotation._heisenberg_rep([phi])
        true_matrix = np.array(
            [[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]
        )
        assert np.allclose(matrix, true_matrix)
Example #27
    def retrieve_landscape(self):
        x = np.linspace(-self.scale, self.scale, self.grid_size[0])
        y = np.linspace(-self.scale, self.scale, self.grid_size[1])
        x, y = np.meshgrid(x, y)
        landscape = self.cache.get_values()
        return x, y, landscape
Example #28
U_cswap = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1]])


H = np.array(
    [[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]]
)


THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)


def prep_par(par, op):
    "Convert par into a list of parameters that op expects."
    if op.par_domain == "A":
        return [np.diag([x, 1]) for x in par]
    return par


class TestTensornetIntegration:
    """Integration tests for expt.tensornet. This test ensures it integrates
    properly with the PennyLane interface, in particular QNode."""
예제 #29
0
class TestGradients:
    """Jacobian integration tests for qubit expectations."""
    @pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
    @pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ])
    def test_pauli_rotation_gradient(self, G, theta, tol):
        """Tests that the automatic gradients of Pauli rotations are correct."""
        dev = qml.device("default.qubit", wires=1)

        with ReversibleTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            G(theta, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1}

        autograd_val = tape.jacobian(dev, method="analytic")

        # compare to finite differences
        numeric_val = tape.jacobian(dev, method="numeric")
        assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)

    @pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
    def test_Rot_gradient(self, theta, tol):
        """Tests that the automatic gradient of a arbitrary Euler-angle-parameterized gate is correct."""
        dev = qml.device("default.qubit", wires=1)
        params = np.array([theta, theta**3, np.sqrt(2) * theta])

        with ReversibleTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            qml.Rot(*params, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        autograd_val = tape.jacobian(dev, method="analytic")

        # compare to finite differences
        numeric_val = tape.jacobian(dev, method="numeric")
        assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)

    @pytest.mark.parametrize("par", [1, -2, 1.623, -0.051, 0]
                             )  # intergers, floats, zero
    def test_ry_gradient(self, par, mocker, tol):
        """Test that the gradient of the RY gate matches the exact analytic
        formula. Further, make sure the correct gradient methods
        are being called."""

        with ReversibleTape() as tape:
            qml.RY(par, wires=[0])
            qml.expval(qml.PauliX(0))

        tape.trainable_params = {0}

        dev = qml.device("default.qubit", wires=1)

        spy_numeric = mocker.spy(tape, "numeric_pd")
        spy_analytic = mocker.spy(tape, "analytic_pd")

        # gradients
        exact = np.cos(par)
        grad_F = tape.jacobian(dev, method="numeric")

        spy_numeric.assert_called()
        spy_analytic.assert_not_called()

        spy_device = mocker.spy(tape, "execute_device")
        grad_A = tape.jacobian(dev, method="analytic")

        spy_analytic.assert_called()
        spy_device.assert_called_once()  # check that the state was only pre-computed once

        # different methods must agree
        assert np.allclose(grad_F, exact, atol=tol, rtol=0)
        assert np.allclose(grad_A, exact, atol=tol, rtol=0)

    def test_rx_gradient(self, tol):
        """Test that the gradient of the RX gate matches the known formula."""
        dev = qml.device("default.qubit", wires=2)
        a = 0.7418

        with ReversibleTape() as tape:
            qml.RX(a, wires=0)
            qml.expval(qml.PauliZ(0))

        circuit_output = tape.execute(dev)
        expected_output = np.cos(a)
        assert np.allclose(circuit_output, expected_output, atol=tol, rtol=0)

        # circuit jacobians
        circuit_jacobian = tape.jacobian(dev, method="analytic")
        expected_jacobian = -np.sin(a)
        assert np.allclose(circuit_jacobian, expected_jacobian, atol=tol, rtol=0)

    def test_multiple_rx_gradient(self, tol):
        """Tests that the gradient of multiple RX gates in a circuit
        yields the correct result."""
        dev = qml.device("default.qubit", wires=3)
        params = np.array([np.pi, np.pi / 2, np.pi / 3])

        with ReversibleTape() as tape:
            qml.RX(params[0], wires=0)
            qml.RX(params[1], wires=1)
            qml.RX(params[2], wires=2)

            for idx in range(3):
                qml.expval(qml.PauliZ(idx))

        circuit_output = tape.execute(dev)
        expected_output = np.cos(params)
        assert np.allclose(circuit_output, expected_output, atol=tol, rtol=0)

        # circuit jacobians
        circuit_jacobian = tape.jacobian(dev, method="analytic")
        expected_jacobian = -np.diag(np.sin(params))
        assert np.allclose(circuit_jacobian, expected_jacobian, atol=tol, rtol=0)

    qubit_ops = [getattr(qml, name) for name in qml.ops._qubit__ops__]
    analytic_qubit_ops = {cls for cls in qubit_ops if cls.grad_method == "A"}
    analytic_qubit_ops = analytic_qubit_ops - {
        qml.CRX,
        qml.CRY,
        qml.CRZ,
        qml.CRot,
        qml.PhaseShift,
        qml.PauliRot,
        qml.MultiRZ,
        qml.U1,
        qml.U2,
        qml.U3,
    }

    @pytest.mark.parametrize("obs", [qml.PauliX, qml.PauliY])
    @pytest.mark.parametrize("op", analytic_qubit_ops)
    def test_gradients(self, op, obs, mocker, tol):
        """Tests that the gradients of circuits match between the
        finite difference and analytic methods."""
        args = np.linspace(0.2, 0.5, op.num_params)

        with ReversibleTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(0.543, wires=0)
            qml.CNOT(wires=[0, 1])

            op(*args, wires=range(op.num_wires))

            qml.Rot(1.3, -2.3, 0.5, wires=[0])
            qml.RZ(-0.5, wires=0)
            qml.RY(0.5, wires=1)
            qml.CNOT(wires=[0, 1])

            qml.expval(obs(wires=0))
            qml.expval(qml.PauliZ(wires=1))

        dev = qml.device("default.qubit", wires=2)
        res = tape.execute(dev)

        tape._update_gradient_info()
        tape.trainable_params = set(range(1, 1 + op.num_params))

        # check that every parameter is analytic
        for i in range(op.num_params):
            assert tape._par_info[1 + i]["grad_method"][0] == "A"

        grad_F = tape.jacobian(dev, method="numeric")

        spy = mocker.spy(ReversibleTape, "analytic_pd")
        spy_execute = mocker.spy(tape, "execute_device")
        grad_A = tape.jacobian(dev, method="analytic")
        spy.assert_called()

        # check that the execute device method has only been called
        # once, for all parameters.
        spy_execute.assert_called_once()

        assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)

    def test_gradient_gate_with_multiple_parameters(self, tol):
        """Tests that gates with multiple free parameters yield correct gradients."""
        x, y, z = [0.5, 0.3, -0.7]

        with ReversibleTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        dev = qml.device("default.qubit", wires=1)
        grad_A = tape.jacobian(dev, method="analytic")
        grad_F = tape.jacobian(dev, method="numeric")

        # gradient has the correct shape and every element is nonzero
        assert grad_A.shape == (1, 3)
        assert np.count_nonzero(grad_A) == 3
        # the different methods agree
        assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)
Example #30
# circuit is a simple displacement of the ground state and returns a sample of size shots
@qml.qnode(dev)
def variational_circuit(x=None):
    qml.Displacement(x, 0.0, wires=0)
    return qml.sample(qml.X(0))


output = variational_circuit(x=input)

# calculate the expectation value using the Berry-Esseen theorem
ev1 = np.mean(output)
var = np.var(output)
ev = np.random.normal(ev1, np.sqrt(var / shots))

# plot
min_ = min(output)
max_ = max(output)
bins = np.linspace(min_, max_, 100)
fig = plt.figure()
plt.hist(output, bins)
plt.plot([ev, ev], [0, 350], 'r-')
plt.yticks()
plt.xlabel("$x$", size=22)
plt.ylabel("f", rotation='horizontal', labelpad=15, size=22)
plt.tick_params(axis="both", which="major", labelsize=22)
plt.tick_params(axis="both", which="minor", labelsize=22)
plt.legend(['expectation value', 'data'], prop={'size': 12}, loc='upper right')
fig.savefig('expectation_val_{}_shots.pdf'.format(shots),
            bbox_inches='tight')
plt.show()