        def cost(a, cache):
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.RY(a[2], wires=0)
                qml.expval(qml.PauliZ(0))
                qml.expval(qml.PauliZ(1))

            return execute(
                [tape],
                dev,
                gradient_fn="device",
                cache=cache,
                mode="backward",
                gradient_kwargs={"method": "adjoint_jacobian"},
                interface="torch",
            )[0]
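
        # NOTE (added, assumed context): these excerpts come from PennyLane's
        # batch-execution test suites and rely on names defined elsewhere.
        # A minimal sketch of the assumed surrounding module:
        #
        #     import numpy as np
        #     import pennylane as qml
        #     import tensorflow as tf
        #     import torch
        #     from pennylane.gradients import param_shift
        #     from pennylane.interfaces.batch import execute
        #
        # `dev` is a qml.device(...) created by each test, and `execute_kwargs`
        # is supplied by pytest fixtures, e.g. {"gradient_fn": param_shift}.
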
        def cost_fn(x):
            with qml.tape.JacobianTape() as tape1:
                qml.Hadamard(0)
                qml.RY(x[0], wires=[0])
                qml.CNOT(wires=[0, 1])
                qml.expval(qml.PauliZ(0))

            with qml.tape.JacobianTape() as tape2:
                qml.Hadamard(0)
                qml.CRX(2 * x[0] * x[1], wires=[0, 1])
                qml.RX(2 * x[1], wires=[1])
                qml.expval(qml.PauliZ(0))

            return execute(tapes=[tape1, tape2],
                           device=dev,
                           interface="jax",
                           **execute_kwargs)
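
        # Usage sketch (added, assumed): with interface="jax" the returned
        # results are jax arrays, so the whole batch can be differentiated
        # at once, e.g.
        #
        #     import jax
        #     import jax.numpy as jnp
        #     x = jnp.array([0.543, -0.654])
        #     jac = jax.jacobian(cost_fn)(x)  # one Jacobian per tape
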
        def cost(x, cache):
            with qml.tape.JacobianTape() as tape:
                qml.RX(x[0], wires=[0])
                qml.RY(x[1], wires=[1])

                for i in range(2, num_params):
                    qml.RZ(x[i], wires=[i % 2])

                qml.CNOT(wires=[0, 1])
                qml.var(qml.PauliZ(0) @ qml.PauliX(1))

            return execute([tape],
                           dev,
                           gradient_fn=param_shift,
                           cache=cache,
                           interface="torch",
                           max_diff=2)[0]
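
        # Usage sketch (added, assumed): `num_params` is defined by the
        # enclosing test. With max_diff=2 the parameter-shift pipeline is
        # twice differentiable, so torch can take a Hessian of this cost:
        #
        #     x = torch.tensor([0.543, -0.654], requires_grad=True)
        #     hess = torch.autograd.functional.hessian(
        #         lambda x: cost(x, cache=True)[0], x
        #     )
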
        def cost_fn(x):
            with qml.tape.JacobianTape() as tape:
                qml.RX(x[0], wires=[0])
                qml.RY(x[1], wires=[1])
                qml.CNOT(wires=[0, 1])
                qml.expval(qml.PauliZ(0))

            return execute(
                [tape],
                dev,
                gradient_fn="device",
                gradient_kwargs={
                    "method": "adjoint_jacobian",
                    "use_device_state": True
                },
                interface="torch",
            )[0]
    def test_incorrect_mode(self):
        """Test that an error is raised if a gradient transform
        is used with mode=forward"""
        a = tf.Variable([0.1, 0.2])

        dev = qml.device("default.qubit", wires=1)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.expval(qml.PauliZ(0))

        with pytest.raises(
            ValueError, match="Gradient transforms cannot be used with mode='forward'"
        ):
            res = execute([tape], dev, gradient_fn=param_shift, mode="forward", interface="tf")[0]
        def cost(a, b, cache):
            with qml.tape.JacobianTape() as tape1:
                qml.RY(a, wires=0)
                qml.RX(b, wires=0)
                qml.expval(qml.PauliZ(0))

            with qml.tape.JacobianTape() as tape2:
                qml.RY(a, wires=0)
                qml.RX(b, wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute([tape1, tape2],
                          dev,
                          gradient_fn=param_shift,
                          cache=cache,
                          interface="jax")
            return res[0][0]
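
        # Usage sketch (added, assumed): the scalar return value can be
        # differentiated with respect to both arguments in one call:
        #
        #     import jax
        #     g = jax.grad(cost, argnums=(0, 1))(0.1, 0.2, None)  # cache=None
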
    def test_hessian_vector_valued(self, tol):
        """Test hessian calculation of a vector valued QNode"""
        dev = qml.device("default.qubit.tf", wires=1)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)

        with tf.GradientTape() as t2:
            with tf.GradientTape(persistent=True) as t1:
                with qml.tape.JacobianTape() as tape:
                    qml.RY(params[0], wires=0)
                    qml.RX(params[1], wires=0)
                    qml.probs(wires=0)

                res = execute([tape], dev, gradient_fn=param_shift, interface="tf", max_diff=2)
                res = tf.stack(res)

            g = t1.jacobian(res, params, experimental_use_pfor=False)

        hess = tf.squeeze(t2.jacobian(g, params))

        a, b = params * 1.0

        expected_res = [
            0.5 + 0.5 * tf.cos(a) * tf.cos(b),
            0.5 - 0.5 * tf.cos(a) * tf.cos(b),
        ]
        assert np.allclose(res, expected_res, atol=tol, rtol=0)

        expected_g = [
            [-0.5 * tf.sin(a) * tf.cos(b), -0.5 * tf.cos(a) * tf.sin(b)],
            [0.5 * tf.sin(a) * tf.cos(b), 0.5 * tf.cos(a) * tf.sin(b)],
        ]
        assert np.allclose(g, expected_g, atol=tol, rtol=0)

        expected_hess = [
            [
                [-0.5 * tf.cos(a) * tf.cos(b), 0.5 * tf.sin(a) * tf.sin(b)],
                [0.5 * tf.sin(a) * tf.sin(b), -0.5 * tf.cos(a) * tf.cos(b)],
            ],
            [
                [0.5 * tf.cos(a) * tf.cos(b), -0.5 * tf.sin(a) * tf.sin(b)],
                [-0.5 * tf.sin(a) * tf.sin(b), 0.5 * tf.cos(a) * tf.cos(b)],
            ],
        ]

        np.testing.assert_allclose(hess, expected_hess, atol=tol, rtol=0, verbose=True)
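
        # Note (added): the expected values follow from the single-qubit state
        # RX(b) RY(a) |0>, whose computational-basis probabilities are
        #     p(a, b) = [(1 + cos(a)cos(b)) / 2, (1 - cos(a)cos(b)) / 2];
        # the gradient and Hessian above are the first and second partial
        # derivatives of these two entries with respect to (a, b).
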
Example #8
    def test_parameter_shift_hessian(self, params, tol):
        """Tests that the output of the parameter-shift transform
        can be differentiated using tensorflow, yielding second derivatives."""
        dev = qml.device("default.qubit.tf", wires=2)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)
        x, y = params * 1.0

        with tf.GradientTape() as t2:
            with tf.GradientTape() as t1:
                with qml.tape.JacobianTape() as tape1:
                    qml.RX(params[0], wires=[0])
                    qml.RY(params[1], wires=[1])
                    qml.CNOT(wires=[0, 1])
                    qml.var(qml.PauliZ(0) @ qml.PauliX(1))

                with qml.tape.JacobianTape() as tape2:
                    qml.RX(params[0], wires=0)
                    qml.RY(params[0], wires=1)
                    qml.CNOT(wires=[0, 1])
                    qml.probs(wires=1)

                result = execute([tape1, tape2],
                                 dev,
                                 gradient_fn=param_shift,
                                 interface="tf",
                                 max_diff=2)
                res = result[0][0] + result[1][0, 0]

            expected = 0.5 * (3 + np.cos(x)**2 * np.cos(2 * y))
            assert np.allclose(res, expected, atol=tol, rtol=0)

            grad = t1.gradient(res, params)
            expected = np.array([
                -np.cos(x) * np.cos(2 * y) * np.sin(x),
                -np.cos(x)**2 * np.sin(2 * y)
            ])
            assert np.allclose(grad, expected, atol=tol, rtol=0)

        hess = t2.jacobian(grad, params)
        expected = np.array([
            [-np.cos(2 * x) * np.cos(2 * y),
             np.sin(2 * x) * np.sin(2 * y)],
            [np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x)**2 * np.cos(2 * y)],
        ])
        assert np.allclose(hess, expected, atol=tol, rtol=0)
Example #9
    def test_sampling(self, torch_device, execute_kwargs):
        """Test sampling works as expected"""
        if execute_kwargs["gradient_fn"] == "device" and execute_kwargs[
                "mode"] == "forward":
            pytest.skip("Adjoint differentiation does not support samples")

        dev = qml.device("default.qubit", wires=2, shots=10)

        with qml.tape.JacobianTape() as tape:
            qml.Hadamard(wires=[0])
            qml.CNOT(wires=[0, 1])
            qml.sample(qml.PauliZ(0))
            qml.sample(qml.PauliX(1))

        res = execute([tape], dev, **execute_kwargs)[0]

        assert res.shape == (2, 10)
        assert isinstance(res, torch.Tensor)
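
        # Shape note (added): two qml.sample measurements over shots=10 give a
        # (2, 10) tensor -- one row per sampled observable, one column per shot.
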
Example #10
    def test_unknown_gradient_fn_error(self):
        """Test that an error is raised if an unknown gradient function
        is passed"""
        a = tf.Variable([0.1, 0.2])

        dev = qml.device("default.qubit", wires=1)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute([tape], dev, gradient_fn=lambda x: x,
                          interface="tf")[0]

        with pytest.raises(ValueError, match="Unknown gradient function"):
            print(t.jacobian(res, a))
Example #11
        def cost_fn(x):
            with qml.tape.JacobianTape() as tape1:
                qml.RX(x[0], wires=[0])
                qml.RY(x[1], wires=[1])
                qml.CNOT(wires=[0, 1])
                qml.var(qml.PauliZ(0) @ qml.PauliX(1))

            with qml.tape.JacobianTape() as tape2:
                qml.RX(x[0], wires=0)
                qml.RY(x[0], wires=1)
                qml.CNOT(wires=[0, 1])
                qml.probs(wires=1)

            result = execute([tape1, tape2],
                             dev,
                             gradient_fn=param_shift,
                             max_diff=1)
            return result[0] + result[1][0, 0]
    def test_probability_differentiation(self, execute_kwargs, tol):
        """Tests correct output shape and evaluation for a tape
        with prob outputs"""

        if execute_kwargs["gradient_fn"] == "device":
            pytest.skip("Adjoint differentiation does not yet support probabilities")

        dev = qml.device("default.qubit", wires=2)
        x = tf.Variable(0.543, dtype=tf.float64)
        y = tf.Variable(-0.654, dtype=tf.float64)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RX(x, wires=[0])
                qml.RY(y, wires=[1])
                qml.CNOT(wires=[0, 1])
                qml.probs(wires=[0])
                qml.probs(wires=[1])

            res = execute([tape], dev, **execute_kwargs)[0]

        expected = np.array(
            [
                [tf.cos(x / 2) ** 2, tf.sin(x / 2) ** 2],
                [(1 + tf.cos(x) * tf.cos(y)) / 2, (1 - tf.cos(x) * tf.cos(y)) / 2],
            ]
        )
        assert np.allclose(res, expected, atol=tol, rtol=0)

        res = t.jacobian(res, [x, y])
        expected = np.array(
            [
                [
                    [-tf.sin(x) / 2, tf.sin(x) / 2],
                    [-tf.sin(x) * tf.cos(y) / 2, tf.cos(y) * tf.sin(x) / 2],
                ],
                [
                    [0, 0],
                    [-tf.cos(x) * tf.sin(y) / 2, tf.cos(x) * tf.sin(y) / 2],
                ],
            ]
        )
        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #13
        def _cost_fn(weights, coeffs1, coeffs2, dev=None):
            obs1 = [
                qml.PauliZ(0),
                qml.PauliZ(0) @ qml.PauliX(1),
                qml.PauliY(0)
            ]
            H1 = qml.Hamiltonian(coeffs1, obs1)

            obs2 = [qml.PauliZ(0)]
            H2 = qml.Hamiltonian(coeffs2, obs2)

            with qml.tape.JacobianTape() as tape:
                qml.RX(weights[0], wires=0)
                qml.RY(weights[1], wires=1)
                qml.CNOT(wires=[0, 1])
                qml.expval(H1)
                qml.expval(H2)

            return execute([tape], dev, **execute_kwargs)[0]
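
        # Usage sketch (added, assumed): <H> is linear in the Hamiltonian
        # coefficients, so this cost is differentiable with respect to
        # `coeffs1` and `coeffs2` as well as `weights`. If `execute_kwargs`
        # selects the jax interface, for example:
        #
        #     import jax
        #     jac = jax.jacobian(_cost_fn, argnums=(0, 1, 2))(
        #         weights, coeffs1, coeffs2, dev=dev
        #     )
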
Example #14
    def test_jacobian(self, torch_device, execute_kwargs, tol):
        """Test jacobian calculation by checking against analytic values"""
        a_val = 0.1
        b_val = 0.2

        a = torch.tensor(a_val, requires_grad=True, device=torch_device)
        b = torch.tensor(b_val, requires_grad=True, device=torch_device)

        dev = qml.device("default.qubit", wires=2)

        with qml.tape.JacobianTape() as tape:
            qml.RZ(torch.tensor(0.543, device=torch_device), wires=0)
            qml.RY(a, wires=0)
            qml.RX(b, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
            qml.expval(qml.PauliY(1))

        res = execute([tape], dev, **execute_kwargs)[0]
        assert tape.trainable_params == [1, 2]

        assert isinstance(res, torch.Tensor)
        assert res.shape == (2, )

        expected = torch.tensor(
            [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)],
            device=torch_device)
        assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)

        loss = torch.sum(res)

        loss.backward()
        expected = torch.tensor(
            [
                -np.sin(a_val) + np.sin(a_val) * np.sin(b_val),
                -np.cos(a_val) * np.cos(b_val)
            ],
            dtype=a.dtype,
            device=torch_device,
        )
        assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0)
        assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0)
    def test_cache_maxsize(self, mocker):
        """Test the cachesize property of the cache"""
        dev = qml.device("default.qubit", wires=1)
        spy = mocker.spy(qml.interfaces.batch, "cache_execute")
        a = tf.Variable([0.1, 0.2])

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.probs(wires=0)

            res = execute([tape], dev, gradient_fn=param_shift, cachesize=2, interface="tf")[0]

        t.jacobian(res, a)
        cache = spy.call_args[0][1]

        assert cache.maxsize == 2
        assert cache.currsize == 2
        assert len(cache) == 2
Example #16
    def test_sampling_gradient_error(self, torch_device, execute_kwargs):
        """Test differentiating a tape with sampling results in an error"""
        if execute_kwargs["gradient_fn"] == "device" and execute_kwargs["mode"] == "forward":
            pytest.skip("Adjoint differentiation does not support samples")

        dev = qml.device("default.qubit", wires=1, shots=10)

        x = torch.tensor(0.65, requires_grad=True)

        with qml.tape.JacobianTape() as tape:
            qml.RX(x, wires=[0])
            qml.sample(qml.PauliZ(0))

        res = execute([tape], dev, **execute_kwargs)[0]

        with pytest.raises(
            RuntimeError,
            match="element 0 of tensors does not require grad and does not have a grad_fn",
        ):
            res.backward()
Example #17
    def test_max_diff(self, tol):
        """Test that setting the max_diff parameter blocks higher-order
        derivatives"""
        dev = qml.device("default.qubit.tf", wires=2)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)
        x, y = params * 1.0

        with tf.GradientTape() as t2:
            with tf.GradientTape() as t1:
                with qml.tape.JacobianTape() as tape1:
                    qml.RX(params[0], wires=[0])
                    qml.RY(params[1], wires=[1])
                    qml.CNOT(wires=[0, 1])
                    qml.var(qml.PauliZ(0) @ qml.PauliX(1))

                with qml.tape.JacobianTape() as tape2:
                    qml.RX(params[0], wires=0)
                    qml.RY(params[0], wires=1)
                    qml.CNOT(wires=[0, 1])
                    qml.probs(wires=1)

                result = execute([tape1, tape2],
                                 dev,
                                 gradient_fn=param_shift,
                                 max_diff=1,
                                 interface="tf")
                res = result[0][0] + result[1][0, 0]

                expected = 0.5 * (3 + np.cos(x)**2 * np.cos(2 * y))
                assert np.allclose(res, expected, atol=tol, rtol=0)

            grad = t1.gradient(res, params)

            expected = np.array([
                -np.cos(x) * np.cos(2 * y) * np.sin(x),
                -np.cos(x)**2 * np.sin(2 * y)
            ])
            assert np.allclose(grad, expected, atol=tol, rtol=0)

        hess = t2.jacobian(grad, params)
        assert hess is None
Example #18
    def test_execution(self, torch_device, execute_kwargs):
        """Test that the execute function produces results with the expected shapes"""
        dev = qml.device("default.qubit", wires=1)
        a = torch.tensor(0.1, requires_grad=True, device=torch_device)
        b = torch.tensor(0.2, requires_grad=False, device=torch_device)

        with qml.tape.JacobianTape() as tape1:
            qml.RY(a, wires=0)
            qml.RX(b, wires=0)
            qml.expval(qml.PauliZ(0))

        with qml.tape.JacobianTape() as tape2:
            qml.RY(a, wires=0)
            qml.RX(b, wires=0)
            qml.expval(qml.PauliZ(0))

        res = execute([tape1, tape2], dev, **execute_kwargs)

        assert len(res) == 2
        assert res[0].shape == (1, )
        assert res[1].shape == (1, )
Example #19
    def test_matrix_parameter(self, execute_kwargs, U, tol):
        """Test that the TF interface works correctly
        with a matrix parameter"""
        a = tf.Variable(0.1, dtype=tf.float64)

        dev = qml.device("default.qubit", wires=2)

        with tf.GradientTape() as t:

            with qml.tape.JacobianTape() as tape:
                qml.QubitUnitary(U, wires=0)
                qml.RY(a, wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute([tape], dev, **execute_kwargs)[0]
            assert tape.trainable_params == {1}

        assert np.allclose(res, -tf.cos(a), atol=tol, rtol=0)

        res = t.jacobian(res, a)
        assert np.allclose(res, tf.sin(a), atol=tol, rtol=0)
    def test_custom_cache(self, mocker):
        """Test the use of a custom cache object"""
        dev = qml.device("default.qubit", wires=1)
        spy = mocker.spy(qml.interfaces.batch, "cache_execute")
        a = tf.Variable([0.1, 0.2])
        custom_cache = {}

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.probs(wires=0)

            res = execute([tape], dev, gradient_fn=param_shift, cache=custom_cache, interface="tf")[
                0
            ]

        t.jacobian(res, a)

        cache = spy.call_args[0][1]
        assert cache is custom_cache
Example #21
    def test_no_trainable_parameters(self, execute_kwargs, tol):
        """Test evaluation and Jacobian if there are no trainable parameters"""
        b = tf.constant(0.2, dtype=tf.float64)
        dev = qml.device("default.qubit", wires=2)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(0.2, wires=0)
                qml.RX(b, wires=0)
                qml.CNOT(wires=[0, 1])
                qml.expval(qml.PauliZ(0))
                qml.expval(qml.PauliZ(1))

            res = execute([tape], dev, **execute_kwargs)[0]
            loss = tf.reduce_sum(res)

        assert res.shape == (2, )
        assert isinstance(res, tf.Tensor)

        res = t.jacobian(res, b)
        assert res is None
Example #22
    def test_no_trainable_parameters(self, torch_device, execute_kwargs, tol):
        """Test evaluation and Jacobian if there are no trainable parameters"""
        dev = qml.device("default.qubit", wires=2)

        with qml.tape.JacobianTape() as tape:
            qml.RY(0.2, wires=0)
            qml.RX(torch.tensor(0.1, device=torch_device), wires=0)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
            qml.expval(qml.PauliZ(1))

        res = execute([tape], dev, **execute_kwargs)[0]
        assert tape.trainable_params == []

        assert res.shape == (2,)
        assert isinstance(res, torch.Tensor)

        with pytest.raises(
            RuntimeError,
            match="element 0 of tensors does not require grad and does not have a grad_fn",
        ):
            res.backward()
Example #23
    def test_execution(self, execute_kwargs):
        """Test execution"""
        dev = qml.device("default.qubit", wires=1)
        a = tf.Variable(0.1)
        b = tf.Variable(0.2)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape1:
                qml.RY(a, wires=0)
                qml.RX(b, wires=0)
                qml.expval(qml.PauliZ(0))

            with qml.tape.JacobianTape() as tape2:
                qml.RY(a, wires=0)
                qml.RX(b, wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute([tape1, tape2], dev, **execute_kwargs)

        assert len(res) == 2
        assert res[0].shape == (1, )
        assert res[1].shape == (1, )
Example #24
    def test_forward_mode(self, mocker):
        """Test that forward mode uses the `device.execute_and_gradients` pathway"""
        dev = qml.device("default.qubit", wires=1)
        spy = mocker.spy(dev, "execute_and_gradients")

        a = torch.tensor([0.1, 0.2], requires_grad=True)

        with qml.tape.JacobianTape() as tape:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        res = execute(
            [tape],
            dev,
            gradient_fn="device",
            gradient_kwargs={"method": "adjoint_jacobian"},
            interface="torch",
        )[0]

        # two device executions; one for the value, one for the Jacobian
        assert dev.num_executions == 2
        spy.assert_called()
    def test_forward_mode(self, mocker):
        """Test that forward mode uses the `device.execute_and_gradients` pathway"""
        dev = qml.device("default.qubit", wires=1)
        a = tf.Variable([0.1, 0.2])
        spy = mocker.spy(dev, "execute_and_gradients")

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a[0], wires=0)
                qml.RX(a[1], wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute(
                [tape],
                dev,
                gradient_fn="device",
                gradient_kwargs={"method": "adjoint_jacobian", "use_device_state": True},
                interface="tf",
            )[0]

        # adjoint method only performs a single device execution, but gets both result and gradient
        assert dev.num_executions == 1
        spy.assert_called()
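
        # Note (added): compare with the torch variant above. Without
        # use_device_state the device executes twice (once for the value, once
        # for the Jacobian); with use_device_state=True the adjoint method
        # reuses the device's final state, so a single execution yields both
        # the result and the gradient.
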
Example #26
    def test_classical_processing(self, execute_kwargs, tol):
        """Test classical processing within the quantum tape"""
        a = tf.Variable(0.1, dtype=tf.float64)
        b = tf.constant(0.2, dtype=tf.float64)
        c = tf.Variable(0.3, dtype=tf.float64)

        dev = qml.device("default.qubit", wires=1)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a * c, wires=0)
                qml.RZ(b, wires=0)
                qml.RX(c + c**2 + tf.sin(a), wires=0)
                qml.expval(qml.PauliZ(0))

            res = execute([tape], dev, **execute_kwargs)[0]
            assert tape.trainable_params == {0, 2}
            assert tape.get_parameters() == [a * c, c + c**2 + tf.sin(a)]

        res = t.jacobian(res, [a, b, c])
        assert isinstance(res[0], tf.Tensor)
        assert res[1] is None
        assert isinstance(res[2], tf.Tensor)
Example #27
    def test_scalar_jacobian(self, torch_device, execute_kwargs, tol):
        """Test scalar jacobian calculation by comparing two types of pipelines"""
        a = torch.tensor(0.1, requires_grad=True, dtype=torch.float64, device=torch_device)
        dev = qml.device("default.qubit", wires=2)

        with qml.tape.JacobianTape() as tape:
            qml.RY(a, wires=0)
            qml.expval(qml.PauliZ(0))

        res = execute([tape], dev, **execute_kwargs)[0]
        res.backward()

        # compare to backprop gradient
        def cost(a):
            with qml.tape.QuantumTape() as tape:
                qml.RY(a, wires=0)
                qml.expval(qml.PauliZ(0))

            dev = qml.device("default.qubit.autograd", wires=2)
            return dev.batch_execute([tape])[0]

        expected = qml.grad(cost, argnum=0)(0.1)
        assert torch.allclose(a.grad, torch.tensor(expected, device=torch_device), atol=tol, rtol=0)
Example #28
    def test_tape_no_parameters(self, torch_device, execute_kwargs, tol):
        """Test that a tape with no parameters is correctly
        ignored during the gradient computation"""
        dev = qml.device("default.qubit", wires=1)
        params = torch.tensor([0.1, 0.2],
                              requires_grad=True,
                              device=torch_device)
        x, y = params.detach()

        with qml.tape.JacobianTape() as tape1:
            qml.Hadamard(0)
            qml.expval(qml.PauliX(0))

        with qml.tape.JacobianTape() as tape2:
            qml.RY(0.5, wires=0)
            qml.expval(qml.PauliZ(0))

        with qml.tape.JacobianTape() as tape3:
            qml.RY(params[0], wires=0)
            qml.RX(params[1], wires=0)
            qml.expval(qml.PauliZ(0))

        res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs))
        expected = torch.tensor(1 + np.cos(0.5) + torch.cos(x) * torch.cos(y),
                                dtype=res.dtype,
                                device=res.device)

        assert torch.allclose(res, expected, atol=tol, rtol=0)

        res.backward()
        grad = params.grad.detach()
        expected = torch.tensor(
            [-torch.cos(y) * torch.sin(x), -torch.cos(x) * torch.sin(y)],
            dtype=grad.dtype,
            device=grad.device,
        )
        assert torch.allclose(grad, expected, atol=tol, rtol=0)
    def test_jacobian(self, execute_kwargs, tol):
        """Test jacobian calculation"""
        a = tf.Variable(0.1, dtype=tf.float64)
        b = tf.Variable(0.2, dtype=tf.float64)
        dev = qml.device("default.qubit", wires=2)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.RY(a, wires=0)
                qml.RX(b, wires=1)
                qml.CNOT(wires=[0, 1])
                qml.expval(qml.PauliZ(0))
                qml.expval(qml.PauliY(1))
            res = execute([tape], dev, max_diff=2, **execute_kwargs)[0]

        expected = [np.cos(a), -np.cos(a) * np.sin(b)]
        assert np.allclose(res, expected, atol=tol, rtol=0)

        (agrad, bgrad) = t.jacobian(res, [a, b])
        assert agrad.shape == (2,)
        assert bgrad.shape == (2,)

        expected = [[-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)]]
        assert np.allclose(expected, [agrad, bgrad], atol=tol, rtol=0)
Example #30
    def test_classical_processing(self, torch_device, execute_kwargs, tol):
        """Test the classical processing of gate parameters within the quantum tape"""
        p_val = [0.1, 0.2]
        params = torch.tensor(p_val, requires_grad=True, device=torch_device)

        dev = qml.device("default.qubit", wires=1)

        with qml.tape.JacobianTape() as tape:
            qml.RY(params[0] * params[1], wires=0)
            qml.RZ(0.2, wires=0)
            qml.RX(params[1] + params[1]**2 + torch.sin(params[0]), wires=0)
            qml.expval(qml.PauliZ(0))

        res = execute([tape], dev, **execute_kwargs)[0]

        assert tape.trainable_params == [0, 2]

        tape_params = torch.tensor([i.detach() for i in tape.get_parameters()],
                                   device=torch_device)
        expected = torch.tensor(
            [p_val[0] * p_val[1], p_val[1] + p_val[1]**2 + np.sin(p_val[0])],
            dtype=tape_params.dtype,
            device=torch_device,
        )

        assert torch.allclose(
            tape_params,
            expected,
            atol=tol,
            rtol=0,
        )

        res.backward()

        assert isinstance(params.grad, torch.Tensor)
        assert params.grad.shape == (2, )