Example #1
    def test_import_error(self, mocker):
        """Test that an exception is caught on import error"""

        mock = mocker.patch.object(autograd.extend, "defvjp")
        mock.side_effect = ImportError()

        # force a re-import of the autograd interface module, so that the
        # patched defvjp raises ImportError when the interface is loaded
        try:
            del sys.modules["pennylane.interfaces.batch.autograd"]
        except KeyError:
            pass

        dev = qml.device("default.qubit", wires=2, shots=None)

        with qml.tape.JacobianTape() as tape:
            qml.expval(qml.PauliY(1))

        with pytest.raises(
                qml.QuantumFunctionError,
                match="Autograd not found. Please install the latest version "
                "of Autograd to enable the 'autograd' interface",
        ):
            qml.execute([tape],
                        dev,
                        gradient_fn=param_shift,
                        interface="autograd")
Example #2
    def test_no_batch_transform(self, mocker):
        """Test that batch transforms can be disabled and enabled"""
        dev = qml.device("default.qubit", wires=2, shots=100000)

        H = qml.PauliZ(0) @ qml.PauliZ(1) - qml.PauliX(0)
        x = 0.6
        y = 0.2

        with qml.tape.JacobianTape() as tape:
            qml.RX(x, wires=0)
            qml.RY(y, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(H)

        spy = mocker.spy(dev, "batch_transform")

        with pytest.raises(AssertionError,
                           match="Hamiltonian must be used with shots=None"):
            qml.execute([tape], dev, None, device_batch_transform=False)

        spy.assert_not_called()

        res = qml.execute([tape], dev, None, device_batch_transform=True)
        spy.assert_called()
        assert np.allclose(res[0], np.cos(y), atol=0.1)
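
For reference, the tests above pass hand-built tapes straight to qml.execute. Below is a minimal, self-contained sketch of that call pattern, assuming a PennyLane release in which qml.tape.JacobianTape is still available (as in the snippets on this page); the circuit and parameter values are purely illustrative.

import pennylane as qml

dev = qml.device("default.qubit", wires=2)

with qml.tape.JacobianTape() as tape:
    qml.RX(0.4, wires=0)
    qml.CNOT(wires=[0, 1])
    qml.expval(qml.PauliZ(1))

# qml.execute takes a list of tapes and returns one result per tape;
# gradient_fn selects how Jacobians are computed if the output is later
# differentiated (gradient_fn=None disables differentiation entirely).
res = qml.execute([tape], dev, gradient_fn=qml.gradients.param_shift)
print(res[0])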
Example #3
    def test_Rot_gradient(self, theta, dev):
        """Tests that the device gradient of an arbitrary Euler-angle-parameterized gate is
        correct."""

        params = np.array([theta, theta**3, np.sqrt(2) * theta])

        with qml.tape.QuantumTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            qml.Rot(*params, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        calculated_val = dev.adjoint_jacobian(tape)

        h = 2e-3 if dev.R_DTYPE == np.float32 else 1e-7
        tol = 1e-3 if dev.R_DTYPE == np.float32 else 1e-7

        # compare to finite differences
        tapes, fn = qml.gradients.finite_diff(tape, h=h)
        numeric_val = fn(qml.execute(tapes, dev, None))
        assert np.allclose(calculated_val,
                           numeric_val[0][2:],
                           atol=tol,
                           rtol=0)
Example #4
    def test_gradient_gate_with_multiple_parameters_hamiltonian(self, dev):
        """Tests that gates with multiple free parameters yield correct gradients."""
        x, y, z = [0.5, 0.3, -0.7]

        ham = qml.Hamiltonian(
            [1.0, 0.3, 0.3],
            [qml.PauliX(0) @ qml.PauliX(1),
             qml.PauliZ(0),
             qml.PauliZ(1)])

        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(ham)

        tape.trainable_params = {1, 2, 3}

        h = 2e-3 if dev.R_DTYPE == np.float32 else 1e-7
        tol = 1e-3 if dev.R_DTYPE == np.float32 else 1e-7

        grad_D = dev.adjoint_jacobian(tape)
        tapes, fn = qml.gradients.finite_diff(tape, h=h)
        grad_F = fn(qml.execute(tapes, dev, None))

        # gradient has the correct shape and every element is nonzero
        assert grad_D.shape == (1, 3)
        assert np.count_nonzero(grad_D) == 3
        # the different methods agree
        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)
Example #5
    def test_gradients(self, op, obs, tol, dev):
        """Tests that the gradients of circuits match between the finite difference and device
        methods."""

        with qml.tape.JacobianTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(0.543, wires=0)
            qml.CNOT(wires=[0, 1])

            qml.apply(op)

            qml.Rot(1.3, -2.3, 0.5, wires=[0])
            qml.RZ(-0.5, wires=0)
            qml.RY(0.5, wires=1).inv()
            qml.CNOT(wires=[0, 1])

            qml.expval(obs(wires=0))
            qml.expval(qml.PauliZ(wires=1))

        tape.trainable_params = set(range(1, 1 + op.num_params))

        grad_F = (lambda t, fn: fn(qml.execute(t, dev, None)))(
            *qml.gradients.finite_diff(tape))
        grad_D = dev.adjoint_jacobian(tape)

        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)
Example #6
    def test_hamiltonian_grad(self):
        """Test that the gradient of Hamiltonians works as expected."""
        dev = qml.device("default.qubit", wires=2)

        with qml.tape.JacobianTape() as tape:
            qml.RY(0.3, wires=0)
            qml.RX(0.5, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(
                qml.Hamiltonian([-1.5, 2.0],
                                [qml.PauliZ(0), qml.PauliZ(1)]))

        tape.trainable_params = {2, 3}
        res = qml.math.stack(tape.jacobian(dev)[0])

        with qml.tape.JacobianTape() as tape1:
            qml.RY(0.3, wires=0)
            qml.RX(0.5, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))

        with qml.tape.JacobianTape() as tape2:
            qml.RY(0.3, wires=0)
            qml.RX(0.5, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(1))

        expected = qml.math.stack(qml.execute([tape1, tape2], dev, None))
        assert np.allclose(expected, res)
Example #7
        def _wrapper(*args, **kwargs):
            shots = kwargs.pop("shots", False)
            qnode.construct(args, kwargs)
            tapes, processing_fn = self.construct(qnode.qtape, *targs,
                                                  **tkwargs)

            interface = qnode.interface
            execute_kwargs = getattr(qnode, "execute_kwargs", {})
            max_diff = execute_kwargs.pop("max_diff", 2)
            max_diff = transform_max_diff or max_diff

            gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method)
            gradient_kwargs = getattr(qnode, "gradient_kwargs", {})

            if interface is None or not self.differentiable:
                gradient_fn = None

            elif gradient_fn in ("best", "parameter-shift"):
                gradient_fn = qml.gradients.param_shift

            elif gradient_fn == "finite-diff":
                gradient_fn = qml.gradients.finite_diff

            res = qml.execute(
                tapes,
                device=qnode.device,
                gradient_fn=gradient_fn,
                interface=interface,
                max_diff=max_diff,
                override_shots=shots,
                gradient_kwargs=gradient_kwargs,
                **execute_kwargs,
            )

            return processing_fn(res)
Example #8
    def cost(coeffs, t):
        tape = create_tape(coeffs, t)

        if diff_method is qml.gradients.param_shift:
            tape = dev.expand_fn(tape)

        return qml.execute([tape], dev, diff_method)[0]
Example #9
    def test_gradients_hermitian(self, op, dev):
        """Tests that the gradients of circuits match between the finite difference and device
        methods."""

        # op.num_wires and op.num_params must be initialized a priori
        with qml.tape.QuantumTape() as tape:
            qml.Hadamard(wires=0)
            qml.RX(0.543, wires=0)
            qml.CNOT(wires=[0, 1])

            op.queue()

            qml.Rot(1.3, -2.3, 0.5, wires=[0])
            qml.RZ(-0.5, wires=0)
            qml.RY(0.5, wires=1).inv()
            qml.CNOT(wires=[0, 1])

            qml.expval(
                qml.Hermitian(
                    [[0, 0, 1, 1], [0, 1, 2, 1], [1, 2, 1, 0], [1, 1, 0, 0]],
                    wires=[0, 1]))

        tape.trainable_params = set(range(1, 1 + op.num_params))

        h = 1e-3 if dev.R_DTYPE == np.float32 else 1e-7
        tol = 1e-3 if dev.R_DTYPE == np.float32 else 1e-7

        grad_F = (lambda t, fn: fn(qml.execute(t, dev, None)))(
            *qml.gradients.finite_diff(tape, h=h))
        grad_D = dev.adjoint_jacobian(tape)

        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)
Example #10
    def test_torch(self, tol):
        """Tests that the output of the VJP transform
        can be differentiated using Torch."""
        torch = pytest.importorskip("torch")

        dev = qml.device("default.qubit", wires=2)

        params = torch.tensor([0.543, -0.654],
                              requires_grad=True,
                              dtype=torch.float64)
        dy = torch.tensor([-1.0, 0.0, 0.0, 1.0], dtype=torch.float64)

        with qml.tape.JacobianTape() as tape:
            ansatz(params[0], params[1])

        tape.trainable_params = {0, 1}
        tapes, fn = qml.gradients.vjp(tape, dy, param_shift)
        vjp = fn(
            qml.execute(tapes,
                        dev,
                        qml.gradients.param_shift,
                        interface="torch"))

        assert np.allclose(vjp.detach(),
                           expected(params.detach()),
                           atol=tol,
                           rtol=0)

        cost = vjp[0]
        cost.backward()

        exp = qml.jacobian(lambda x: expected(x)[0])(params.detach().numpy())
        assert np.allclose(params.grad, exp, atol=tol, rtol=0)
Example #11
def test_insert_dev(mocker, monkeypatch):
    """Test if a device transformed by the insert function does successfully add noise to
    subsequent circuit executions"""
    with QuantumTape() as in_tape:
        qml.RX(0.9, wires=0)
        qml.RY(0.4, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.RY(0.5, wires=0)
        qml.RX(0.6, wires=1)
        qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    dev = qml.device("default.mixed", wires=2)
    res_without_noise = qml.execute([in_tape], dev, qml.gradients.param_shift)

    new_dev = insert(qml.PhaseDamping, 0.4)(dev)
    spy = mocker.spy(new_dev, "default_expand_fn")

    res_with_noise = qml.execute([in_tape], new_dev, qml.gradients.param_shift)
    tape = spy.call_args[0][0]

    with QuantumTape() as tape_exp:
        qml.RX(0.9, wires=0)
        qml.PhaseDamping(0.4, wires=0)
        qml.RY(0.4, wires=1)
        qml.PhaseDamping(0.4, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.PhaseDamping(0.4, wires=0)
        qml.PhaseDamping(0.4, wires=1)
        qml.RY(0.5, wires=0)
        qml.PhaseDamping(0.4, wires=0)
        qml.RX(0.6, wires=1)
        qml.PhaseDamping(0.4, wires=1)
        qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    assert all(o1.name == o2.name
               for o1, o2 in zip(tape.operations, tape_exp.operations))
    assert all(o1.wires == o2.wires
               for o1, o2 in zip(tape.operations, tape_exp.operations))
    assert all(
        np.allclose(o1.parameters, o2.parameters)
        for o1, o2 in zip(tape.operations, tape_exp.operations))
    assert len(tape.measurements) == 1
    assert tape.observables[0].name == ["PauliZ", "PauliZ"]
    assert tape.observables[0].wires.tolist() == [0, 1]
    assert tape.measurements[0].return_type is Expectation

    assert not np.allclose(res_without_noise, res_with_noise)
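
A short usage sketch building on the test above (not part of the original test): the device returned by insert(qml.PhaseDamping, 0.4)(dev) behaves like an ordinary PennyLane device, so it can also back a QNode directly; the circuit below is illustrative.

import pennylane as qml
from pennylane.transforms import insert

noisy_dev = insert(qml.PhaseDamping, 0.4)(qml.device("default.mixed", wires=2))

@qml.qnode(noisy_dev)
def circuit(x):
    qml.RX(x, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

# Every execution of this QNode now includes the inserted PhaseDamping channels.
print(circuit(0.9))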
Example #12
    def test_provide_starting_state(self, tol, dev):
        """Tests provides correct answer when provided starting state."""
        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        dM1 = dev.adjoint_jacobian(tape)

        qml.execute([tape], dev, None)
        dM2 = dev.adjoint_jacobian(tape, starting_state=dev._pre_rotated_state)

        assert np.allclose(dM1, dM2, atol=tol, rtol=0)
Example #13
        def cost_fn(x):
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(x[0], 0, wires=0)
                qml.Rotation(x[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tapes, fn = param_shift_cv(tape, dev)
            jac = fn(qml.execute(tapes, dev, param_shift_cv, gradient_kwargs={"dev": dev}))
            return jac[0, 2]
Example #14
    def test_use_device_state(self, tol, dev):
        """Tests that when using the device state, the correct answer is still returned."""

        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        dM1 = dev.adjoint_jacobian(tape)

        qml.execute([tape], dev, None)
        dM2 = dev.adjoint_jacobian(tape, use_device_state=True)

        assert np.allclose(dM1, dM2, atol=tol, rtol=0)
Example #15
    def test_use_device_state(self, tol, dev):
        """Tests that when using the device state, the correct answer is still returned."""
        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        dy = np.array([1.0])

        fn1 = dev.vjp(tape.measurements, dy)
        vjp1 = fn1(tape)

        qml.execute([tape], dev, None)
        fn2 = dev.vjp(tape.measurements, dy, use_device_state=True)
        vjp2 = fn2(tape)

        assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
Example #16
        def cost_fn(x):
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(x[0], 0, wires=0)
                qml.Rotation(x[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tape.trainable_params = {0, 2}
            tapes, fn = qml.gradients.param_shift_cv(tape, dev)
            jac = fn(
                qml.execute(
                    tapes, dev, param_shift_cv, gradient_kwargs={"dev": dev}, interface="jax"
                )
            )
            return jac
Example #17
    def __call__(self, *args, **kwargs):
        override_shots = False

        if not self._qfunc_uses_shots_arg:
            # If shots specified in call but not in qfunc signature,
            # interpret it as device shots value for this call.
            override_shots = kwargs.pop("shots", False)

            if override_shots is not False:
                # Since shots has changed, we need to update the preferred gradient function.
                # This is because the gradient function chosen at initialization may
                # no longer be applicable.

                # store the initialization gradient function
                original_grad_fn = [self.gradient_fn, self.gradient_kwargs, self.device]

                # update the gradient function
                set_shots(self._original_device, override_shots)(self._update_gradient_fn)()

        # construct the tape
        self.construct(args, kwargs)

        # preprocess the tapes by applying any device-specific transforms
        tapes, processing_fn = self.device.batch_transform(self.tape)

        res = qml.execute(
            tapes,
            device=self.device,
            gradient_fn=self.gradient_fn,
            interface=self.interface,
            gradient_kwargs=self.gradient_kwargs,
            override_shots=override_shots,
            **self.execute_kwargs,
        )

        res = processing_fn(res)

        if override_shots is not False:
            # restore the initialization gradient function
            self.gradient_fn, self.gradient_kwargs, self.device = original_grad_fn

        self._update_original_device()

        if isinstance(self._qfunc_output, Sequence) or (
            self.tape.is_sampled and self.device._has_partitioned_shots()
        ):
            return res

        return qml.math.squeeze(res)
Example #18
    def test_provide_starting_state(self, tol, dev):
        """Tests that the correct answer is returned when a starting state is provided."""
        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.QuantumTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        dy = np.array([1.0])

        fn1 = dev.vjp(tape.measurements, dy)
        vjp1 = fn1(tape)

        qml.execute([tape], dev, None)
        fn2 = dev.vjp(tape.measurements,
                      dy,
                      starting_state=dev._pre_rotated_state)
        vjp2 = fn2(tape)

        assert np.allclose(vjp1, vjp2, atol=tol, rtol=0)
Example #19
    def get_omegas(self):
        r"""Measure the coefficients of the Riemannian gradient with respect to a Pauli word basis.
        We want to calculate the components of the Riemannian gradient in the Lie algebra
        with respect to a Pauli word basis. For a Hamiltonian of the form :math:`H = \sum_i c_i O_i`,
        where :math:`c_i\in\mathbb{R}`, this can be achieved by calculating

        .. math::

            \omega_{i,j} = \text{Tr}(c_i[\rho, O_i] P_j)

        where :math:`P_j` is a Pauli word in the set of Pauli monomials on :math:`N` qubits.

        Via the parameter shift rule, the commutator can be calculated as

        .. math::

            [\rho, O_i] = \frac{1}{2}(V(\pi/2) \rho V^\dagger(\pi/2) - V(-\pi/2) \rho V^\dagger(-\pi/2))

        where :math:`V` is the unitary generated by the Pauli word :math:`V(\theta) = \exp\{-i\theta P_j\}`.

        Returns:
            array: array of omegas for each direction in the Lie algebra.

        """

        obs_groupings, _ = qml.grouping.group_observables(self.observables, self.coeffs)
        # get all circuits we need to calculate the coefficients
        circuits = algebra_commutator(
            self.circuit.qtape,
            obs_groupings,
            self.lie_algebra_basis_names,
            self.nqubits,
        )[0]
        circuits = qml.execute(circuits, self.circuit.device, gradient_fn=None)
        circuits_plus = np.array(circuits[: len(circuits) // 2]).reshape(
            len(self.coeffs), len(self.lie_algebra_basis_names)
        )
        circuits_min = np.array(circuits[len(circuits) // 2 :]).reshape(
            len(self.coeffs), len(self.lie_algebra_basis_names)
        )

        # For each observable O_i in the Hamiltonian, we have to calculate all Lie coefficients
        omegas = 0.5 * (circuits_plus - circuits_min)

        return np.dot(self.coeffs, omegas)
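
Reading the last three lines of get_omegas together with the docstring formulas: if ⟨O_i⟩_{±,j} denotes the expectation value of the Hamiltonian term O_i on the circuit shifted by ±π/2 along the Lie-algebra direction P_j (this reading of circuits_plus and circuits_min is inferred from the code above), the returned array has one entry per basis direction,

    \omega_j = \sum_i c_i \cdot \frac{1}{2} \left( \langle O_i \rangle_{+,j} - \langle O_i \rangle_{-,j} \right),

so omegas holds the parameter-shift estimates of the commutator terms and np.dot(self.coeffs, omegas) carries out the weighted sum over the coefficients c_i.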
Example #20
def test_trainable_hamiltonian(dev_name, diff_method):
    """Test that the ApproxTimeEvolution template
    can be differentiated if the Hamiltonian coefficients are trainable"""
    dev = qml.device(dev_name, wires=2)

    obs = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliY(0) @ qml.PauliX(1)]

    def create_tape(coeffs, t):
        H = qml.Hamiltonian(coeffs, obs)

        with qml.tape.JacobianTape() as tape:
            qml.templates.ApproxTimeEvolution(H, t, 2)
            qml.expval(qml.PauliZ(0))

        return tape

    def cost(coeffs, t):
        tape = create_tape(coeffs, t)

        if diff_method is qml.gradients.param_shift:
            tape = dev.expand_fn(tape)

        return qml.execute([tape], dev, diff_method)[0]

    t = pnp.array(0.54, requires_grad=True)
    coeffs = pnp.array([-0.6, 2.0], requires_grad=True)

    res = cost(coeffs, t)
    grad = qml.grad(cost)(coeffs, t)

    assert len(grad) == 2

    assert isinstance(grad[0], np.ndarray)
    assert grad[0].shape == (2,)

    assert isinstance(grad[1], np.ndarray)
    assert grad[1].shape == tuple()

    # compare to finite-differences
    tape = create_tape(coeffs, t)
    g_tapes, fn = qml.gradients.finite_diff(tape, _expand=False, validate_params=False)
    expected = fn(qml.execute(g_tapes, dev, None))[0]

    assert np.allclose(grad[0], expected[0:1])
    assert np.allclose(grad[1], expected[2])
Example #21
    def test_ry_gradient(self, par, tol, dev):
        """Test that the gradient of the RY gate matches the exact analytic formula."""

        with qml.tape.JacobianTape() as tape:
            qml.RY(par, wires=[0])
            qml.expval(qml.PauliX(0))

        tape.trainable_params = {0}

        # gradients
        exact = np.cos(par)
        grad_F = (lambda t, fn: fn(qml.execute(t, dev, None)))(
            *qml.gradients.finite_diff(tape))
        grad_A = dev.adjoint_jacobian(tape)

        # different methods must agree
        assert np.allclose(grad_F, exact, atol=tol, rtol=0)
        assert np.allclose(grad_A, exact, atol=tol, rtol=0)
Example #22
    def test_pauli_rotation_gradient(self, G, theta, tol, dev):
        """Tests that the automatic gradients of Pauli rotations are correct."""

        with qml.tape.JacobianTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0], requires_grad=False) /
                                 np.sqrt(2),
                                 wires=0)
            G(theta, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1}

        calculated_val = dev.adjoint_jacobian(tape)

        # compare to finite differences
        tapes, fn = qml.gradients.finite_diff(tape)
        numeric_val = fn(qml.execute(tapes, dev, None))
        assert np.allclose(calculated_val, numeric_val, atol=tol, rtol=0)
Example #23
    def test_hamiltonian_dif_tensorflow(self):
        """Tests that the hamiltonian_expand tape transform is differentiable with the Tensorflow interface"""

        tf = pytest.importorskip("tensorflow")

        H = qml.Hamiltonian(
            [-0.2, 0.5, 1],
            [qml.PauliX(1),
             qml.PauliZ(1) @ qml.PauliY(2),
             qml.PauliZ(0)])
        var = tf.Variable([[0.1, 0.67, 0.3], [0.4, -0.5, 0.7]],
                          dtype=tf.float64)
        output = 0.42294409781940356
        output2 = [
            9.68883500e-02,
            -2.90832724e-01,
            -1.04448033e-01,
            -1.94289029e-09,
            3.50307411e-01,
            -3.41123470e-01,
        ]

        with tf.GradientTape() as gtape:
            with qml.tape.JacobianTape() as tape:
                for i in range(2):
                    qml.RX(var[i, 0], wires=0)
                    qml.RX(var[i, 1], wires=1)
                    qml.RX(var[i, 2], wires=2)
                    qml.CNOT(wires=[0, 1])
                    qml.CNOT(wires=[1, 2])
                    qml.CNOT(wires=[2, 0])
                qml.expval(H)

            tapes, fn = qml.transforms.hamiltonian_expand(tape)
            res = fn(
                qml.execute(tapes,
                            dev,
                            qml.gradients.param_shift,
                            interface="tf"))

            assert np.isclose(res, output)

            g = gtape.gradient(res, var)
            assert np.allclose(list(g[0]) + list(g[1]), output2)
Example #24
    def test_pauli_rotation_gradient(self, G, theta, dev):
        """Tests that the automatic gradients of Pauli rotations are correct."""

        with qml.tape.QuantumTape() as tape:
            qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
            G(theta, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1}

        calculated_val = dev.adjoint_jacobian(tape)

        h = 2e-3 if dev.R_DTYPE == np.float32 else 1e-7
        tol = 1e-3 if dev.R_DTYPE == np.float32 else 1e-7

        # compare to finite differences
        tapes, fn = qml.gradients.finite_diff(tape, h=h)
        numeric_val = fn(qml.execute(tapes, dev, None))
        assert np.allclose(calculated_val, numeric_val[0][2], atol=tol, rtol=0)
Example #25
    def test_torch(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using Torch."""
        torch = pytest.importorskip("torch")

        dev = qml.device("default.gaussian", wires=1)
        params = torch.tensor([0.543, -0.654],
                              dtype=torch.float64,
                              requires_grad=True)

        with qml.tape.JacobianTape() as tape:
            qml.Squeezing(params[0], 0, wires=0)
            qml.Rotation(params[1], wires=0)
            qml.var(qml.X(wires=[0]))

        tape.trainable_params = {0, 2}
        tapes, fn = qml.gradients.param_shift_cv(tape, dev)
        jac = fn(
            qml.execute(tapes,
                        dev,
                        param_shift_cv,
                        gradient_kwargs={"dev": dev},
                        interface="torch"))

        r, phi = params.detach().numpy()

        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

        cost = jac[0, 1]
        cost.backward()
        hess = params.grad
        expected = np.array([
            4 * np.cosh(2 * r) * np.sin(2 * phi),
            4 * np.cos(2 * phi) * np.sinh(2 * r)
        ])
        assert np.allclose(hess.detach().numpy(), expected, atol=0.1, rtol=0)
Example #26
    def test_gradient_gate_with_multiple_parameters(self, tol, dev):
        """Tests that gates with multiple free parameters yield correct gradients."""
        x, y, z = [0.5, 0.3, -0.7]

        with qml.tape.JacobianTape() as tape:
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            qml.expval(qml.PauliZ(0))

        tape.trainable_params = {1, 2, 3}

        grad_D = dev.adjoint_jacobian(tape)
        grad_F = (lambda t, fn: fn(qml.execute(t, dev, None)))(
            *qml.gradients.finite_diff(tape))

        # gradient has the correct shape and every element is nonzero
        assert grad_D.shape == (1, 3)
        assert np.count_nonzero(grad_D) == 3
        # the different methods agree
        assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)
Example #27
    def test_tf(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using TF"""
        tf = pytest.importorskip("tensorflow")

        dev = qml.device("default.gaussian", wires=1)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(params[0], 0, wires=0)
                qml.Rotation(params[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tape.trainable_params = {0, 2}
            tapes, fn = param_shift_cv(tape, dev)
            jac = fn(
                qml.execute(tapes,
                            dev,
                            param_shift_cv,
                            gradient_kwargs={"dev": dev},
                            interface="tf"))
            res = jac[0, 1]

        r, phi = 1.0 * params

        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(jac, expected, atol=tol, rtol=0)

        grad = t.jacobian(res, params)
        expected = np.array([
            4 * np.cosh(2 * r) * np.sin(2 * phi),
            4 * np.cos(2 * phi) * np.sinh(2 * r)
        ])
        assert np.allclose(grad, expected, atol=tol, rtol=0)
Example #28
    def __call__(self, *args, **kwargs):
        override_shots = False

        if not self._qfunc_uses_shots_arg:
            # If shots specified in call but not in qfunc signature,
            # interpret it as device shots value for this call.
            override_shots = kwargs.pop("shots", False)

            if override_shots is not False:
                # Since shots has changed, we need to update the preferred gradient function.
                # This is because the gradient function chosen at initialization may
                # no longer be applicable.

                # store the initialization gradient function
                original_grad_fn = [
                    self.gradient_fn, self.gradient_kwargs, self.device
                ]

                # update the gradient function
                set_shots(self._original_device,
                          override_shots)(self._update_gradient_fn)()

        # construct the tape
        self.construct(args, kwargs)

        res = qml.execute(
            [self.tape],
            device=self.device,
            gradient_fn=self.gradient_fn,
            interface=self.interface,
            gradient_kwargs=self.gradient_kwargs,
            override_shots=override_shots,
            **self.execute_kwargs,
        )

        if autograd.isinstance(res, (tuple, list)) and len(res) == 1:
            # If a device batch transform was applied, we need to 'unpack'
            # the returned tuple/list to a float.
            #
            # Note that we use autograd.isinstance, because on the backwards pass
            # with Autograd, lists and tuples are converted to autograd.box.SequenceBox.
            # autograd.isinstance is a 'safer' isinstance check that supports
            # autograd backwards passes.
            #
            # TODO: find a more explicit way of determining that a batch transform
            # was applied.

            res = res[0]

        if override_shots is not False:
            # restore the initialization gradient function
            self.gradient_fn, self.gradient_kwargs, self.device = original_grad_fn

        self._update_original_device()

        if isinstance(self._qfunc_output,
                      Sequence) or (self.tape.is_sampled
                                    and self.device._has_partitioned_shots()):
            return res

        return qml.math.squeeze(res)
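
The __call__ implementations in Examples #17 and #28 pop a shots keyword that is not part of the quantum function's signature and treat it as a per-call override of the device shots, restoring the original gradient function afterwards. A minimal sketch of what this looks like from the user side (circuit and numbers are illustrative):

import pennylane as qml

dev = qml.device("default.qubit", wires=1, shots=None)

@qml.qnode(dev)
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

print(circuit(0.3))              # analytic expectation value (shots=None)
print(circuit(0.3, shots=1000))  # same QNode; device shots overridden for this call only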

    def cost(x):
        tape.set_parameters(x, trainable_only=False)
        tapes, fn = qml.transforms.hamiltonian_expand(tape)
        res = qml.execute(tapes, dev, qml.gradients.param_shift)
        return fn(res)