def test_complex_min_version(self, monkeypatch):
    """Test that an error is raised when a complex dtype is passed to the
    apply() method while a version of torch before 1.6.0 is installed"""
    with monkeypatch.context() as m:
        m.setattr(qml.interfaces.torch, "COMPLEX_SUPPORT", False)
        with pytest.raises(
            qml.QuantumFunctionError, match=r"Version 1\.6\.0 or above of PyTorch"
        ):
            TorchInterface.apply(JacobianTape(), dtype=torch.complex128)
def test_repeated_interface_construction(self):
    """Test that the interface is correctly applied multiple times"""
    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RX(0.5, wires=0)
        qml.expval(qml.PauliX(0))

    assert tape.interface == "torch"
    assert isinstance(tape, TorchInterface)
    assert tape.__bare__ == JacobianTape

    TorchInterface.apply(tape, dtype=torch.float32)
    assert tape.interface == "torch"
    assert isinstance(tape, TorchInterface)
    assert tape.__bare__ == JacobianTape
    assert tape.dtype is torch.float32
def test_classical_processing(self, tol):
    """Test classical processing within the quantum tape"""
    p_val = [0.1, 0.2]
    params = torch.tensor(p_val, requires_grad=True)

    dev = qml.device("default.qubit", wires=1)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RY(params[0] * params[1], wires=0)
        qml.RZ(0.2, wires=0)
        qml.RX(params[1] + params[1] ** 2 + torch.sin(params[0]), wires=0)
        qml.expval(qml.PauliZ(0))

    assert tape.trainable_params == [0, 2]

    tape_params = [i.detach().numpy() for i in tape.get_parameters()]
    assert np.allclose(
        tape_params,
        [p_val[0] * p_val[1], p_val[1] + p_val[1] ** 2 + np.sin(p_val[0])],
        atol=tol,
        rtol=0,
    )

    res = tape.execute(dev)
    res.backward()

    assert isinstance(params.grad, torch.Tensor)
    assert params.shape == (2,)
def test_torch(self, tol):
    """Tests that the output of the parameter-shift transform
    can be differentiated using Torch, yielding second derivatives."""
    torch = pytest.importorskip("torch")
    from pennylane.interfaces.torch import TorchInterface

    dev = qml.device("default.qubit", wires=2)
    params = torch.tensor([0.543, -0.654], dtype=torch.float64, requires_grad=True)

    with TorchInterface.apply(qml.tape.QubitParamShiftTape()) as tape:
        qml.RX(params[0], wires=[0])
        qml.RY(params[1], wires=[1])
        qml.CNOT(wires=[0, 1])
        qml.var(qml.PauliZ(0) @ qml.PauliX(1))

    tapes, fn = qml.gradients.param_shift(tape)
    jac = fn([t.execute(dev) for t in tapes])

    cost = jac[0, 0]
    cost.backward()
    hess = params.grad

    x, y = params.detach().numpy()

    expected = np.array([np.sin(2 * x) * np.sin(y) ** 2, -np.cos(x) ** 2 * np.sin(2 * y)])
    assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

    expected = np.array([2 * np.cos(2 * x) * np.sin(y) ** 2, np.sin(2 * x) * np.sin(2 * y)])
    assert np.allclose(hess.detach().numpy(), expected, atol=0.1, rtol=0)
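# Sanity check on the numbers above (a derivation sketch, not part of the
# original suite): for RX(x) on wire 0 and RY(y) on wire 1 followed by a CNOT,
# <Z(0) @ X(1)> = cos(x) * sin(y), so var(Z(0) @ X(1)) = 1 - cos(x)**2 * sin(y)**2.
# Differentiating once gives the Jacobian [sin(2x) * sin(y)**2, -cos(x)**2 * sin(2y)],
# and differentiating jac[0, 0] again gives the Hessian row
# [2 * cos(2x) * sin(y)**2, sin(2x) * sin(2y)] asserted against params.grad.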
def test_jacobian_dtype(self, tol):
    """Test calculating the jacobian with a different datatype"""
    a_val = 0.1
    b_val = 0.2

    a = torch.tensor(a_val, requires_grad=True, dtype=torch.float32)
    b = torch.tensor(b_val, requires_grad=True, dtype=torch.float32)

    dev = qml.device("default.qubit", wires=2)

    with TorchInterface.apply(JacobianTape(), dtype=torch.float32) as tape:
        qml.RY(a, wires=0)
        qml.RX(b, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
        qml.expval(qml.PauliY(1))

    assert tape.trainable_params == [0, 1]

    res = tape.execute(dev)

    assert isinstance(res, torch.Tensor)
    assert res.shape == (2,)
    assert res.dtype is torch.float32

    loss = torch.sum(res)
    loss.backward()
    assert a.grad.dtype is torch.float32
    assert b.grad.dtype is torch.float32
def test_ragged_differentiation(self, tol):
    """Tests correct output shape and evaluation for a tape
    with prob and expval outputs"""
    dev = qml.device("default.qubit", wires=2)

    x_val = 0.543
    y_val = -0.654
    x = torch.tensor(x_val, requires_grad=True)
    y = torch.tensor(y_val, requires_grad=True)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RX(x, wires=[0])
        qml.RY(y, wires=[1])
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
        qml.probs(wires=[1])

    res = tape.execute(dev)

    expected = np.array(
        [
            np.cos(x_val),
            (1 + np.cos(x_val) * np.cos(y_val)) / 2,
            (1 - np.cos(x_val) * np.cos(y_val)) / 2,
        ]
    )
    assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)

    loss = torch.sum(res)
    loss.backward()
    expected = np.array(
        [
            -np.sin(x_val)
            + -np.sin(x_val) * np.cos(y_val) / 2
            + np.cos(y_val) * np.sin(x_val) / 2,
            -np.cos(x_val) * np.sin(y_val) / 2 + np.cos(x_val) * np.sin(y_val) / 2,
        ]
    )
    assert np.allclose(x.grad, expected[0], atol=tol, rtol=0)
    assert np.allclose(y.grad, expected[1], atol=tol, rtol=0)
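# Note on the expected gradients above: the two marginal probabilities on
# wire 1 always sum to 1, so the summed loss reduces to cos(x) + 1. The
# y-gradient is therefore exactly zero (which is why the two terms of
# expected[1] cancel), while the x-gradient collapses to -sin(x).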
def test_torch(self, tol):
    """Tests that the output of the VJP transform
    can be differentiated using Torch."""
    torch = pytest.importorskip("torch")
    from pennylane.interfaces.torch import TorchInterface

    dev = qml.device("default.qubit", wires=2)
    params = torch.tensor([0.543, -0.654], requires_grad=True, dtype=torch.float64)
    dy = torch.tensor([-1.0, 0.0, 0.0, 1.0], dtype=torch.float64)

    # `ansatz`, `expected`, and `param_shift` are module-level helpers
    # defined elsewhere in this test module
    with TorchInterface.apply(qml.tape.QubitParamShiftTape()) as tape:
        ansatz(params[0], params[1])

    tape.trainable_params = {0, 1}
    tapes, fn = qml.gradients.vjp(tape, dy, param_shift)
    vjp = fn([t.execute(dev) for t in tapes])

    assert np.allclose(vjp.detach(), expected(params.detach()), atol=tol, rtol=0)

    cost = vjp[0]
    cost.backward()

    exp = qml.jacobian(lambda x: expected(x)[0])(params.detach().numpy())
    assert np.allclose(params.grad, exp, atol=tol, rtol=0)
def test_torch(self, approx_order, strategy, tol):
    """Tests that the output of the finite-difference transform
    can be differentiated using Torch, yielding second derivatives."""
    torch = pytest.importorskip("torch")
    from pennylane.interfaces.torch import TorchInterface

    dev = qml.device("default.qubit", wires=2)
    params = torch.tensor([0.543, -0.654], dtype=torch.float64, requires_grad=True)

    with TorchInterface.apply(qml.tape.QubitParamShiftTape()) as tape:
        qml.RX(params[0], wires=[0])
        qml.RY(params[1], wires=[1])
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0) @ qml.PauliX(1))

    tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)
    jac = fn([t.execute(dev) for t in tapes])

    cost = torch.sum(jac)
    cost.backward()
    hess = params.grad

    x, y = params.detach().numpy()

    expected = np.array([-np.sin(x) * np.sin(y), np.cos(x) * np.cos(y)])
    assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

    expected = np.array(
        [
            [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],
            [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],
        ]
    )
    assert np.allclose(hess.detach().numpy(), np.sum(expected, axis=0), atol=tol, rtol=0)
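# Note on the Hessian check above: cost = torch.sum(jac) backpropagates through
# every Jacobian entry at once, so params.grad holds the column sums of the
# Hessian of <Z(0) @ X(1)> = cos(x) * sin(y); hence the comparison against
# np.sum(expected, axis=0).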
def test_differentiable_expand(self, tol):
    """Test that operation and nested tape expansion is differentiable"""

    class U3(qml.U3):
        def expand(self):
            tape = JacobianTape()
            theta, phi, lam = self.data
            wires = self.wires
            tape._ops += [
                qml.Rot(lam, theta, -lam, wires=wires),
                qml.PhaseShift(phi + lam, wires=wires),
            ]
            return tape

    tape = JacobianTape()

    dev = qml.device("default.qubit", wires=1)
    a = np.array(0.1)
    p_val = [0.1, 0.2, 0.3]
    p = torch.tensor(p_val, requires_grad=True)

    with tape:
        qml.RX(a, wires=0)
        U3(p[0], p[1], p[2], wires=0)
        qml.expval(qml.PauliX(0))

    tape = TorchInterface.apply(tape.expand())

    assert tape.trainable_params == [1, 2, 3, 4]
    assert [i.name for i in tape.operations] == ["RX", "Rot", "PhaseShift"]

    tape_params = [i.detach().numpy() for i in tape.get_parameters()]
    assert np.allclose(
        tape_params,
        [p_val[2], p_val[0], -p_val[2], p_val[1] + p_val[2]],
        atol=tol,
        rtol=0,
    )

    res = tape.execute(device=dev)

    expected = np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0]) + np.sin(a) * (
        np.cos(p_val[2]) * np.sin(p_val[1])
        + np.cos(p_val[0]) * np.cos(p_val[1]) * np.sin(p_val[2])
    )
    assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)

    res.backward()
    expected = np.array(
        [
            np.cos(p_val[1])
            * (np.cos(a) * np.cos(p_val[0]) - np.sin(a) * np.sin(p_val[0]) * np.sin(p_val[2])),
            np.cos(p_val[1]) * np.cos(p_val[2]) * np.sin(a)
            - np.sin(p_val[1])
            * (np.cos(a) * np.sin(p_val[0]) + np.cos(p_val[0]) * np.sin(a) * np.sin(p_val[2])),
            np.sin(a)
            * (np.cos(p_val[0]) * np.cos(p_val[1]) * np.cos(p_val[2]) - np.sin(p_val[1]) * np.sin(p_val[2])),
        ]
    )
    assert np.allclose(p.grad, expected, atol=tol, rtol=0)
def test_interface_construction(self):
    """Test that the interface is correctly applied"""
    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RX(0.5, wires=0)
        qml.expval(qml.PauliX(0))

    assert tape.interface == "torch"
    assert isinstance(tape, TorchInterface)
    assert tape.__bare__ == JacobianTape
def to_torch(self, dtype=None):
    """Apply the Torch interface to the internal quantum tape.

    Args:
        dtype (torch.dtype): The dtype that the Torch QNode should
            output. If not provided, the default is ``torch.float64``.

    Raises:
        .QuantumFunctionError: if PyTorch >= 1.3 is not installed
    """
    # pylint: disable=import-outside-toplevel
    try:
        import torch
        from pennylane.interfaces.torch import TorchInterface

        if self.interface != "torch" and self.interface is not None:
            # Since the interface is changing, need to re-validate the tape class.
            self._tape, interface, self.device, diff_options = self.get_tape(
                self._original_device, "torch", self.diff_method
            )

            self.interface = interface
            self.diff_options.update(diff_options)
        else:
            self.interface = "torch"

            if not isinstance(self.dtype, torch.dtype):
                self.dtype = None

        self.dtype = dtype or self.dtype or TorchInterface.dtype

        if self.dtype is np.complex128:
            self.dtype = torch.complex128

        if self.qtape is not None:
            TorchInterface.apply(self.qtape, dtype=self.dtype)

    except ImportError as e:
        raise qml.QuantumFunctionError(
            "PyTorch not found. Please install the latest "
            "version of PyTorch to enable the 'torch' interface."
        ) from e
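# A minimal usage sketch for to_torch() (illustrative only, not part of the
# source): it assumes a QNode built with the standard qml.qnode decorator on
# default.qubit. After conversion, executing the QNode returns torch tensors
# and backward() computes gradients through the circuit:
#
#     import torch
#     import pennylane as qml
#
#     dev = qml.device("default.qubit", wires=1)
#
#     @qml.qnode(dev)
#     def circuit(x):
#         qml.RX(x, wires=0)
#         return qml.expval(qml.PauliZ(0))
#
#     circuit.to_torch()  # switch the QNode to the torch interface
#
#     x = torch.tensor(0.3, requires_grad=True, dtype=torch.float64)
#     res = circuit(x)
#     res.backward()  # x.grad now holds d<Z>/dx = -sin(0.3)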
def test_repeated_application_after_expand(self, tol):
    """Test that the Torch interface continues to work after tape expansion
    and repeated application of the interface"""
    n_qubits = 2
    dev = qml.device("default.qubit", wires=n_qubits)

    weights = torch.ones((3,))

    with TorchInterface.apply(qml.tape.QuantumTape()) as tape:
        qml.U3(*weights, wires=0)
        qml.expval(qml.PauliZ(wires=0))

    tape = tape.expand()

    res1 = tape.execute(dev)

    TorchInterface.apply(tape)
    res2 = tape.execute(dev)

    assert np.allclose(res1, res2, atol=tol, rtol=0)
def test_sampling(self):
    """Test sampling works as expected"""
    dev = qml.device("default.qubit", wires=2, shots=10)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.Hadamard(wires=[0])
        qml.CNOT(wires=[0, 1])
        qml.sample(qml.PauliZ(0))
        qml.sample(qml.PauliX(1))

    res = tape.execute(dev)

    assert res.shape == (2, 10)
    assert isinstance(res, torch.Tensor)
def test_execution(self):
    """Test execution"""
    a = torch.tensor(0.1, requires_grad=True)
    dev = qml.device("default.qubit", wires=1)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RY(a, wires=0)
        qml.RX(torch.tensor(0.2), wires=0)
        qml.expval(qml.PauliZ(0))

    assert tape.trainable_params == [0]
    res = tape.execute(dev)

    assert isinstance(res, torch.Tensor)
    assert res.shape == (1,)
def test_get_parameters(self):
    """Test that get_parameters correctly returns the trainable parameters"""
    a = torch.tensor(0.1, requires_grad=True)
    b = torch.tensor(0.2)
    c = torch.tensor(0.3, requires_grad=True)
    d = 0.4

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.Rot(a, b, c, wires=0)
        qml.RX(d, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliX(0))

    assert tape.trainable_params == [0, 2]
    assert np.all(tape.get_parameters() == [a, c])
def test_jacobian_options(self, mocker, tol):
    """Test setting jacobian options"""
    spy = mocker.spy(JacobianTape, "numeric_pd")

    a = torch.tensor([0.1, 0.2], requires_grad=True)

    dev = qml.device("default.qubit", wires=1)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RY(a[0], wires=0)
        qml.RX(a[1], wires=0)
        qml.expval(qml.PauliZ(0))

    tape.jacobian_options = {"h": 1e-8, "order": 2}

    res = tape.execute(dev)
    res.backward()

    for args in spy.call_args_list:
        assert args[1]["order"] == 2
        assert args[1]["h"] == 1e-8
def test_matrix_parameter(self, U, tol):
    """Test that the Torch interface works correctly
    with a matrix parameter"""
    a_val = 0.1
    a = torch.tensor(a_val, requires_grad=True)

    dev = qml.device("default.qubit", wires=2)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.QubitUnitary(U, wires=0)
        qml.RY(a, wires=0)
        qml.expval(qml.PauliZ(0))

    assert tape.trainable_params == [1]

    res = tape.execute(dev)

    assert np.allclose(res.detach().numpy(), -np.cos(a_val), atol=tol, rtol=0)

    res.backward()
    assert np.allclose(a.grad, np.sin(a_val), atol=tol, rtol=0)
def test_jacobian(self, mocker, tol):
    """Test jacobian calculation"""
    spy = mocker.spy(JacobianTape, "jacobian")

    a_val = 0.1
    b_val = 0.2

    a = torch.tensor(a_val, requires_grad=True)
    b = torch.tensor(b_val, requires_grad=True)

    dev = qml.device("default.qubit", wires=2)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RZ(torch.tensor(0.543), wires=0)
        qml.RY(a, wires=0)
        qml.RX(b, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
        qml.expval(qml.PauliY(1))

    assert tape.trainable_params == [1, 2]

    res = tape.execute(dev)

    assert isinstance(res, torch.Tensor)
    assert res.shape == (2,)

    expected = [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)]
    assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)

    loss = torch.sum(res)
    loss.backward()

    expected = [
        -np.sin(a_val) + np.sin(a_val) * np.sin(b_val),
        -np.cos(a_val) * np.cos(b_val),
    ]
    assert np.allclose(a.grad, expected[0], atol=tol, rtol=0)
    assert np.allclose(b.grad, expected[1], atol=tol, rtol=0)

    spy.assert_called()
def test_reusing_quantum_tape(self, tol):
    """Test re-using a quantum tape by passing new parameters"""
    a = torch.tensor(0.1, requires_grad=True)
    b = torch.tensor(0.2, requires_grad=True)

    dev = qml.device("default.qubit", wires=2)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RY(a, wires=0)
        qml.RX(b, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
        qml.expval(qml.PauliY(1))

    assert tape.trainable_params == [0, 1]

    loss = torch.sum(tape.execute(dev))
    loss.backward()

    a_val = 0.54
    b_val = 0.8
    a = torch.tensor(a_val, requires_grad=True)
    b = torch.tensor(b_val, requires_grad=True)
    res2 = tape.execute(dev, params=[2 * a, b])

    expected = [np.cos(2 * a_val), -np.cos(2 * a_val) * np.sin(b_val)]
    assert np.allclose(res2.detach().numpy(), expected, atol=tol, rtol=0)

    loss = torch.sum(res2)
    loss.backward()

    expected = [
        -2 * np.sin(2 * a_val) + 2 * np.sin(2 * a_val) * np.sin(b_val),
        -np.cos(2 * a_val) * np.cos(b_val),
    ]
    assert np.allclose(a.grad, expected[0], atol=tol, rtol=0)
    assert np.allclose(b.grad, expected[1], atol=tol, rtol=0)
def test_no_trainable_parameters(self, tol):
    """Test evaluation and Jacobian if there are no trainable parameters"""
    dev = qml.device("default.qubit", wires=2)

    with TorchInterface.apply(JacobianTape()) as tape:
        qml.RY(0.2, wires=0)
        qml.RX(torch.tensor(0.1), wires=0)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))
        qml.expval(qml.PauliZ(1))

    assert tape.trainable_params == []

    res = tape.execute(dev)

    assert res.shape == (2,)
    assert isinstance(res, torch.Tensor)

    with pytest.raises(
        RuntimeError,
        match="element 0 of tensors does not require grad and does not have a grad_fn",
    ):
        res.backward()
def test_torch(self, tol):
    """Tests that the output of the CV parameter-shift transform
    can be differentiated using Torch, yielding second derivatives."""
    torch = pytest.importorskip("torch")
    from pennylane.interfaces.torch import TorchInterface

    dev = qml.device("default.gaussian", wires=1)
    params = torch.tensor([0.543, -0.654], dtype=torch.float64, requires_grad=True)

    with TorchInterface.apply(qml.tape.CVParamShiftTape()) as tape:
        qml.Squeezing(params[0], 0, wires=0)
        qml.Rotation(params[1], wires=0)
        qml.var(qml.X(wires=[0]))

    tapes, fn = qml.gradients.param_shift_cv(tape, dev)
    jac = fn([t.execute(dev) for t in tapes])

    r, phi = params.detach().numpy()

    expected = np.array(
        [
            2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ]
    )
    assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

    cost = jac[0, 1]
    cost.backward()
    hess = params.grad

    expected = np.array(
        [4 * np.cosh(2 * r) * np.sin(2 * phi), 4 * np.cos(2 * phi) * np.sinh(2 * r)]
    )
    assert np.allclose(hess.detach().numpy(), expected, atol=0.1, rtol=0)
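# Sanity check on the numbers above (a derivation sketch, not part of the
# original suite): with the hbar = 2 convention of default.gaussian, a squeezed
# vacuum state rotated by phi has
# var(X) = exp(2r) * sin(phi)**2 + exp(-2r) * cos(phi)**2.
# Differentiating wrt (r, phi) gives the asserted Jacobian, and differentiating
# jac[0, 1] = 2 * sinh(2r) * sin(2 * phi) once more gives the Hessian row
# [4 * cosh(2r) * sin(2 * phi), 4 * cos(2 * phi) * sinh(2r)].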