def test_tf(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using TF"""
        tf = pytest.importorskip("tensorflow")

        dev = qml.device("default.gaussian", wires=1)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)

        with tf.GradientTape() as t:
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(params[0], 0, wires=0)
                qml.Rotation(params[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tape.trainable_params = {0, 2}
            tapes, fn = param_shift_cv(tape, dev)
            jac = fn(
                qml.execute(
                    tapes, dev, param_shift_cv, gradient_kwargs={"dev": dev}, interface="tf"
                )
            )
            res = jac[0, 1]

        r, phi = 1.0 * params

        expected = np.array(
            [
                2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
                2 * np.sinh(2 * r) * np.sin(2 * phi),
            ]
        )
        assert np.allclose(jac, expected, atol=tol, rtol=0)

        grad = t.jacobian(res, params)
        expected = np.array(
            [4 * np.cosh(2 * r) * np.sin(2 * phi), 4 * np.cos(2 * phi) * np.sinh(2 * r)]
        )
        assert np.allclose(grad, expected, atol=tol, rtol=0)

    def test_torch(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using Torch."""
        torch = pytest.importorskip("torch")
        from pennylane.interfaces.torch import TorchInterface

        dev = qml.device("default.gaussian", wires=1)
        params = torch.tensor([0.543, -0.654],
                              dtype=torch.float64,
                              requires_grad=True)

        with TorchInterface.apply(qml.tape.CVParamShiftTape()) as tape:
            qml.Squeezing(params[0], 0, wires=0)
            qml.Rotation(params[1], wires=0)
            qml.var(qml.X(wires=[0]))

        tapes, fn = qml.gradients.param_shift_cv(tape, dev)
        jac = fn([t.execute(dev) for t in tapes])

        r, phi = params.detach().numpy()

        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

        cost = jac[0, 1]
        cost.backward()
        hess = params.grad
        expected = np.array([
            4 * np.cosh(2 * r) * np.sin(2 * phi),
            4 * np.cos(2 * phi) * np.sinh(2 * r)
        ])
        assert np.allclose(hess.detach().numpy(), expected, atol=0.1, rtol=0)
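
The expected arrays in the TF and Torch tests above follow from the closed-form variance of the X quadrature after Squeezing(r, 0) and Rotation(phi). The following is a minimal standalone sketch of that relation, assuming the hbar = 2 convention of "default.gaussian" and the same PennyLane version as the tests; dev_check and x_variance are illustrative names, not part of the original suite.

import numpy as np
import pennylane as qml

dev_check = qml.device("default.gaussian", wires=1)

@qml.qnode(dev_check)
def x_variance(r, phi):
    # same circuit as in the tests: squeeze along x, then rotate
    qml.Squeezing(r, 0, wires=0)
    qml.Rotation(phi, wires=0)
    return qml.var(qml.X(wires=[0]))

r, phi = 0.543, -0.654
analytic = np.exp(-2 * r) * np.cos(phi) ** 2 + np.exp(2 * r) * np.sin(phi) ** 2
assert np.isclose(x_variance(r, phi), analytic)
# differentiating `analytic` by hand gives exactly the entries asserted above:
# d var / d r   = 2 exp(2r) sin(phi)**2 - 2 exp(-2r) cos(phi)**2
# d var / d phi = 2 sinh(2r) sin(2 phi)
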
Example #3
    def test_tensor_number_displaced_squeezed(self, dev, disp_sq_circuit, pars,
                                              tol):
        """Test the variance of the TensorN observable for a squeezed displaced
        state"""

        # Compare the circuit variance with the analytic expression
        def squared_term(a, r, phi):
            """Analytic expression for <N^2>"""
            magnitude_squared = np.abs(a)**2
            squared_term = (
                -magnitude_squared + magnitude_squared**2 +
                2 * magnitude_squared * np.cosh(2 * r) -
                np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r) -
                np.exp(1j * phi) * np.conj(a)**2 * np.cosh(r) * np.sinh(r) +
                np.sinh(r)**4 + np.cosh(r) * np.sinh(r) * np.sinh(2 * r))
            return squared_term

        var = disp_sq_circuit(pars)

        n0 = np.sinh(rs0)**2 + np.abs(alpha0)**2
        n1 = np.sinh(rs1)**2 + np.abs(alpha1)**2
        expected = (squared_term(alpha0, rs0, phis0) *
                    squared_term(alpha1, rs1, phis1) - n0**2 * n1**2)
        assert np.allclose(var, expected, atol=tol, rtol=0)

    def test_torch(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using Torch."""
        torch = pytest.importorskip("torch")

        dev = qml.device("default.gaussian", wires=1)
        params = torch.tensor([0.543, -0.654], dtype=torch.float64, requires_grad=True)

        with qml.tape.JacobianTape() as tape:
            qml.Squeezing(params[0], 0, wires=0)
            qml.Rotation(params[1], wires=0)
            qml.var(qml.X(wires=[0]))

        tape.trainable_params = {0, 2}
        tapes, fn = qml.gradients.param_shift_cv(tape, dev)
        jac = fn(
            qml.execute(tapes, dev, param_shift_cv, gradient_kwargs={"dev": dev}, interface="torch")
        )

        r, phi = params.detach().numpy()

        expected = np.array(
            [
                2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
                2 * np.sinh(2 * r) * np.sin(2 * phi),
            ]
        )
        assert np.allclose(jac.detach().numpy(), expected, atol=tol, rtol=0)

        cost = jac[0, 1]
        cost.backward()
        hess = params.grad
        expected = np.array(
            [4 * np.cosh(2 * r) * np.sin(2 * phi), 4 * np.cos(2 * phi) * np.sinh(2 * r)]
        )
        assert np.allclose(hess.detach().numpy(), expected, atol=0.1, rtol=0)
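
The backward() call above asserts one row of the Hessian of var(X). Those numbers can be cross-checked without running a device by pushing the closed-form d var/d phi through Torch autograd; below is a hedged sketch (not from the test suite, variable names are illustrative).

import numpy as np
import torch

r_val, phi_val = 0.543, -0.654
params = torch.tensor([r_val, phi_val], dtype=torch.float64, requires_grad=True)

# closed-form d var(X)/d phi -- the same quantity as jac[0, 1] in the test above
dvar_dphi = 2 * torch.sinh(2 * params[0]) * torch.sin(2 * params[1])
dvar_dphi.backward()

expected = np.array(
    [4 * np.cosh(2 * r_val) * np.sin(2 * phi_val), 4 * np.cos(2 * phi_val) * np.sinh(2 * r_val)]
)
assert np.allclose(params.grad.numpy(), expected)
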
Example #5
    def test_finite_diff_squeezed(self, tol):
        """Test that the jacobian of the probability for a squeezed states is
        approximated well with finite differences"""
        cutoff = 5

        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, phi, wires=0)
            return qml.probs(wires=[0])

        r = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # construct tape
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter r
        circuit.qtape.trainable_params = {0}
        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        assert res_F.shape == (cutoff,)

        expected_gradient = (
            np.abs(np.tanh(r)) ** n
            * (1 + 2 * n - np.cosh(2 * r))
            * fac(n)
            / (2 ** (n + 1) * np.cosh(r) ** 2 * np.sinh(r) * fac(n / 2) ** 2)
        )
        expected_gradient[n % 2 != 0] = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # re-construct tape to reset trainable_params
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter phi
        circuit.qtape.trainable_params = {1}

        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

    def test_squeezed_mean_photon_variance(self, tol):
        """Test the gradient of the variance of the position quadrature
        of a squeezed state"""
        dev = qml.device("default.gaussian", wires=1)

        r = 0.12
        phi = 0.105

        with qml.tape.JacobianTape() as tape:
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            qml.var(qml.X(wires=[0]))

        tape.trainable_params = {0, 2}
        tapes, fn = param_shift_cv(tape, dev)
        grad = fn(dev.batch_execute(tapes))
        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(grad, expected, atol=tol, rtol=0)
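
For reference, test_finite_diff_squeezed above differentiates the photon-number distribution of a single-mode squeezed vacuum state, which vanishes for odd n and equals P(2m) = (2m)! / (2**(2m) * (m!)**2) * tanh(r)**(2m) / cosh(r) for even n = 2m. The sketch below writes out that distribution under those standard assumptions; squeezed_vacuum_probs is an illustrative helper name, not part of the test suite.

import numpy as np
from scipy.special import factorial as fac

def squeezed_vacuum_probs(r, cutoff):
    """Photon-number distribution of a squeezed vacuum state up to a Fock cutoff."""
    n = np.arange(cutoff)
    probs = np.zeros(cutoff)
    even = n[n % 2 == 0]
    # P(2m) = (2m)! / (2**(2m) * (m!)**2) * tanh(r)**(2m) / cosh(r); odd terms stay zero
    probs[even] = (
        fac(even) / (2 ** even * fac(even // 2) ** 2) * np.tanh(r) ** even / np.cosh(r)
    )
    return probs

# sanity check: the distribution is essentially normalized well below the cutoff
assert abs(squeezed_vacuum_probs(0.4, 30).sum() - 1.0) < 1e-6
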
Example #7
def pd_sr(rs0, phis0, rd0, phid0, rs1, phis1, rd1, phid1):
    """Analytic expression for the partial derivative with respect to
    the r argument of the first squeezing operation (rs0)"""
    return (
        (0.25 + rd0**2 * (-0.25 - 2 * rd1**2 + 2 * rd1**4)
         + (-(rd1**2) + rd0**2 * (-1 + 6 * rd1**2)) * np.cosh(2 * rs1)
         + (-0.25 + 1.25 * rd0**2) * np.cosh(4 * rs1)) * np.sinh(2 * rs0)
        + (-(rd1**2) + rd1**4
           + (-0.5 + 2.5 * rd1**2) * np.cosh(2 * rs1)
           + 0.5 * np.cosh(4 * rs1)) * np.sinh(4 * rs0)
        + rd1**2 * np.cos(2 * phid1 - phis1)
        * ((1 - 4 * rd0**2) * np.sinh(2 * rs0) - 1.5 * np.sinh(4 * rs0))
        * np.sinh(2 * rs1)
        + rd0**2 * np.cos(2 * phid0 - phis0) * np.cosh(2 * rs0)
        * (-0.25 + 2 * rd1**2 - 2 * rd1**4
           + (1 - 4 * rd1**2) * np.cosh(2 * rs1) - 0.75 * np.cosh(4 * rs1)
           + 2 * rd1**2 * np.cos(2 * phid1 - phis1) * np.sinh(2 * rs1))
    )

    def test_autograd_gradient(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be differentiated using autograd, yielding second derivatives."""
        dev = qml.device("default.gaussian", wires=1)

        r = 0.12
        phi = 0.105

        def cost_fn(x):
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(x[0], 0, wires=0)
                qml.Rotation(x[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tapes, fn = param_shift_cv(tape, dev)
            jac = fn(qml.execute(tapes, dev, param_shift_cv, gradient_kwargs={"dev": dev}))
            return jac[0, 2]

        params = np.array([r, phi], requires_grad=True)
        grad = qml.jacobian(cost_fn)(params)
        expected = np.array(
            [4 * np.cosh(2 * r) * np.sin(2 * phi), 4 * np.cos(2 * phi) * np.sinh(2 * r)]
        )
        assert np.allclose(grad, expected, atol=tol, rtol=0)
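
Both the pd_sr helper and the disp_sq_circuit fixture used by test_tensor_number_displaced_squeezed refer to a two-mode displaced-squeezed state measured with the TensorN observable. The fixture itself lives elsewhere in the test suite; the sketch below is a hypothetical reconstruction whose parameter ordering follows pd_sr's signature, not the original fixture.

import pennylane as qml

def make_disp_sq_circuit(device):
    """Hypothetical two-mode displaced-squeezed circuit returning var(TensorN)."""

    @qml.qnode(device)
    def circuit(pars):
        rs0, phis0, rd0, phid0, rs1, phis1, rd1, phid1 = pars
        # prepare each mode with a squeezing followed by a displacement
        qml.Squeezing(rs0, phis0, wires=0)
        qml.Displacement(rd0, phid0, wires=0)
        qml.Squeezing(rs1, phis1, wires=1)
        qml.Displacement(rd1, phid1, wires=1)
        # variance of the tensor-product number observable n0 (x) n1
        return qml.var(qml.TensorN(wires=[0, 1]))

    return circuit

A hypothetical call would pass a Fock-capable device (for example strawberryfields.fock with two wires and a cutoff) to make_disp_sq_circuit and then evaluate circuit(pars), which is the quantity the analytic squared_term expression and pd_sr differentiate.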