Example #1
    def test_adam_optimizer_univar(self, x_start, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
        grad_uni_fns = [
            lambda x: (np.cos(x),),
            lambda x: (np.exp(x / 10.0) / 10.0,),
            lambda x: (2 * x,),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            adam_opt.reset()

            x_onestep = adam_opt.step(f, x_start)
            adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
            firstmoment = (1 - gamma) * gradf(x_start)[0]
            secondmoment = (1 - delta) * gradf(x_start)[0] * gradf(x_start)[0]
            x_onestep_target = x_start - adapted_stepsize * firstmoment / (
                np.sqrt(secondmoment) + 1e-8
            )
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = adam_opt.step(f, x_onestep)
            adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
            firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
            secondmoment = (
                delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
            )
            x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                np.sqrt(secondmoment) + 1e-8
            )
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
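
For reference, a minimal plain-NumPy sketch (hypothetical adam_step helper, not part of the test suite) of the bias-corrected Adam rule the two targets above unroll:

import numpy as np

def adam_step(x, grad, m, v, t, stepsize=0.1, beta1=0.5, beta2=0.8, eps=1e-8):
    """One Adam update; m and v are the running first and second
    moments, t is the 1-based step count."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    # both bias corrections are folded into the adapted stepsize
    adapted = stepsize * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    return x - adapted * m / (np.sqrt(v) + eps), m, v

# two steps on f(x) = x**2, whose gradient is 2 * x
x, m, v = 1.0, 0.0, 0.0
for t in (1, 2):
    x, m, v = adam_step(x, 2 * x, m, v, t)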
Example #2
    def test_adagrad_optimizer_univar(self, x_start, tol):
        """Tests that adagrad optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize = 0.1
        adag_opt = AdagradOptimizer(stepsize)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
        grad_uni_fns = [
            lambda x: (np.cos(x),),
            lambda x: (np.exp(x / 10.0) / 10.0,),
            lambda x: (2 * x,),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            adag_opt.reset()

            x_onestep = adag_opt.step(f, x_start)
            past_grads = gradf(x_start)[0] * gradf(x_start)[0]
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = adag_opt.step(f, x_onestep)
            past_grads = (
                gradf(x_start)[0] * gradf(x_start)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
            )
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
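
A minimal sketch (hypothetical adagrad_step, plain NumPy) of the rule the targets above encode: Adagrad sums all past squared gradients and divides the stepsize by their square root.

import numpy as np

def adagrad_step(x, grad, accum, stepsize=0.1, eps=1e-8):
    """One Adagrad update with a running sum of squared gradients."""
    accum = accum + grad * grad
    return x - stepsize * grad / np.sqrt(accum + eps), accum

x, accum = 1.0, 0.0
for _ in range(2):  # two steps on f(x) = x**2
    x, accum = adagrad_step(x, 2 * x, accum)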
Example #3
    def predict_proba(self, features):
        """Predicts the probabilities of a prediction for an
           array of features

        Args:
            features (array):features to be predicted

        Returns:
            preds: float or int
                prediction of the model
        """
        model_output = [
            self.neural_network(self.var, features=x_) for x_ in features
        ]

        if self.type_problem == "classification":
            model_output = np.array(model_output)
            predicted_probabilities = 0.5 + 0.5 * model_output

        elif self.type_problem == "multiclassification":
            if self.interface == "autograd":
                predicted_probabilities = np.exp(model_output) / \
                        np.sum(np.exp(model_output), axis=1)[:, None]
            elif self.interface == "tf":
                predicted_probabilities = tf.nn.softmax(model_output)
        else:
            raise ValueError(
                "Cannot predict probabilities when type_problem is set to "
                f"'{self.type_problem}'"
            )

        return predicted_probabilities
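
A minimal sketch of the two probability mappings used above, assuming (as the classification branch does) that the network outputs expectation values in [-1, 1]; the arrays here are hypothetical:

import numpy as np

# binary case: affine map from [-1, 1] onto [0, 1]
model_output = np.array([-0.8, 0.0, 0.6])
binary_probs = 0.5 + 0.5 * model_output

# multiclass case: row-wise softmax, as in the autograd branch
logits = np.array([[0.2, -0.1, 0.4], [1.0, 0.0, -1.0]])
multi_probs = np.exp(logits) / np.sum(np.exp(logits), axis=1)[:, None]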
Example #4
    def test_gradient_descent_optimizer_multivar(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                x_vec = x_vals[jdx:jdx + 2]
                x_new = sgd_opt.step(f, x_vec)
                x_correct = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_new, x_correct, atol=tol)
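
The target in this loop is just the vanilla gradient-descent rule; a one-function sketch (hypothetical gd_step):

import numpy as np

def gd_step(x, grad, stepsize=0.1):
    """One vanilla gradient-descent update: move against the gradient."""
    return x - stepsize * grad

x = np.array([0.5, -1.2])
x = gd_step(x, 2 * x)  # one step on f(x) = sum(x**2)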
Example #5
    def test_first_order_observable(self, diff_method, kwargs, tol):
        """Test variance of a first order CV observable"""
        dev = qml.device("default.gaussian", wires=1)

        r = 0.543
        phi = -0.654

        @qnode(dev, interface="jax", diff_method=diff_method, **kwargs)
        def circuit(r, phi):
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            return qml.var(qml.X(0))

        res = circuit(r, phi)
        expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2
        assert np.allclose(res, expected, atol=tol, rtol=0)

        # circuit jacobians
        res = jax.grad(circuit, argnums=[0, 1])(r, phi)
        expected = np.array(
            [
                2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
                2 * np.sinh(2 * r) * np.sin(2 * phi),
            ]
        )
        assert np.allclose(res, expected, atol=tol, rtol=0)
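
The expected value can be rederived from Gaussian covariance algebra: squeezing scales the x-variance by exp(-2r) and the p-variance by exp(2r), and the rotation mixes the two. A minimal NumPy check, assuming the hbar = 2 convention of default.gaussian:

import numpy as np

r, phi, hbar = 0.543, -0.654, 2

# squeezed-vacuum covariance matrix, then rotate by phi
cov = np.diag([np.exp(-2 * r), np.exp(2 * r)]) * hbar / 2
R = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
cov = R @ cov @ R.T

# Var(x) matches the closed form used in the test
assert np.isclose(
    cov[0, 0], np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2
)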
Example #6
    def test_exp(self):
        """Tests multiarg gradients with exp and tanh functions."""
        x = -2.5
        y = 1.5
        gradf = lambda x, y: (
            np.exp(x / 3) / 3 * np.tanh(y),
            np.exp(x / 3) * (1 - np.tanh(y)**2),
        )
        f = lambda x, y: np.exp(x / 3) * np.tanh(y)

        # gradient wrt first argument
        gx = qml.grad(f, 0)
        auto_gradx = gx(x, y)
        correct_gradx = gradf(x, y)[0]
        assert np.allclose(auto_gradx, correct_gradx)

        # gradient wrt second argument
        gy = qml.grad(f, 1)
        auto_grady = gy(x, y)
        correct_grady = gradf(x, y)[1]
        assert np.allclose(auto_grady, correct_grady)

        # gradient wrt both arguments
        gxy = qml.grad(f, [0, 1])
        auto_gradxy = gxy(x, y)
        correct_gradxy = gradf(x, y)
        assert np.allclose(auto_gradxy, correct_gradxy)
Example #7
    def test_first_order_cv(self, tol):
        """Test variance of a first order CV expectation value"""
        dev = qml.device("strawberryfields.gaussian", wires=1)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            return qml.var(qml.X(0))

        r = 0.543
        phi = -0.654

        var = circuit(r, phi)
        expected = np.exp(2 * r) * np.sin(phi)**2 + np.exp(-2 * r) * np.cos(phi)**2
        assert np.allclose(var, expected, atol=tol, rtol=0)

        # circuit jacobians
        gradA = circuit.jacobian([r, phi], method="A")
        gradF = circuit.jacobian([r, phi], method="F")
        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(gradA, expected, atol=tol, rtol=0)
        assert np.allclose(gradF, expected, atol=tol, rtol=0)
Example #8
    def test_momentum_optimizer_univar(self, x_start, tol):
        """Tests that momentum optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma = 0.1, 0.5
        mom_opt = MomentumOptimizer(stepsize, momentum=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            mom_opt.reset()

            x_onestep = mom_opt.step(f, x_start)
            x_onestep_target = x_start - gradf(x_start)[0] * stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = mom_opt.step(f, x_onestep)
            momentum_term = gamma * gradf(x_start)[0]
            x_twosteps_target = x_onestep - (gradf(x_onestep)[0] +
                                             momentum_term) * stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
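
A minimal sketch (hypothetical momentum_step) of the heavy-ball rule the two targets above unroll:

import numpy as np

def momentum_step(x, grad, accum, stepsize=0.1, momentum=0.5):
    """One momentum update: the accumulator blends past gradients
    with the current one."""
    accum = momentum * accum + grad
    return x - stepsize * accum, accum

x, accum = 1.0, 0.0
for _ in range(2):  # two steps on f(x) = x**2
    x, accum = momentum_step(x, 2 * x, accum)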
Example #9
    def test_first_order_cv(self, tol):
        """Test variance of a first order CV expectation value"""
        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=15)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            return qml.var(qml.X(0))

        r = np.array(0.105, requires_grad=True)
        phi = np.array(-0.654, requires_grad=True)

        var = circuit(r, phi)
        expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2
        assert np.allclose(var, expected, atol=tol, rtol=0)
        # circuit jacobians
        tapes, fn = qml.gradients.param_shift_cv(circuit.qtape, dev)
        gradA = fn(dev.batch_execute(tapes))

        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        gradF = fn(dev.batch_execute(tapes))
        expected = np.array(
            [
                2 * np.exp(2 * r) * np.sin(phi) ** 2 - 2 * np.exp(-2 * r) * np.cos(phi) ** 2,
                2 * np.sinh(2 * r) * np.sin(2 * phi),
            ]
        )
        assert np.allclose(gradA, expected, atol=tol, rtol=0)
        assert np.allclose(gradF, expected, atol=tol, rtol=0)
Example #10
    def test_first_order_observable(self, tol):
        """Test variance of a first order CV observable"""
        dev = qml.device("default.gaussian", wires=1)

        r = 0.543
        phi = -0.654

        with qml.tape.JacobianTape() as tape:
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            qml.var(qml.X(0))

        tape.trainable_params = {0, 2}

        res = tape.execute(dev)
        expected = np.exp(2 * r) * np.sin(phi)**2 + np.exp(-2 * r) * np.cos(phi)**2
        assert np.allclose(res, expected, atol=tol, rtol=0)

        # circuit jacobians
        tapes, fn = qml.gradients.finite_diff(tape)
        grad_F = fn(dev.batch_execute(tapes))

        tapes, fn = param_shift_cv(tape, dev)
        grad_A = fn(dev.batch_execute(tapes))

        expected = np.array([[
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ]])
        assert np.allclose(grad_A, expected, atol=tol, rtol=0)
        assert np.allclose(grad_F, expected, atol=tol, rtol=0)
Example #11
    def test_nesterovmomentum_optimizer_usergrad(self, x_start, tol):
        """Tests that nesterov momentum optimizer takes gradient-descent steps correctly
        using user-provided gradients."""
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
        grad_uni_fns = [
            lambda x: (np.cos(x),),
            lambda x: (np.exp(x / 10.0) / 10.0,),
            lambda x: (2 * x,),
        ]

        # deliberately pair each function with a mismatched gradient: since
        # grad_fn is supplied explicitly, f itself is never differentiated
        for gradf, f in zip(grad_uni_fns[::-1], univariate_funcs):
            nesmom_opt.reset()

            x_onestep = nesmom_opt.step(f, x_start, grad_fn=gradf)
            x_onestep_target = x_start - gradf(x_start)[0] * stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = nesmom_opt.step(f, x_onestep, grad_fn=gradf)
            momentum_term = gamma * gradf(x_start)[0]
            shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
            x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
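
A minimal sketch (hypothetical nesterov_step) of the look-ahead rule being checked: the gradient is evaluated at the position shifted by the momentum term before updating.

import numpy as np

def nesterov_step(x, grad_fn, accum, stepsize=0.1, momentum=0.5):
    """One Nesterov update with the gradient taken at the shifted point."""
    shifted = momentum * accum
    accum = shifted + grad_fn(x - stepsize * shifted)
    return x - stepsize * accum, accum

x, accum = 1.0, 0.0
for _ in range(2):  # two steps on f(x) = x**2
    x, accum = nesterov_step(x, lambda z: 2 * z, accum)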
Example #12
    def test_tf(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be executed using TF"""
        tf = pytest.importorskip("tensorflow")
        from pennylane.interfaces.tf import TFInterface

        dev = qml.device("default.gaussian", wires=1)
        params = tf.Variable([0.543, -0.654], dtype=tf.float64)

        with tf.GradientTape() as t:
            with TFInterface.apply(qml.tape.CVParamShiftTape()) as tape:
                qml.Squeezing(params[0], 0, wires=0)
                qml.Rotation(params[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tapes, fn = qml.gradients.param_shift_cv(tape, dev)
            jac = fn([tp.execute(dev) for tp in tapes])
            res = jac[0, 1]

        r, phi = 1.0 * params

        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(jac, expected, atol=tol, rtol=0)

        grad = t.jacobian(res, params)
        expected = np.array([
            4 * np.cosh(2 * r) * np.sin(2 * phi),
            4 * np.cos(2 * phi) * np.sinh(2 * r)
        ])
        assert np.allclose(grad, expected, atol=tol, rtol=0)
Example #13
    def test_rmsprop_optimizer_univar(self, x_start, tol):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma = 0.1, 0.5
        rms_opt = RMSPropOptimizer(stepsize, decay=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            rms_opt.reset()

            x_onestep = rms_opt.step(f, x_start)
            past_grads = (1 - gamma) * gradf(x_start)[0] * gradf(x_start)[0]
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = rms_opt.step(f, x_onestep)
            past_grads = (
                gamma * (1 - gamma) * gradf(x_start)[0] * gradf(x_start)[0]
                + (1 - gamma) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
            )
            adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
            x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
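
A minimal sketch (hypothetical rmsprop_step) of the moving-average rule the targets above unroll:

import numpy as np

def rmsprop_step(x, grad, avg, stepsize=0.1, decay=0.5, eps=1e-8):
    """One RMSProp update: keep an exponential moving average of the
    squared gradient and divide the stepsize by its square root."""
    avg = decay * avg + (1 - decay) * grad * grad
    return x - stepsize * grad / np.sqrt(avg + eps), avg

x, avg = 1.0, 0.0
for _ in range(2):  # two steps on f(x) = x**2
    x, avg = rmsprop_step(x, 2 * x, avg)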
Example #14
 def test_exp(self):
     """Tests exp function."""
     x_vals = np.linspace(-10, 10, 16, endpoint=False)
     func = lambda x: np.exp(x / 10.0)
     g = qml.grad(func, 0)
     auto_grad = [g(x) for x in x_vals]
     correct_grad = np.exp(x_vals / 10.0) / 10.0
     assert np.allclose(auto_grad, correct_grad)
Example #15
 def arbitrary_rotation(x, y, z):
     """arbitrary single qubit rotation"""
     c = np.cos(y / 2)
     s = np.sin(y / 2)
     return np.array(
         [[np.exp(-0.5j * (x + z)) * c, -np.exp(0.5j * (x - z)) * s],
          [np.exp(-0.5j * (x - z)) * s,
           np.exp(0.5j * (x + z)) * c]])
Example #16
def target_function(x):
    """Evaluate a truncated Fourier series, re-scaling the input data."""
    res = coeff0
    for idx, coeff in enumerate(coeffs):
        exponent = 1j * scaling * (idx + 1) * x
        conj_coeff = np.conjugate(coeff)
        res += coeff * np.exp(exponent) + conj_coeff * np.exp(-exponent)
    return np.real(res)
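
target_function closes over coeff0, coeffs, and scaling defined elsewhere; a usage sketch with hypothetical values for those globals:

import numpy as np

coeff0 = 0.1                          # hypothetical constant term
coeffs = [0.15 + 0.15j, 0.1 - 0.3j]   # hypothetical complex Fourier coefficients
scaling = 1.0                         # hypothetical input re-scaling

x_samples = np.linspace(-np.pi, np.pi, 50)
y_samples = [target_function(x) for x in x_samples]  # real-valued series samples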
Example #17
 def squared_term(a, r, phi):
     """Analytic expression for <N^2>"""
     magnitude_squared = np.abs(a)**2
     squared_term = (
         -magnitude_squared + magnitude_squared**2 +
         2 * magnitude_squared * np.cosh(2 * r) -
         np.exp(-1j * phi) * a**2 * np.cosh(r) * np.sinh(r) -
         np.exp(1j * phi) * np.conj(a)**2 * np.cosh(r) * np.sinh(r) +
         np.sinh(r)**4 + np.cosh(r) * np.sinh(r) * np.sinh(2 * r))
     return squared_term
Example #18
    def test_jax(self, tol):
        """Tests that the output of the parameter-shift CV transform
        can be differentiated using JAX, yielding second derivatives."""
        jax = pytest.importorskip("jax")
        from jax import numpy as jnp
        from jax.config import config

        config.update("jax_enable_x64", True)

        dev = qml.device("default.gaussian", wires=2)
        params = jnp.array([0.543, -0.654])

        def cost_fn(x):
            with qml.tape.JacobianTape() as tape:
                qml.Squeezing(params[0], 0, wires=0)
                qml.Rotation(params[1], wires=0)
                qml.var(qml.X(wires=[0]))

            tape.trainable_params = {0, 2}
            tapes, fn = qml.gradients.param_shift_cv(tape, dev)
            jac = fn(
                qml.execute(tapes,
                            dev,
                            param_shift_cv,
                            gradient_kwargs={"dev": dev},
                            interface="jax"))
            return jac

        r, phi = params
        res = cost_fn(params)
        expected = np.array([
            2 * np.exp(2 * r) * np.sin(phi)**2 -
            2 * np.exp(-2 * r) * np.cos(phi)**2,
            2 * np.sinh(2 * r) * np.sin(2 * phi),
        ])
        assert np.allclose(res, expected, atol=tol, rtol=0)

        pytest.xfail(
            "The CV Operation methods have not been updated to support autodiff"
        )

        res = jax.jacobian(cost_fn)(params)
        expected = np.array([
            [
                4 * np.exp(-2 * r) *
                (np.cos(phi)**2 + np.exp(4 * r) * np.sin(phi)**2),
                4 * np.cosh(2 * r) * np.sin(2 * phi),
            ],
            [
                4 * np.cosh(2 * r) * np.sin(2 * phi),
                4 * np.cos(2 * phi) * np.sinh(2 * r)
            ],
        ])
        assert np.allclose(res, expected, atol=tol, rtol=0)
Example #19
 def test_exp(self):
     """Tests gradients with a multivariate exp and tanh."""
     multi_var = lambda x: np.exp(x[0] / 3) * np.tanh(x[1])
     grad_multi_var = lambda x: np.array([
         np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
         np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
     ])
     x_vec = np.random.uniform(-5, 5, size=(2))
     g = qml.grad(multi_var, 0)
     auto_grad = g(x_vec)
     correct_grad = grad_multi_var(x_vec)
     assert np.allclose(auto_grad, correct_grad)
Example #20
 def arbitrary_Crotation(x, y, z):
     """controlled arbitrary single qubit rotation"""
     c = np.cos(y / 2)
     s = np.sin(y / 2)
     return np.array(
         [
             [1, 0, 0, 0],
             [0, 1, 0, 0],
             [0, 0, np.exp(-0.5j * (x + z)) * c, -np.exp(0.5j * (x - z)) * s],
             [0, 0, np.exp(-0.5j * (x - z)) * s, np.exp(0.5j * (x + z)) * c]
         ]
     )
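
A quick consistency check, reusing arbitrary_rotation from Example #15: the controlled version should act as the identity on the |0>-control block, as the single-qubit rotation on the |1>-control block, and be unitary.

import numpy as np

x, y, z = 0.4, -0.2, 0.7
U = arbitrary_Crotation(x, y, z)

assert np.allclose(U[:2, :2], np.eye(2))                    # control = |0>
assert np.allclose(U[2:, 2:], arbitrary_rotation(x, y, z))  # control = |1>
assert np.allclose(U.conj().T @ U, np.eye(4))               # unitarity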
Example #21
    def test_adam_optimizer_multivar(self, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adam_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adam_opt.step(f, x_vec)
                adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
                firstmoment = (1 - gamma) * gradf(x_vec)[0]
                secondmoment = (1 - delta) * gradf(x_vec)[0] * gradf(x_vec)[0]
                x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adam_opt.step(f, x_onestep)
                adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
                firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
                secondmoment = (
                    delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #22
        def qfunc_with_qubit_unitary(angles):
            z = angles[0]
            x = angles[1]

            Z_mat = np.array([[np.exp(-1j * z / 2), 0.0], [0.0, np.exp(1j * z / 2)]])

            c = np.cos(x / 2)
            s = np.sin(x / 2) * 1j
            X_mat = np.array([[c, -s], [-s, c]])

            qml.Hadamard(wires="a")
            qml.QubitUnitary(Z_mat, wires="a")
            qml.QubitUnitary(X_mat, wires="b")
            qml.CNOT(wires=["b", "a"])
            return qml.expval(qml.PauliX(wires="a"))
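
Z_mat and X_mat above are the standard RZ(z) and RX(x) gate matrices; a small standalone check of that claim:

import numpy as np

z, x = 0.3, -0.8
Z_mat = np.array([[np.exp(-1j * z / 2), 0.0], [0.0, np.exp(1j * z / 2)]])
c, s = np.cos(x / 2), 1j * np.sin(x / 2)
X_mat = np.array([[c, -s], [-s, c]])

# both are unitary, as any valid QubitUnitary argument must be
assert np.allclose(Z_mat.conj().T @ Z_mat, np.eye(2))
assert np.allclose(X_mat.conj().T @ X_mat, np.eye(2))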
Example #23
    def test_exp(self):
        """Tests gradients with multivariate multidimensional exp and tanh."""
        x_vec = np.random.uniform(-5, 5, size=(2))
        x_vec_multidim = np.expand_dims(x_vec, axis=1)

        gradf = lambda x: np.array([
            [np.exp(x[0, 0] / 3) / 3 * np.tanh(x[1, 0])],
            [np.exp(x[0, 0] / 3) * (1 - np.tanh(x[1, 0])**2)],
        ])
        f = lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[1, 0])

        g = qml.grad(f, 0)
        auto_grad = g(x_vec_multidim)
        correct_grad = gradf(x_vec_multidim)
        assert np.allclose(auto_grad, correct_grad)
Example #24
    def test_squeezed_state(self):
        """Test the squeezed state is correct."""
        self.logTestName()
        r = 0.432
        phi = 0.123
        means, cov = squeezed_state(r, phi, hbar=hbar)

        # test vector of means is zero
        self.assertAllAlmostEqual(means, np.zeros([2]), delta=self.tol)

        R = rotation(phi / 2)
        expected = R @ np.diag([np.exp(-2 * r), np.exp(2 * r)]) @ R.T * hbar / 2
        # test covariance matrix is correct
        self.assertAllAlmostEqual(cov, expected, delta=self.tol)
Example #25
    def test_laplace_kernel(self):
        """Test square_kernel_matrix and kernel_matrix of the _laplace_kernel above."""
        X1 = [0.1, 0.4, 0.2]
        X2 = [0.0, 0.1, 0.3, 0.2]

        K1_expected = pnp.exp(-np.array([[0.0, 0.3, 0.1], [0.3, 0.0, 0.2], [0.1, 0.2, 0.0]]))
        K2_expected = pnp.exp(
            -np.array([[0.1, 0.0, 0.2, 0.1], [0.4, 0.3, 0.1, 0.2], [0.2, 0.1, 0.1, 0.0]])
        )

        K1 = kern.square_kernel_matrix(X1, _laplace_kernel, assume_normalized_kernel=False)
        K2 = kern.kernel_matrix(X1, X2, _laplace_kernel)

        assert np.allclose(K1, K1_expected)
        assert np.allclose(K2, K2_expected)
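
_laplace_kernel itself is defined elsewhere in the test module; a minimal stand-in consistent with the expected matrices above (an assumption, not the verified original):

import numpy as np

def _laplace_kernel(x1, x2):
    """Laplace kernel on scalars: exp(-|x1 - x2|)."""
    return np.exp(-np.abs(x1 - x2))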
Example #26
    def test_polyxp_variance(self, tol):
        """Tests that variance for PolyXP measurement works"""
        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=15)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, 0, wires=0)
            qml.Rotation(phi, wires=0)
            return qml.var(qml.PolyXP(np.array([0, 1, 0]), wires=0))

        r = 0.105
        phi = -0.654

        var = circuit(r, phi)
        expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2
        assert np.allclose(var, expected, atol=tol, rtol=0)
Example #27
    def test_fock_state_projector(self, tol):
        """Test that FockStateProjector works as expected"""
        cutoff_dim = 12
        a = 0.54321
        r = 0.123

        hbar = 2
        dev = qml.device("strawberryfields.fock", wires=2, hbar=hbar, cutoff_dim=cutoff_dim)

        # test correct number state expectation |<n|a>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Displacement(x, 0, wires=0)
            return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))

        expected = np.abs(np.exp(-np.abs(a) ** 2 / 2) * a**2 / np.sqrt(2)) ** 2
        assert np.allclose(circuit(a), expected, atol=tol, rtol=0)

        # test correct number state expectation |<n|S(r)>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Squeezing(x, 0, wires=0)
            return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

        expected = np.abs(np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r))) ** 2
        assert np.allclose(circuit(r), expected, atol=tol, rtol=0)
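
Both expectations are number-state overlaps; for the coherent state the general formula is |<n|alpha>|^2 = exp(-|alpha|^2) |alpha|^(2n) / n!. A standalone check of the n = 2 value used above:

import numpy as np
from scipy.special import factorial as fac

a, n = 0.54321, 2
prob = np.exp(-np.abs(a) ** 2) * np.abs(a) ** (2 * n) / fac(n)
assert np.isclose(prob, np.abs(np.exp(-np.abs(a) ** 2 / 2) * a**2 / np.sqrt(2)) ** 2)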
Example #28
    def test_finite_diff_coherent_two_wires(self, tol):
        """Test that the jacobian of the probability for a coherent states is
        approximated well with finite differences"""
        cutoff = 4

        dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(a, phi):
            qml.Displacement(a, phi, wires=0)
            qml.Displacement(a, phi, wires=1)
            return qml.probs(wires=[0, 1])

        a = 0.4
        phi = -0.12

        c = np.arange(cutoff)
        d = np.arange(cutoff)
        n0, n1 = np.meshgrid(c, d)
        n0 = n0.flatten()
        n1 = n1.flatten()

        # differentiate with respect to parameter a
        res_F = circuit.jacobian([a, phi], wrt={0}, method="F").flat
        expected_gradient = (2 * (a**(-1 + 2 * n0 + 2 * n1)) *
                             np.exp(-2 * a**2) * (-2 * a**2 + n0 + n1) /
                             (fac(n0) * fac(n1)))
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # differentiate with respect to parameter phi
        res_F = circuit.jacobian([a, phi], wrt={1}, method="F").flat
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
Example #29
    def test_finite_diff_coherent(self, tol):
        """Test that the jacobian of the probability for a coherent states is
        approximated well with finite differences"""
        cutoff = 10

        dev = qml.device("strawberryfields.gaussian", wires=1)

        @qml.qnode(dev)
        def circuit(a, phi):
            qml.Displacement(a, phi, wires=0)
            return qml.probs(wires=[0])

        a = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # differentiate with respect to parameter a
        res_F = circuit.jacobian([a, phi], wrt={0}, method="F").flat
        expected_gradient = 2 * np.exp(-(a**2)) * a**(2 * n - 1) * (n - a**2) / fac(n)
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # differentiate with respect to parameter phi
        res_F = circuit.jacobian([a, phi], wrt={1}, method="F").flat
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
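
The analytic gradient used above follows from differentiating the coherent photon-number distribution P(n) = exp(-a^2) a^(2n) / n! with respect to a; a quick finite-difference sanity check of that derivative:

import numpy as np
from scipy.special import factorial as fac

a, h = 0.4, 1e-6
n = np.arange(10)

p = lambda a: np.exp(-(a**2)) * a ** (2 * n) / fac(n)
dp = 2 * np.exp(-(a**2)) * a ** (2 * n - 1) * (n - a**2) / fac(n)

assert np.allclose((p(a + h) - p(a)) / h, dp, atol=1e-4)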
Example #30
    def test_cat_state(self, tol):
        """Test that the CatState gate works correctly"""
        a = 0.312
        b = 0.123
        c = 0.532
        wires = [0]

        gate_name = "CatState"
        operation = qml.CatState

        cutoff_dim = 10
        dev = qml.device("strawberryfields.fock",
                         wires=2,
                         cutoff_dim=cutoff_dim)

        sf_operation = dev._operation_map[gate_name]

        assert dev.supports_operation(gate_name)

        @qml.qnode(dev)
        def circuit(*args):
            qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
            operation(*args, wires=wires)
            return qml.expval(qml.NumberOperator(0)), qml.expval(
                qml.NumberOperator(1))

        res = circuit(a, b, c)
        sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires,
                                   a * np.exp(1j * b), c)
        assert np.allclose(res, sf_res, atol=tol, rtol=0)