Example #1
    def test_exp(self):
        """Tests multiarg gradients with exp and tanh functions."""
        x = -2.5
        y = 1.5
        gradf = lambda x, y: (
            np.exp(x / 3) / 3 * np.tanh(y),
            np.exp(x / 3) * (1 - np.tanh(y)**2),
        )
        f = lambda x, y: np.exp(x / 3) * np.tanh(y)

        # gradient wrt first argument
        gx = qml.grad(f, 0)
        auto_gradx = gx(x, y)
        correct_gradx = gradf(x, y)[0]
        assert np.allclose(auto_gradx, correct_gradx)

        # gradient wrt second argument
        gy = qml.grad(f, 1)
        auto_grady = gy(x, y)
        correct_grady = gradf(x, y)[1]
        assert np.allclose(auto_grady, correct_grady)

        # gradient wrt both arguments
        gxy = qml.grad(f, [0, 1])
        auto_gradxy = gxy(x, y)
        correct_gradxy = gradf(x, y)
        assert np.allclose(auto_gradxy, correct_gradxy)
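The asserts above compare qml.grad against hand-derived formulas; those formulas can themselves be sanity-checked with a central finite difference. A minimal numpy-only sketch (central_diff is an illustrative helper; h = 1e-6 is an arbitrary step):

import numpy as np

f = lambda x, y: np.exp(x / 3) * np.tanh(y)

def central_diff(f, x, y, h=1e-6):
    # central finite differences with respect to each argument
    dx = (f(x + h, y) - f(x - h, y)) / (2 * h)
    dy = (f(x, y + h) - f(x, y - h)) / (2 * h)
    return dx, dy

num_dx, num_dy = central_diff(f, -2.5, 1.5)
assert np.allclose(num_dx, np.exp(-2.5 / 3) / 3 * np.tanh(1.5))
assert np.allclose(num_dy, np.exp(-2.5 / 3) * (1 - np.tanh(1.5) ** 2))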
    def test_gradient_descent_optimizer_multivar(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                x_vec = x_vals[jdx:jdx + 2]
                x_new = sgd_opt.step(f, x_vec)
                x_correct = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_new, x_correct, atol=tol)
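Each target in the loop above is one application of the plain gradient-descent rule x_new = x - stepsize * gradf(x). A standalone sketch of a single step, assuming only numpy (gd_step is an illustrative helper, not the PennyLane API):

import numpy as np

def gd_step(grad_fn, x, stepsize=0.1):
    # one vanilla gradient-descent step: x <- x - eta * grad f(x)
    return x - stepsize * grad_fn(x)

gradf = lambda x: np.array([np.cos(x[0]), -np.sin(x[1])])
x_new = gd_step(gradf, np.array([0.5, -1.0]))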
Example #3
 def test_exp(self):
     """Tests gradients with a multivariate exp and tanh."""
     multi_var = lambda x: np.exp(x[0] / 3) * np.tanh(x[1])
     grad_multi_var = lambda x: np.array([
         np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
         np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
     ])
     x_vec = np.random.uniform(-5, 5, size=(2))
     g = qml.grad(multi_var, 0)
     auto_grad = g(x_vec)
     correct_grad = grad_multi_var(x_vec)
     assert np.allclose(auto_grad, correct_grad)
Example #4
    def test_adam_optimizer_multivar(self, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adam_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adam_opt.step(f, x_vec)
                adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
                firstmoment = (1 - gamma) * gradf(x_vec)[0]
                secondmoment = (1 - delta) * gradf(x_vec)[0] * gradf(x_vec)[0]
                x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adam_opt.step(f, x_onestep)
                adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
                firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
                secondmoment = (
                    delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
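The hand-computed targets above fold Adam's bias correction into an adapted step size instead of into the moment estimates themselves. A minimal numpy sketch of that convention (adam_step is illustrative, not PennyLane's implementation; the defaults mirror the test's stepsize, gamma, and delta):

import numpy as np

def adam_step(grad, x, m, v, t, eta=0.1, beta1=0.5, beta2=0.8, eps=1e-8):
    # exponential moving averages of the gradient and its square
    g = grad(x)
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g * g
    # bias correction folded into the step size, as in the targets above
    adapted = eta * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    return x - adapted * m / (np.sqrt(v) + eps), m, v

grad = lambda x: np.array([np.cos(x[0]), -np.sin(x[1])])
x = np.array([0.5, -1.0])
m = np.zeros_like(x)
v = np.zeros_like(x)
for t in (1, 2):  # two steps, matching the two checks above
    x, m, v = adam_step(grad, x, m, v, t)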
Example #5
    def test_exp(self):
        """Tests gradients with multivariate multidimensional exp and tanh."""
        x_vec = np.random.uniform(-5, 5, size=(2))
        x_vec_multidim = np.expand_dims(x_vec, axis=1)

        gradf = lambda x: np.array([
            [np.exp(x[0, 0] / 3) / 3 * np.tanh(x[1, 0])],
            [np.exp(x[0, 0] / 3) * (1 - np.tanh(x[1, 0])**2)],
        ])
        f = lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[1, 0])

        g = qml.grad(f, 0)
        auto_grad = g(x_vec_multidim)
        correct_grad = gradf(x_vec_multidim)
        assert np.allclose(auto_grad, correct_grad)
Example #6
    def test_number_state(self):
        """Test that NumberState works as expected"""
        self.logTestName()

        a = 0.54321
        r = 0.123

        hbar = 2
        dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)

        # test correct number state expectation |<n|a>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Displacement(x, 0, wires=0)
            return qml.expval.NumberState(np.array([2]), wires=0)

        expected = np.abs(np.exp(-np.abs(a)**2 / 2) * a**2 / np.sqrt(2))**2
        self.assertAlmostEqual(circuit(a), expected)

        # test correct number state expectation |<n|S(r)>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Squeezing(x, 0, wires=0)
            return qml.expval.NumberState(np.array([2, 0]), wires=[0, 1])

        expected = np.abs(
            np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r)))**2
        self.assertAlmostEqual(circuit(r), expected)

    def test_squeezed_number_state_gradient(self, mocker, tol):
        """Test that the numerical gradient of the squeeze gate with
        number state expectation is correct"""
        dev = qml.device("default.gaussian", wires=2, hbar=hbar)

        r = 0.23354

        with qml.tape.JacobianTape() as tape:
            qml.Squeezing(r, 0.0, wires=[0])
            # the fock state projector is a 'non-Gaussian' observable
            qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

        tape.trainable_params = {0}

        spy = mocker.spy(qml.gradients.parameter_shift_cv,
                         "second_order_param_shift")

        tapes, fn = param_shift_cv(tape, dev)
        grad = fn(dev.batch_execute(tapes))
        assert tape._par_info[0]["grad_method"] == "F"

        spy.assert_not_called()

        # (d/dr) |<2|S(r)>|^2 = 0.5 tanh(r)^3 (2 csch(r)^2 - 1) sech(r)
        expected = 0.5 * np.tanh(r)**3 * (2 / (np.sinh(r)**2) - 1) / np.cosh(r)
        assert np.allclose(grad, expected, atol=tol, rtol=0)
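The commented formula follows from |<2|S(r)>|^2 = tanh(r)^2 / (2 cosh(r)); a quick numerical confirmation of the closed form with a central difference (pure numpy; h is an arbitrary step):

import numpy as np

p = lambda r: np.tanh(r) ** 2 / (2 * np.cosh(r))  # |<2|S(r)>|^2

r, h = 0.23354, 1e-6
numeric = (p(r + h) - p(r - h)) / (2 * h)
closed = 0.5 * np.tanh(r) ** 3 * (2 / np.sinh(r) ** 2 - 1) / np.cosh(r)
assert np.allclose(numeric, closed)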
Example #8
    def test_finite_diff_squeezed(self, tol):
        """Test that the jacobian of the probability for a squeezed states is
        approximated well with finite differences"""
        cutoff = 5

        dev = qml.device("strawberryfields.gaussian",
                         wires=1,
                         cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, phi, wires=0)
            return qml.probs(wires=[0])

        r = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # differentiate with respect to parameter r
        res_F = circuit.jacobian([r, phi], wrt={0}, method="F").flatten()
        assert res_F.shape == (cutoff, )

        expected_gradient = (
            np.abs(np.tanh(r))**n * (1 + 2 * n - np.cosh(2 * r)) * fac(n) /
            (2**(n + 1) * np.cosh(r)**2 * np.sinh(r) * fac(n / 2)**2))
        expected_gradient[n % 2 != 0] = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # differentiate with respect to parameter phi
        res_F = circuit.jacobian([r, phi], wrt={1}, method="F").flatten()
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
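expected_gradient above is the r-derivative of the squeezed-vacuum photon-number distribution P(n) = n! tanh(r)^n / (2^n ((n/2)!)^2 cosh(r)) for even n (odd n carry no weight). A numpy/scipy sketch that confirms the closed form by finite differences (squeezed_probs is an illustrative helper):

import numpy as np
from scipy.special import factorial as fac

def squeezed_probs(r, cutoff=5):
    # photon-number distribution of squeezed vacuum; odd n have zero weight
    n = np.arange(cutoff)
    p = fac(n) / (2 ** n * fac(n / 2) ** 2) * np.abs(np.tanh(r)) ** n / np.cosh(r)
    p[n % 2 != 0] = 0
    return p

r, h = 0.4, 1e-6
n = np.arange(5)
numeric = (squeezed_probs(r + h) - squeezed_probs(r - h)) / (2 * h)
closed = (np.abs(np.tanh(r)) ** n * (1 + 2 * n - np.cosh(2 * r)) * fac(n)
          / (2 ** (n + 1) * np.cosh(r) ** 2 * np.sinh(r) * fac(n / 2) ** 2))
closed[n % 2 != 0] = 0
assert np.allclose(numeric, closed)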
Example #9
    def test_fock_state_projector(self, tol):
        """Test that FockStateProjector works as expected"""
        cutoff_dim = 12
        a = 0.54321
        r = 0.123

        hbar = 2
        dev = qml.device("strawberryfields.fock", wires=2, hbar=hbar, cutoff_dim=cutoff_dim)

        # test correct number state expectation |<n|a>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Displacement(x, 0, wires=0)
            return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))

        expected = np.abs(np.exp(-np.abs(a) ** 2 / 2) * a**2 / np.sqrt(2)) ** 2
        assert np.allclose(circuit(a), expected, atol=tol, rtol=0)

        # test correct number state expectation |<n|S(r)>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Squeezing(x, 0, wires=0)
            return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

        expected = np.abs(np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r))) ** 2
        assert np.allclose(circuit(r), expected, atol=tol, rtol=0)
Example #10
    def test_fock_state(self):
        """Test that FockStateProjector works as expected"""
        self.logTestName()

        cutoff_dim = 12
        a = 0.54321
        r = 0.123

        hbar = 2
        dev = qml.device('strawberryfields.fock',
                         wires=2,
                         hbar=hbar,
                         cutoff_dim=cutoff_dim)

        # test correct number state expectation |<n|a>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Displacement(x, 0, wires=0)
            return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))

        expected = np.abs(np.exp(-np.abs(a)**2 / 2) * a**2 / np.sqrt(2))**2
        self.assertAlmostEqual(circuit(a), expected)

        # test correct number state expectation |<n|S(r)>|^2
        @qml.qnode(dev)
        def circuit(x):
            qml.Squeezing(x, 0, wires=0)
            return qml.expval(
                qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

        expected = np.abs(
            np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r)))**2
        self.assertAlmostEqual(circuit(r), expected)
Example #11
    def test_controlled_addition(self):
        """Test the CX symplectic transform."""
        self.logTestName()

        s = 0.543
        S = controlled_addition(s)

        # test that S = B(theta+pi/2, 0) [S(z) x S(-z)] B(theta, 0)
        r = np.arcsinh(-s / 2)
        theta = 0.5 * np.arctan2(-1 / np.cosh(r), -np.tanh(r))
        Sz = block_diag(squeezing(r, 0),
                        squeezing(-r, 0))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]

        expected = beamsplitter(theta + np.pi / 2, 0) @ Sz @ beamsplitter(
            theta, 0)
        self.assertAllAlmostEqual(S, expected, delta=self.tol)

        # test that S[x1, x2, p1, p2] -> [x1, x2+sx1, p1-sp2, p2]
        x1 = 0.5432
        x2 = -0.453
        p1 = 0.154
        p2 = -0.123
        out = S @ np.array([x1, x2, p1, p2]) * np.sqrt(2 * hbar)
        expected = np.array([x1, x2 + s * x1, p1 - s * p2, p2]) * np.sqrt(
            2 * hbar)
        self.assertAllAlmostEqual(out, expected, delta=self.tol)
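Written out explicitly, the CX symplectic matrix makes the quadrature action obvious. A self-contained numpy sketch, independent of the strawberryfields helpers:

import numpy as np

s = 0.543
# controlled addition in (x1, x2, p1, p2) ordering:
# x2 picks up s*x1, p1 loses s*p2, everything else is unchanged
S = np.array([
    [1, 0, 0, 0],
    [s, 1, 0, 0],
    [0, 0, 1, -s],
    [0, 0, 0, 1],
])

x1, x2, p1, p2 = 0.5432, -0.453, 0.154, -0.123
out = S @ np.array([x1, x2, p1, p2])
assert np.allclose(out, [x1, x2 + s * x1, p1 - s * p2, p2])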
Example #12
    def test_adagrad_optimizer_multivar(self, tol):
        """Tests that adagrad optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize = 0.1
        adag_opt = AdagradOptimizer(stepsize)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adag_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adag_opt.step(f, x_vec)
                past_grads = gradf(x_vec)[0] * gradf(x_vec)[0]
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adag_opt.step(f, x_onestep)
                past_grads = (
                    gradf(x_vec)[0] * gradf(x_vec)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
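The targets above implement the Adagrad rule: accumulate elementwise squared gradients and scale the step per coordinate by their inverse square root. A minimal standalone sketch in the same convention (adagrad_step and accum are illustrative names):

import numpy as np

def adagrad_step(grad, x, accum, eta=0.1, eps=1e-8):
    # accumulate squared gradients, then shrink the step per coordinate
    g = grad(x)
    accum = accum + g * g
    return x - eta / np.sqrt(accum + eps) * g, accum

grad = lambda x: np.array([np.cos(x[0]), -np.sin(x[1])])
x = np.array([0.5, -1.0])
accum = np.zeros_like(x)
x, accum = adagrad_step(grad, x, accum)  # first step
x, accum = adagrad_step(grad, x, accum)  # second step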
    def test_nesterovmomentum_optimizer_multivar(self, tol):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                nesmom_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = nesmom_opt.step(f, x_vec)
                x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = nesmom_opt.step(f, x_onestep)
                momentum_term = gamma * gradf(x_vec)[0]
                shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
                x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
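The Nesterov targets use the look-ahead form: the gradient is evaluated at the position shifted by the current momentum before the accumulator is updated. A sketch in the same convention (nesterov_step is an illustrative helper):

import numpy as np

def nesterov_step(grad, x, a, eta=0.1, gamma=0.5):
    # evaluate the gradient at the look-ahead point, then refresh momentum
    a = gamma * a + grad(x - eta * gamma * a)
    return x - eta * a, a

grad = lambda x: np.array([np.cos(x[0]), -np.sin(x[1])])
x = np.array([0.5, -1.0])
a = np.zeros_like(x)
x, a = nesterov_step(grad, x, a)  # reproduces x_onestep_target above
x, a = nesterov_step(grad, x, a)  # reproduces x_twosteps_target above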
    def test_gradient_descent_optimizer_multivar_multidim(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions and with higher dimensional inputs."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x]),
        ]
        grad_mvar_mdim_funcs = [
            lambda x: (np.array([
                [np.cos(x[0, 0]), -np.cos(x[0, 1])],
                [-np.sin(x[1, 0]), 1.0],
            ]), ),
            lambda x: (np.array([
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2),
                ],
                [0.0, 0.0],
            ]), ),
            lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs):
            for jdx in range(len(x_vals[:-3])):
                x_vec = x_vals[jdx:jdx + 4]
                x_vec_multidim = np.reshape(x_vec, (2, 2))
                x_new = sgd_opt.step(f, x_vec_multidim)
                x_correct = x_vec_multidim - gradf(x_vec_multidim)[0] * stepsize
                x_new_flat = x_new.flatten()
                x_correct_flat = x_correct.flatten()
                assert np.allclose(x_new_flat, x_correct_flat, atol=tol)
Example #15
    def test_expectation(self):
        """Test that expectation values are calculated correctly"""
        self.logTestName()

        dev = qml.device('default.gaussian', wires=1, hbar=hbar)

        # test correct mean and variance for <n> of a displaced thermal state
        nbar = 0.5431
        alpha = 0.324 - 0.59j
        dev.apply('ThermalState', wires=[0], par=[nbar])
        dev.apply('Displacement', wires=[0], par=[alpha, 0])
        mean = dev.expval('MeanPhoton', [0], [])
        self.assertAlmostEqual(mean, np.abs(alpha)**2 + nbar, delta=self.tol)
        # self.assertAlmostEqual(var, nbar**2+nbar+np.abs(alpha)**2*(1+2*nbar), delta=self.tol)

        # test correct mean and variance for Homodyne P measurement
        alpha = 0.324 - 0.59j
        dev.apply('CoherentState', wires=[0], par=[alpha])
        mean = dev.expval('P', [0], [])
        self.assertAlmostEqual(mean,
                               alpha.imag * np.sqrt(2 * hbar),
                               delta=self.tol)
        # self.assertAlmostEqual(var, hbar/2, delta=self.tol)

        # test correct mean and variance for Homodyne measurement
        mean = dev.expval('Homodyne', [0], [np.pi / 2])
        self.assertAlmostEqual(mean,
                               alpha.imag * np.sqrt(2 * hbar),
                               delta=self.tol)
        # self.assertAlmostEqual(var, hbar/2, delta=self.tol)

        # test correct mean and variance for number state expectation |<n|alpha>|^2
        # on a coherent state
        for n in range(3):
            mean = dev.expval('NumberState', [0], [np.array([n])])
            expected = np.abs(
                np.exp(-np.abs(alpha)**2 / 2) * alpha**n / np.sqrt(fac(n)))**2
            self.assertAlmostEqual(mean, expected, delta=self.tol)

        # test correct mean and variance for number state expectation |<n|S(r)>|^2
        # on a squeezed state
        n = 1
        r = 0.4523
        dev.apply('SqueezedState', wires=[0], par=[r, 0])
        mean = dev.expval('NumberState', [0], [np.array([2 * n])])
        expected = np.abs(
            np.sqrt(fac(2 * n)) / (2**n * fac(n)) * (-np.tanh(r))**n /
            np.sqrt(np.cosh(r)))**2
        self.assertAlmostEqual(mean, expected, delta=self.tol)
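The coherent-state loop relies on |<n|alpha>|^2 = exp(-|alpha|^2) |alpha|^(2n) / n!, a Poisson distribution in n with mean |alpha|^2. A short numpy/scipy check that the first three probabilities match that form:

import numpy as np
from scipy.special import factorial as fac

alpha = 0.324 - 0.59j
mean_n = np.abs(alpha) ** 2
for n in range(3):
    overlap = np.abs(np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** n / np.sqrt(fac(n))) ** 2
    poisson = np.exp(-mean_n) * mean_n ** n / fac(n)
    assert np.allclose(overlap, poisson)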
Example #16
    def test_variance_squeezed_numberstate(self):
        """test correct variance for number state expectation |<n|S(r)>|^2
        on a squeezed state
        """
        self.logTestName()
        dev = qml.device('default.gaussian', wires=1, hbar=hbar)

        n = 1
        r = 0.4523
        dev.apply('SqueezedState', wires=[0], par=[r, 0])
        var = dev.var('FockStateProjector', [0], [np.array([2 * n])])
        mean = np.abs(
            np.sqrt(fac(2 * n)) / (2**n * fac(n)) * (-np.tanh(r))**n /
            np.sqrt(np.cosh(r)))**2
        self.assertAlmostEqual(var, mean * (1 - mean), delta=self.tol)
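A Fock-state projector has eigenvalues 0 and 1, so its outcome is Bernoulli with success probability p = |<2n|S(r)>|^2 and variance p * (1 - p), which is exactly the target above. For n = 1 this reduces to:

import numpy as np

r = 0.4523
p = np.tanh(r) ** 2 / (2 * np.cosh(r))  # |<2|S(r)>|^2 for n = 1
var = p * (1 - p)                       # Bernoulli variance of the projector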
Example #17
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
        ]
        self.grad_uni_fns = [
            np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x])
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
            ]), lambda x: np.array([2 * x_ for x_ in x])
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x])
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([
                [np.cos(x[0, 0]), -np.cos(x[0, 1])],
                [-np.sin(x[1, 0]), 1.0],
            ]),
            lambda x: np.array([
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2),
                ],
                [0.0, 0.0],
            ]),
            lambda x: np.array([[2 * x_[0], 0.0] for x_ in x])
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]
Example #18
    def test_finite_diff_squeezed(self, tol):
        """Test that the jacobian of the probability for a squeezed states is
        approximated well with finite differences"""
        cutoff = 5

        dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=cutoff)

        @qml.qnode(dev)
        def circuit(r, phi):
            qml.Squeezing(r, phi, wires=0)
            return qml.probs(wires=[0])

        r = 0.4
        phi = -0.12

        n = np.arange(cutoff)

        # construct tape
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter r
        circuit.qtape.trainable_params = {0}
        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        assert res_F.shape == (cutoff,)

        expected_gradient = (
            np.abs(np.tanh(r)) ** n
            * (1 + 2 * n - np.cosh(2 * r))
            * fac(n)
            / (2 ** (n + 1) * np.cosh(r) ** 2 * np.sinh(r) * fac(n / 2) ** 2)
        )
        expected_gradient[n % 2 != 0] = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)

        # re-construct tape to reset trainable_params
        circuit.construct([r, phi], {})

        # differentiate with respect to parameter phi
        circuit.qtape.trainable_params = {1}

        tapes, fn = qml.gradients.finite_diff(circuit.qtape)
        res_F = fn(dev.batch_execute(tapes)).flatten()
        expected_gradient = 0
        assert np.allclose(res_F, expected_gradient, atol=tol, rtol=0)
nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]

# functions and their gradients
fnames = ["test_function_1", "test_function_2", "test_function_3"]
univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
grad_uni_fns = [
    lambda x: (np.cos(x), ),
    lambda x: (np.exp(x / 10.0) / 10.0, ),
    lambda x: (2 * x, ),
]

multivariate_funcs = [
    lambda x: np.sin(x[0]) + np.cos(x[1]),
    lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
    lambda x: np.sum([x_**2 for x_ in x]),
]
grad_multi_funcs = [
    lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
    lambda x: (np.array([
        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
        np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
    ]), ),
    lambda x: (np.array([2 * x_ for x_ in x]), ),
]

mvar_mdim_funcs = [
    lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
    lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
    lambda x: np.sum([x_[0]**2 for x_ in x]),
]

 def setUp(self):
     self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
     self.univariate_funcs = [
         np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
     ]
     self.grad_uni_fns = [
         np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
     ]
     self.multivariate_funcs = [
         lambda x: np.sin(x[0]) + np.cos(x[1]),
         lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
          lambda x: np.sum([x_**2 for x_ in x])
     ]
     self.grad_multi_funcs = [
         lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
         lambda x: np.array([
             np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
             np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
         ]), lambda x: np.array([2 * x_ for x_ in x])
     ]
     self.mvar_mdim_funcs = [
         lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]),
         lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[1, 0]),
          lambda x: np.sum([x_[0]**2 for x_ in x])
     ]
     self.grad_mvar_mdim_funcs = [
          lambda x: np.array([[np.cos(x[0, 0])], [-np.sin(x[1, 0])]]),
          lambda x: np.array([
              [np.exp(x[0, 0] / 3) / 3 * np.tanh(x[1, 0])],
              [np.exp(x[0, 0] / 3) * (1 - np.tanh(x[1, 0])**2)],
          ]),
         lambda x: np.array([[2 * x_[0]] for x_ in x])
     ]
     self.margs_fns = [
         lambda x, y: np.sin(x) + np.cos(y),
         lambda x, y: np.exp(x / 3) * np.tanh(y),
          lambda x, y: np.sum([x_**2 for x_ in [x, y]])
     ]
      self.grad_margs_funcs = [
          lambda x, y: (np.cos(x), -np.sin(y)),
          lambda x, y: (np.exp(x / 3) / 3 * np.tanh(y),
                        np.exp(x / 3) * (1 - np.tanh(y)**2)),
          lambda x, y: (2 * x, 2 * y)
      ]
      self.margs_mdim_fns = [
          lambda x, y: (np.sin(x), np.cos(y)),
          lambda x, y: (np.exp(x / 3) * np.tanh(y), np.sinh(x * y)),
          lambda x, y: (x**2 + y**2, x * y)
      ]
      self.grad_margs_mdim_funcs = [
          lambda x, y: np.diag([np.cos(x), -np.sin(y)]),
          lambda x, y: np.array([
              [np.exp(x / 3) / 3 * np.tanh(y),
               np.exp(x / 3) * (1 - np.tanh(y)**2)],  # sech(y)**2; numpy has no np.sech
              [np.cosh(x * y) * y, np.cosh(x * y) * x],
          ]),
          lambda x, y: np.array([[2 * x, 2 * y], [y, x]])
      ]
Example #21
nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]


# functions and their gradients
fnames = ['test_function_1', 'test_function_2', 'test_function_3']
univariate_funcs = [np.sin,
                    lambda x: np.exp(x / 10.),
                    lambda x: x ** 2]
grad_uni_fns = [np.cos,
                lambda x: np.exp(x / 10.) / 10.,
                lambda x: 2 * x]
multivariate_funcs = [lambda x: np.sin(x[0]) + np.cos(x[1]),
                      lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
                      lambda x: np.sum([x_ ** 2 for x_ in x])]
grad_multi_funcs = [lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
                    lambda x: np.array([np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2)]),
                    lambda x: np.array([2 * x_ for x_ in x])]
mvar_mdim_funcs = [lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
                   lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
                   lambda x: np.sum([x_[0] ** 2 for x_ in x])]
grad_mvar_mdim_funcs = [lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                            [-np.sin(x[1, 0]), 1.]]),
                        lambda x: np.array([[np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                                             np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1]) ** 2)],
                                            [0., 0.]]),
                        lambda x: np.array([[2 * x_[0], 0.] for x_ in x])]
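All of the analytic gradients collected here can be cross-checked mechanically. A small finite-difference harness over multivariate_funcs / grad_multi_funcs as defined above, assuming only numpy (check_gradients is an illustrative helper):

import numpy as np

def check_gradients(funcs, grads, trials=5, h=1e-6, seed=0):
    # compare each analytic gradient against central finite differences
    rng = np.random.default_rng(seed)
    for f, g in zip(funcs, grads):
        for _ in range(trials):
            x = rng.uniform(-2, 2, size=2)
            numeric = np.array([
                (f(x + h * e) - f(x - h * e)) / (2 * h) for e in np.eye(2)
            ])
            assert np.allclose(g(x), numeric, atol=1e-4)

check_gradients(multivariate_funcs, grad_multi_funcs)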