Example #1
    def test_gradient_descent_optimizer_multivar(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]), ),
            lambda x: (np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2),
            ]), ),
            lambda x: (np.array([2 * x_ for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                x_vec = x_vals[jdx:jdx + 2]
                x_new = sgd_opt.step(f, x_vec)
                x_correct = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_new, x_correct, atol=tol)
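Every assertion above reduces to the same vanilla update rule, x_new = x - stepsize * grad_f(x). A minimal standalone sketch of that rule (gd_step is a hypothetical helper, not part of the tested API):

import numpy as np

def gd_step(grad_fn, x, stepsize=0.1):
    # One vanilla gradient-descent step: move against the gradient.
    return x - stepsize * np.asarray(grad_fn(x))

# For f(x) = sum(x**2), grad_f(x) = 2x:
print(gd_step(lambda x: 2 * x, np.array([1.0, -2.0])))  # [ 0.8 -1.6]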
Example #2
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
        ]
        self.grad_uni_fns = [
            np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x])
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
            ]),
            lambda x: np.array([2 * x_ for x_ in x])
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x])
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                [-np.sin(x[1, 0]), 1.]]),
            lambda x: np.array([[
                np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2)
            ], [0., 0.]]),
            lambda x: np.array([[2 * x_[0], 0.] for x_ in x])
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]
Example #3
    def test_apply_grad(self, grad, args, tol):
        """
        Test that a gradient step can be applied correctly to a set of parameters.
        """
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)
        grad, args = np.array(grad), np.array(args, requires_grad=True)

        res = sgd_opt.apply_grad(grad, args)
        expected = args - stepsize * grad
        assert np.allclose(res, expected, atol=tol)
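Unlike step, apply_grad takes a precomputed gradient and just applies the update, with no gradient evaluation of its own. A short usage sketch under the same imports as the test (pennylane.numpy as np):

sgd_opt = GradientDescentOptimizer(0.1)
args = np.array([0.5, -0.3], requires_grad=True)
grad = np.array([1.0, 2.0])

new_args = sgd_opt.apply_grad(grad, args)
print(new_args)  # args - 0.1 * grad == [0.4, -0.5]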
Example #4
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import GradientDescentOptimizer

dev = qml.device('default.qubit', wires=2)


def GetAnsatz():

    qml.Rot(0.3, 1.8, 5.4, wires=1)
    qml.RX(-0.5, wires=0)
    qml.RY(0.5, wires=1)
    qml.CNOT(wires=[0, 1])


@qml.qnode(dev)
def getCircuitX():

    GetAnsatz()
    return qml.expval(qml.PauliX(1))


@qml.qnode(dev)
def getCircuitY():

    GetAnsatz()
    return qml.expval(qml.PauliY(1))


def getCost(var):

    expX = getCircuitX()
    expY = getCircuitY()

    return (var[0] * expX + var[1] * expY) ** 2


optimizer = GradientDescentOptimizer(0.5)


variables = [0.3, 2.5]
variables_gd = [variables]
num_iterations = 20
for i in range(num_iterations):
    variables = optimizer.step(getCost, variables)
    variables_gd.append(variables)

    print('Cost - step {:5d}: {: .7f} | Variable values: [{: .5f},{: .5f}]'
          .format(i + 1, getCost(variables), variables[0], variables[1]))
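Since the circuits here take no parameters, expX and expY are constants and the cost is a perfect square in var, so its global minimum is exactly zero: any var orthogonal to (expX, expY) attains it. A quick check of that claim with hypothetical expectation values:

expX, expY = 0.3, -0.4  # hypothetical expectation values
var = [expY, -expX]     # orthogonal to (expX, expY)
print((var[0] * expX + var[1] * expY) ** 2)  # 0.0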
Example #5
    def test_step_and_cost_supplied_grad(self, args):
        """Test that returned cost is correct if gradient function is supplied"""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            _, res = sgd_opt.step_and_cost(f, args, grad_fn=gradf)
            expected = f(args)
            assert np.all(res == expected)
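As the test shows, step_and_cost returns the updated parameters together with the cost evaluated at the pre-step parameters, which is why the expected value is simply f(args). A hedged one-liner illustrating that contract:

new_args, prev_cost = sgd_opt.step_and_cost(np.sin, np.array(0.5, requires_grad=True))
# prev_cost == np.sin(0.5); new_args == 0.5 - 0.1 * np.cos(0.5)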
Example #6
class GradientOptimizer(object):
    def __init__(self, cost, step_size):
        self.cost = cost
        self.optimizer = GradientDescentOptimizer(step_size)

    def optimize(self, **kwargs):
        params = kwargs.get("params", None)
        steps = kwargs.get("steps", None)

        # Validate the cost function parameters
        if params is None:
            raise ValueError(
                "Expected 'params' to be passed as a keyword argument for use by the cost function."
            )
        if not isinstance(params, list):
            raise ValueError(
                "The 'params' keyword argument must be a list of parameters to be passed to the cost function."
            )

        # Make sure we were given the number of steps until convergence
        if steps is None:
            raise ValueError(
                "Expected 'steps' to be passed as a keyword argument giving the number of steps the optimizer should take towards convergence."
            )

        # Run the optimizer for the given number of steps
        for i in range(steps):
            params = self.optimizer.step(self.cost, params)

        return self.cost(params)
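A hypothetical usage sketch of this wrapper; the quadratic cost below is illustrative and not part of the class:

from pennylane import numpy as np
from pennylane.optimize import GradientDescentOptimizer

def quadratic_cost(params):
    # Simple bowl-shaped cost with its minimum at the origin.
    return np.sum(np.array(params) ** 2)

opt = GradientOptimizer(quadratic_cost, step_size=0.1)
final_cost = opt.optimize(params=[0.5, -0.3], steps=50)
print(final_cost)  # shrinks towards 0 as the parameters approach the origin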
Example #7
class A:
    sgd_opt = GradientDescentOptimizer(stepsize)
    mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
    nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
    adag_opt = AdagradOptimizer(stepsize)
    rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
    adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
Example #8
    def test_gradient_descent_optimizer_usergrad(self, x_start, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        using user-provided gradients."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
        grad_uni_fns = [
            lambda x: (np.cos(x), ),
            lambda x: (np.exp(x / 10.0) / 10.0, ),
            lambda x: (2 * x, ),
        ]
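        # The gradient list is reversed on purpose, so most grad_fns do not
        # match their cost function; the assertion below can therefore only
        # pass if step() used the supplied grad_fn rather than autograd.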

        for gradf, f in zip(grad_uni_fns[::-1], univariate_funcs):
            x_new = sgd_opt.step(f, x_start, grad_fn=gradf)
            x_correct = x_start - gradf(x_start)[0] * stepsize
            assert np.allclose(x_new, x_correct, atol=tol)
Example #9
    def test_step_and_cost_autograd_sgd_single_multid_input(self):
        """Test that the correct cost is returned via the step_and_cost method for the
        gradient-descent optimizer"""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)
        multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])

        @qml.qnode(qml.device("default.qubit", wires=1))
        def quant_fun_mdarr(var):
            qml.RX(var[0, 1], wires=[0])
            qml.RY(var[1, 0], wires=[0])
            qml.RY(var[1, 1], wires=[0])
            return qml.expval(qml.PauliZ(0))

        _, res = sgd_opt.step_and_cost(quant_fun_mdarr, multid_array)
        expected = quant_fun_mdarr(multid_array)

        assert np.all(res == expected)
Example #10
    def test_step_and_cost_autograd_sgd_multiple_inputs(self):
        """Test that the correct cost is returned via the step_and_cost method for the
        gradient-descent optimizer"""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        @qml.qnode(qml.device("default.qubit", wires=1))
        def quant_fun(*variables):
            qml.RX(variables[0][1], wires=[0])
            qml.RY(variables[1][2], wires=[0])
            qml.RY(variables[2], wires=[0])
            return qml.expval(qml.PauliZ(0))

        inputs = [
            np.array((0.2, 0.3), requires_grad=True),
            np.array([0.4, 0.2, 0.4], requires_grad=True),
            np.array(0.1, requires_grad=True),
        ]

        _, res = sgd_opt.step_and_cost(quant_fun, *inputs)
        expected = quant_fun(*inputs)

        assert np.all(res == expected)
Example #11
    def test_gradient_descent_optimizer_multivar_multidim(self, tol):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multivariate functions and with higher dimensional inputs."""
        stepsize = 0.1
        sgd_opt = GradientDescentOptimizer(stepsize)

        mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x]),
        ]
        grad_mvar_mdim_funcs = [
            lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                 [-np.sin(x[1, 0]), 1.0]]), ),
            lambda x: (np.array([
                [
                    np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                    np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2),
                ],
                [0.0, 0.0],
            ]), ),
            lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]), ),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs):
            for jdx in range(len(x_vals[:-3])):
                x_vec = x_vals[jdx:jdx + 4]
                x_vec_multidim = np.reshape(x_vec, (2, 2))
                x_new = sgd_opt.step(f, x_vec_multidim)
                x_correct = x_vec_multidim - gradf(
                    x_vec_multidim)[0] * stepsize
                x_new_flat = x_new.flatten()
                x_correct_flat = x_correct.flatten()
                assert np.allclose(x_new_flat, x_correct_flat, atol=tol)
Example #12
    def test_private_stepsize(self):
        """
        Tests whether it is possible to get and set ``stepsize`` using ``_stepsize``
        and whether a ``UserWarning`` is raised while doing so.
        """
        eta = 0.5
        opt = GradientDescentOptimizer(eta)
        assert opt._stepsize == eta

        with pytest.warns(
                UserWarning,
                match=
                "'_stepsize' is deprecated. Please use 'stepsize' instead."):
            opt._stepsize

        eta2 = 0.1
        opt._stepsize = eta2
        assert opt.stepsize == eta2

        with pytest.warns(
                UserWarning,
                match=
                "'_stepsize' is deprecated. Please use 'stepsize' instead."):
            opt._stepsize = eta2
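Warnings like these typically come from a property-based shim over the public attribute. A minimal sketch of the pattern under test (illustrative only, not PennyLane's actual source):

import warnings

class Optimizer:
    def __init__(self, stepsize):
        self.stepsize = stepsize  # the supported public attribute

    @property
    def _stepsize(self):
        warnings.warn("'_stepsize' is deprecated. Please use 'stepsize' instead.")
        return self.stepsize

    @_stepsize.setter
    def _stepsize(self, value):
        warnings.warn("'_stepsize' is deprecated. Please use 'stepsize' instead.")
        self.stepsize = value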
Example #13
def opt(opt_name):
    stepsize, gamma, delta = 0.1, 0.5, 0.8

    if opt_name == "gd":
        return GradientDescentOptimizer(stepsize)

    if opt_name == "nest":
        return NesterovMomentumOptimizer(stepsize, momentum=gamma)

    if opt_name == "moment":
        return MomentumOptimizer(stepsize, momentum=gamma)

    if opt_name == "ada":
        return AdagradOptimizer(stepsize)

    if opt_name == "rms":
        return RMSPropOptimizer(stepsize, decay=gamma)

    if opt_name == "adam":
        return AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
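Assuming the optimizer classes are imported from pennylane.optimize, the factory reads naturally at call sites; note that an unrecognized name silently returns None:

sgd = opt("gd")      # GradientDescentOptimizer(0.1)
adam = opt("adam")   # AdamOptimizer(0.1, beta1=0.5, beta2=0.8)
assert opt("typo") is None  # no error is raised for unknown names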
Example #14
    def test_update_stepsize(self):
        """
        Tests whether the stepsize value is updated correctly and whether a ``UserWarning``
        is raised when the ``update_stepsize`` method is used.
        """

        eta = 0.5
        opt = GradientDescentOptimizer(eta)
        assert opt.stepsize == eta

        eta2 = 0.1
        opt.update_stepsize(eta2)
        assert opt.stepsize == eta2

        with pytest.warns(
                UserWarning,
                match=
                "'update_stepsize' is deprecated. Stepsize value can be updated using "
                "the 'stepsize' attribute.",
        ):
            opt.update_stepsize(eta)
Example #15
import pennylane as qml
from pennylane.optimize import GradientDescentOptimizer
# Create device
dev = qml.device('default.qubit', wires=1)


# Quantum node
@qml.qnode(dev)
def circuit1(var):
    qml.RX(var[0], wires=0)
    qml.RY(var[1], wires=0)
    return qml.expval(qml.PauliZ(0))


# Create optimizer
opt = GradientDescentOptimizer(0.25)
# Optimize circuit output
var = [0.5, 0.2]
for it in range(30):
    var = opt.step(circuit1, var)
    print("Step {}: cost: {}".format(it, circuit1(var)))
Example #16
def reset(opt):
    if getattr(opt, "reset", None):
        opt.reset()


@pytest.mark.parametrize(
    "opt, opt_name",
    [
        (GradientDescentOptimizer(stepsize), "gd"),
        (MomentumOptimizer(stepsize, momentum=gamma), "moment"),
        (NesterovMomentumOptimizer(stepsize, momentum=gamma), "nest"),
        (AdagradOptimizer(stepsize), "ada"),
        (RMSPropOptimizer(stepsize, decay=gamma), "rms"),
        (AdamOptimizer(stepsize, beta1=gamma, beta2=delta), "adam"),
        (RotosolveOptimizer(), "roto"),
    ],
)
class TestOverOpts:
    """Tests keywords, multiple arguements, and non-training arguments in relevent optimizers"""
    def test_kwargs(self, mocker, opt, opt_name, tol):
        """Test that the keywords get passed and alter the function"""
        class func_wrapper:
            @staticmethod
            def func(x, c=1.0):
                # The source snippet is truncated here; a scaled quadratic is a
                # plausible stand-in body, assumed for completeness.
                return c * x ** 2
Example #17
def gen_cost(gen_weights):
    """Cost for the generator.

    Args:
        gen_weights: variables of the generator
    """
    return -prob_fake_true(gen_weights, disc_weights)


# Fix the angles with which the true data is generated
phi = np.pi / 6
theta = np.pi / 2
omega = np.pi / 7
# Initialize the other variables
np.random.seed(0)
eps = 1e-2
gen_weights = np.array([np.pi] + [0] * 8) + np.random.normal(scale=eps, size=[9])
disc_weights = np.random.normal(size=[9])

opt = GradientDescentOptimizer(0.1)

print("Training the discriminator.")
for it in range(50):
    disc_weights = opt.step(disc_cost, disc_weights) 
    cost = disc_cost(disc_weights)
    if it % 5 == 0:
        print("Step {}: cost = {}".format(it+1, cost))

print("Probability for the discriminator to classify real data correctly: ", prob_real_true(disc_weights))
print("Probability for the discriminator to classify fake data as real: ", prob_fake_true(gen_weights, disc_weights))

print("Training the generator.")
# train generator
for it in range(200):
    gen_weights = opt.step(gen_cost, gen_weights)
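
# Note: each call to step() only differentiates with respect to the weights
# passed in; the other network's weights enter gen_cost / disc_cost as fixed
# closure variables, giving the usual alternating GAN training scheme.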
Example #18
# Test it for the [1, -1, -1] spin configuration. The total energy for this Ising model should be
# H = -(J1*s1*s2 + J2*s2*s3) = -(1*1*(-1) + (-1)*(-1)*(-1)) = 2

# t1 = np.array([0, np.pi, 0])
# t2 = np.array([0, np.pi, 0])
t1 = np.pi * (np.random.ranf(3))
t2 = np.pi * (np.random.ranf(3))
var_init = np.array([t1, t2])
cost_init = cost(var_init)
print(var_init)
print(cost_init)

# Now we optimize using the PennyLane gradient-descent optimizer

gd = GradientDescentOptimizer(0.4)

var = var_init
var_gd = [var]
cost_gd = [cost_init]

for it in range(100):
    var = gd.step(cost, var)
    if (it + 1) % 5 == 0:
        var_gd.append(var)
        cost_gd.append(cost(var))
        print('Energy after step {:5d}: {: .7f} | Angles: {}'.format(
            it + 1, cost(var), var))

# As expected, the minimum energy is -2 for the spin configuration [1, 1, -1]
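A quick classical sanity check of the energies quoted in the comments (J1 = 1 and J2 = -1 are assumed from the arithmetic at the top of this example):

def ising_energy(s, J1=1, J2=-1):
    # H = -(J1*s1*s2 + J2*s2*s3) for a three-spin chain.
    return -(J1 * s[0] * s[1] + J2 * s[1] * s[2])

print(ising_energy([1, -1, -1]))  # 2, the energy computed in the comment above
print(ising_energy([1, 1, -1]))   # -2, the claimed minimum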
Example #19
def __init__(self, cost, step_size):
    self.cost = cost
    self.optimizer = GradientDescentOptimizer(step_size)
Example #20
def objective(var):
    """Cost function to be minimized.

    Args:
        var (array[float]): array of variables

    Returns:
        float: output of variational circuit
    """
    return circuit(var)


var_init = np.array([-0.011, -0.012])

gd = GradientDescentOptimizer(0.4)

var = var_init
var_gd = [var]

for it in range(100):
    var = gd.step(objective, var)

    if (it + 1) % 5 == 0:
        var_gd.append(var)
        print('Objective after step {:5d}: {: .7f} | Angles: {}'.format(
            it + 1, objective(var), var))

ada = AdagradOptimizer(0.4)

var = var_init
Example #21
class BasicTest(BaseTest):
    """Basic optimizer tests.
    """
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
        ]
        self.grad_uni_fns = [
            np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x])
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
            ]),
            lambda x: np.array([2 * x_ for x_ in x])
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x])
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                [-np.sin(x[1, 0]), 1.]]),
            lambda x: np.array([[
                np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2)
            ], [0., 0.]]),
            lambda x: np.array([[2 * x_[0], 0.] for x_ in x])
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]

    def test_mixed_inputs_for_hybrid_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for hybrid optimization tasks."""
        self.logTestName()

        hybrid_list = self.sgd_opt.step(self.hybrid_fun, self.mixed_list)
        hybrid_tuple = self.sgd_opt.step(self.hybrid_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(hybrid_list[0],
                                  hybrid_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[1],
                                  hybrid_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[2],
                                  hybrid_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_classical_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely classical optimization tasks."""
        self.logTestName()

        class_list = self.sgd_opt.step(self.class_fun, self.mixed_list)
        class_tuple = self.sgd_opt.step(self.class_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(class_list[0],
                                  class_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[1],
                                  class_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[2],
                                  class_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_quantum_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely quantum optimization tasks."""
        self.logTestName()

        quant_list = self.sgd_opt.step(self.quant_fun, self.mixed_list)
        quant_tuple = self.sgd_opt.step(self.quant_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(quant_list[0],
                                  quant_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[1],
                                  quant_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[2],
                                  quant_tuple[2],
                                  delta=self.tol)

    def test_nested_and_flat_returns_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         nested and flat lists."""
        self.logTestName()

        nested = self.sgd_opt.step(self.hybrid_fun_nested, self.nested_list)
        flat = self.sgd_opt.step(self.hybrid_fun_flat, self.flat_list)

        self.assertAllAlmostEqual(flat, list(_flatten(nested)), delta=self.tol)

    def test_array_and_list_return_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         lists and arrays."""
        self.logTestName()

        array = self.sgd_opt.step(self.hybrid_fun_mdarr, self.multid_array)
        lst = self.sgd_opt.step(self.hybrid_fun_mdlist, self.multid_list)

        self.assertAllAlmostEqual(array, lst, delta=self.tol)

    def test_gradient_descent_optimizer_univar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    x_vec = x_vals[jdx:jdx + 2]
                    x_new = self.sgd_opt.step(f, x_vec)
                    x_correct = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar_multidim(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions and with higher dimensional inputs."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_mvar_mdim_funcs,
                                  self.mvar_mdim_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-3])):
                    x_vec = x_vals[jdx:jdx + 4]
                    x_vec_multidim = np.reshape(x_vec, (2, 2))
                    x_new = self.sgd_opt.step(f, x_vec_multidim)
                    x_correct = x_vec_multidim - gradf(
                        x_vec_multidim) * stepsize
                    x_new_flat = x_new.flatten()
                    x_correct_flat = x_correct.flatten()
                    self.assertAllAlmostEqual(x_new_flat,
                                              x_correct_flat,
                                              delta=self.tol)

    def test_gradient_descent_optimizer_usergrad(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start, grad_fn=gradf)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_momentum_optimizer_univar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.mom_opt.reset()

                    x_onestep = self.mom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_momentum_optimizer_multivar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.mom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.mom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_univar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_nesterovmomentum_optimizer_multivar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.nesmom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.nesmom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_usergrad(self):
        """Tests that nesterov momentum optimizer takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start, grad_fn=gradf)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f,
                                                      x_onestep,
                                                      grad_fn=gradf)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_univar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adag_opt.reset()

                    x_onestep = self.adag_opt.step(f, x_start)
                    past_grads = gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_start) * gradf(x_start) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_multivar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adag_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adag_opt.step(f, x_vec)
                    past_grads = gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_vec) * gradf(x_vec) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_rmsprop_optimizer_univar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.rms_opt.reset()

                    x_onestep = self.rms_opt.step(f, x_start)
                    past_grads = (1 - gamma) * gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_start)*gradf(x_start) \
                                 + (1 - gamma) * gradf(x_onestep)*gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_rmsprop_optimizer_multivar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.rms_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.rms_opt.step(f, x_vec)
                    past_grads = (1 - gamma) * gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_vec) * gradf(x_vec) \
                                 + (1 - gamma) * gradf(x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_adam_optimizer_univar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adam_opt.reset()

                    x_onestep = self.adam_opt.step(f, x_start)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_start)
                    secondmoment = gradf(x_start) * gradf(x_start)
                    x_onestep_target = x_start - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_start) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_start) * gradf(x_start) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adam_optimizer_multivar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adam_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adam_opt.step(f, x_vec)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_vec)
                    secondmoment = gradf(x_vec) * gradf(x_vec)
                    x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_vec) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_vec) * gradf(x_vec) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)
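For reference, the update rules these tests encode, written exactly as the tests compute them (η = stepsize, γ and δ the momentum/decay parameters, g_t = ∇f(x_t), and ε = 1e-8):

# Vanilla GD:  x_{t+1} = x_t - η g_t
# Momentum:    a_t = γ a_{t-1} + g_t,            x_{t+1} = x_t - η a_t
# Nesterov:    as Momentum, with g_t evaluated at x_t - η γ a_{t-1}
# Adagrad:     x_{t+1} = x_t - η g_t / sqrt(g_1² + ... + g_t² + ε)
# RMSProp:     G_t = γ G_{t-1} + (1 - γ) g_t²,   x_{t+1} = x_t - η g_t / sqrt(G_t + ε)
# Adam:        m_t = γ m_{t-1} + (1 - γ) g_t (with m_1 = g_1), v_t likewise with δ,
#              x_{t+1} = x_t - η sqrt(1 - δ^t) / (1 - γ^t) · m_t / (sqrt(v_t) + ε)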
Example #22
    return qml.expval(qml.NumberOperator(0))  # modern API; MeanPhoton was renamed to NumberOperator


def objective(var):
    """Objective to minimize.

    Args:
        var (array[float]): array containing the variables

    Returns:
        output of the variational circuit
    """
    return circuit(var)


opt = GradientDescentOptimizer(stepsize=0.1)

var_init = np.array([0.01, 0.01])

var = var_init
var_gd = []

for iteration in range(100):
    var = opt.step(objective, var)
    var_gd.append(var)

    if iteration % 10 == 0:
        print('Cost after step {:3d}: {:0.7f} | Variables [{:0.7f}, {:0.7f}]'
              .format(iteration, objective(var), var[0], var[1]))


def cost(var):
    """Cost function to be minimized.

    Args:
        var (list[float]): list of variables

    Returns:
        float: square of linear combination of the expectations
    """

    expX = circuit_X()
    expY = circuit_Y()

    return (var[0] * expX + var[1] * expY)**2


# optimizer
opt = GradientDescentOptimizer(0.5)

# minimize the cost
var = [0.3, 2.5]
var_gd = [var]
for it in range(20):
    var = opt.step(cost, var)
    var_gd.append(var)

    print(
        'Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]'.format(
            it + 1, cost(var), var[0], var[1]))