    def test_nesterovmomentum_optimizer_univar(self, x_start, tol):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
        grad_uni_fns = [
            lambda x: (np.cos(x),),
            lambda x: (np.exp(x / 10.0) / 10.0,),
            lambda x: (2 * x,),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            nesmom_opt.reset()

            x_onestep = nesmom_opt.step(f, x_start)
            x_onestep_target = x_start - gradf(x_start)[0] * stepsize
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = nesmom_opt.step(f, x_onestep)
            momentum_term = gamma * gradf(x_start)[0]
            shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
            x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
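
For reference, the two assertions above encode the standard Nesterov momentum recursion. The following is a minimal sketch of that update rule, assuming an accumulated velocity that starts at zero; the helper name and signature are illustrative, not PennyLane internals.

def nesterov_step(grad_fn, x, velocity, stepsize=0.1, momentum=0.5):
    # evaluate the gradient at the look-ahead point, then accumulate and step
    new_velocity = momentum * velocity + stepsize * grad_fn(x - momentum * velocity)[0]
    return x - new_velocity, new_velocity

# Starting from velocity = 0, two successive calls reproduce x_onestep_target
# and x_twosteps_target above (grad_fn is assumed to return a tuple, as the
# gradient functions in this test do).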
Example #2
def optimize(feats_train, feats_val, Y_train, Y_val, features, Y):
    num_qubits = 2
    num_layers = 6
    var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

    opt = NesterovMomentumOptimizer(0.01)
    batch_size = 5
    num_train = len(feats_train)  # batches are drawn from the training set

    # train the variational classifier
    var = var_init
    for it in range(60):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        feats_train_batch = feats_train[batch_index]
        Y_train_batch = Y_train[batch_index]
        var = opt.step(lambda v: cost(v, feats_train_batch, Y_train_batch),
                       var)

        # Compute predictions on train and validation set
        predictions_train = [
            np.sign(variational_classifier(var, angles=f)) for f in feats_train
        ]
        predictions_val = [
            np.sign(variational_classifier(var, angles=f)) for f in feats_val
        ]

        # Compute accuracy on train and validation set
        acc_train = accuracy(Y_train, predictions_train)
        acc_val = accuracy(Y_val, predictions_val)

        print(
            "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
            "".format(it + 1, cost(var, features, Y), acc_train, acc_val))
    return var
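
This snippet, like several of those below, relies on ``cost``, ``variational_classifier``, and ``accuracy`` being defined elsewhere. The following is a minimal sketch of what the first two typically look like in PennyLane's variational-classifier demos, assuming a two-qubit ``default.qubit`` device, an angle-embedding circuit, and a square-loss cost; the device, template choice, and exact signatures are assumptions, not the original code.

import pennylane as qml

dev = qml.device("default.qubit", wires=2)


@qml.qnode(dev)
def circuit(weights, angles):
    # encode the feature angles, then apply the trainable entangling layers
    qml.templates.AngleEmbedding(angles, wires=range(2))
    qml.templates.StronglyEntanglingLayers(weights, wires=range(2))
    return qml.expval(qml.PauliZ(0))


def variational_classifier(var, angles):
    # var = (weights, bias); the scalar bias shifts the circuit's expectation value
    weights, bias = var
    return circuit(weights, angles=angles) + bias


def square_loss(labels, predictions):
    return sum((l - p) ** 2 for l, p in zip(labels, predictions)) / len(labels)


def cost(var, features, labels):
    predictions = [variational_classifier(var, angles=f) for f in features]
    return square_loss(labels, predictions)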
    def test_nesterovmomentum_optimizer_multivar(self, tol):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma = 0.1, 0.5
        nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                nesmom_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = nesmom_opt.step(f, x_vec)
                x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = nesmom_opt.step(f, x_onestep)
                momentum_term = gamma * gradf(x_vec)[0]
                shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
                x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #4
    def train_classifier(x_train, y_train, x_test, y_test, shots=50):
        from pennylane.optimize import NesterovMomentumOptimizer, AdamOptimizer

        old_shots = dev.shots
        dev.shots = shots

        num_train = len(x_train)
        num_layers = 4
        var_init = (qml.init.strong_ent_layers_uniform(num_layers, n_qubits,
                                                       3), 0.0)
        stepsize = 0.1
        opt = NesterovMomentumOptimizer(stepsize)
        batch_size = 5
        maxit = 20

        # train the variational classifier
        var = var_init
        for it in range(maxit):
            # Update the weights by one optimizer step
            batch_index = np.random.randint(0, num_train, (batch_size, ))
            x_train_batch = x_train[batch_index]
            y_train_batch = y_train[batch_index]
            var = opt.step(lambda v: cost(v, x_train_batch, y_train_batch),
                           var)

            # stepsize *= 0.95
            # opt.update_stepsize(stepsize)

            # Compute predictions on train and validation set
            predictions_train = [
                np.sign(variational_classifier(var, f)) for f in x_train
            ]
            acc_train = accuracy(y_train, predictions_train)

            if DEBUG:
                predictions_val = [
                    np.sign(variational_classifier(var, f)) for f in x_test
                ]
                acc_val = accuracy(y_test, predictions_val)

                print(
                    "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
                    "".format(it + 1, cost(var, x_train, y_train), acc_train,
                              acc_val))

            if acc_train > 0.95:
                break

        # restore the device's original shot number once training is done
        dev.shots = old_shots
        return var
def train_and_test(X_train, Y_train, X_test, Y_test):
    # Relies on var_init, num_iterations, num_train, X, Y, cost,
    # variational_classifier, and accuracy being defined at module level.
    opt = NesterovMomentumOptimizer(0.01)
    batch_size = 5

    # train the variational classifier
    var = var_init

    test_accuracies = []
    train_accuracies = []
    costs = []
    for it in range(num_iterations):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = X_train[batch_index]
        Y_train_batch = Y_train[batch_index]
        var = opt.step(lambda v: cost(v, X_train_batch, Y_train_batch), var)

        # Compute predictions on the train and test sets
        predictions_train = [np.sign(variational_classifier(var, f)) for f in X_train]
        predictions_test = [np.sign(variational_classifier(var, f)) for f in X_test]

        # Compute accuracy on the train and test sets
        acc_train = accuracy(Y_train, predictions_train)
        acc_test = accuracy(Y_test, predictions_test)

        # Compute cost on all samples
        c = cost(var, X, Y)

        costs.append(c)
        test_accuracies.append(acc_test)
        train_accuracies.append(acc_train)

        print("Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
              "".format(it+1, c, acc_train, acc_test))

    return train_accuracies, test_accuracies, costs, var
data = np.loadtxt("data/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}

# initialize weight layers
np.random.seed(0)
num_qubits = 4
num_layers = 2
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)

# create optimizer
opt = NesterovMomentumOptimizer(0.5)
batch_size = 5

# train the variational classifier
var = var_init
for it in range(25):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X), (batch_size, ))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)

    # Compute accuracy
    predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
    acc = accuracy(Y, predictions)

    print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(it + 1, cost(var, X, Y), acc))
batch_size = 5

##############################################################################
# …and train the optimizer. We track the accuracy, that is, the share of
# correctly classified data samples. For this, we compute the outputs of the
# variational classifier and turn them into predictions in
# :math:`\{-1,1\}` by taking the sign of the output.
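
##############################################################################
# The ``accuracy`` helper used below is not shown in this excerpt. A minimal
# sketch that matches the description above (labels and predictions both in
# :math:`\{-1,1\}`) could look like this; the original tutorial's version may
# differ in detail.


def accuracy(labels, predictions):
    # fraction of samples whose prediction matches the label
    correct = sum(abs(l - p) < 1e-5 for l, p in zip(labels, predictions))
    return correct / len(labels)
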

var = var_init
for it in range(25):

    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X), (batch_size, ))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)

    # Compute accuracy
    predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
    acc = accuracy(Y, predictions)

    print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(
        it + 1, cost(var, X, Y), acc))

##############################################################################
# 2. Iris classification
# ----------------------
#
# Quantum and classical nodes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
Example #8
class BasicTest(BaseTest):
    """Basic optimizer tests.
    """
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin,
            lambda x: np.exp(x / 10.),
            lambda x: x ** 2,
        ]
        self.grad_uni_fns = [
            np.cos,
            lambda x: np.exp(x / 10.) / 10.,
            lambda x: 2 * x,
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
            ]),
            lambda x: np.array([2 * x_ for x_ in x]),
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0] ** 2 for x_ in x]),
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                [-np.sin(x[1, 0]), 1.]]),
            lambda x: np.array([[np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                                 np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1]) ** 2)],
                                [0., 0.]]),
            lambda x: np.array([[2 * x_[0], 0.] for x_ in x]),
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]

    def test_mixed_inputs_for_hybrid_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for hybrid optimization tasks."""
        self.logTestName()

        hybrid_list = self.sgd_opt.step(self.hybrid_fun, self.mixed_list)
        hybrid_tuple = self.sgd_opt.step(self.hybrid_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(hybrid_list[0],
                                  hybrid_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[1],
                                  hybrid_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[2],
                                  hybrid_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_classical_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely classical optimization tasks."""
        self.logTestName()

        class_list = self.sgd_opt.step(self.class_fun, self.mixed_list)
        class_tuple = self.sgd_opt.step(self.class_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(class_list[0],
                                  class_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[1],
                                  class_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[2],
                                  class_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_quantum_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely quantum optimization tasks."""
        self.logTestName()

        quant_list = self.sgd_opt.step(self.quant_fun, self.mixed_list)
        quant_tuple = self.sgd_opt.step(self.quant_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(quant_list[0],
                                  quant_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[1],
                                  quant_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[2],
                                  quant_tuple[2],
                                  delta=self.tol)

    def test_nested_and_flat_returns_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         nested and flat lists."""
        self.logTestName()

        nested = self.sgd_opt.step(self.hybrid_fun_nested, self.nested_list)
        flat = self.sgd_opt.step(self.hybrid_fun_flat, self.flat_list)

        self.assertAllAlmostEqual(flat, list(_flatten(nested)), delta=self.tol)

    def test_array_and_list_return_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         lists and arrays."""
        self.logTestName()

        array_res = self.sgd_opt.step(self.hybrid_fun_mdarr, self.multid_array)
        list_res = self.sgd_opt.step(self.hybrid_fun_mdlist, self.multid_list)

        self.assertAllAlmostEqual(array_res, list_res, delta=self.tol)

    def test_gradient_descent_optimizer_univar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    x_vec = x_vals[jdx:jdx + 2]
                    x_new = self.sgd_opt.step(f, x_vec)
                    x_correct = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar_multidim(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions and with higher dimensional inputs."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_mvar_mdim_funcs,
                                  self.mvar_mdim_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-3])):
                    x_vec = x_vals[jdx:jdx + 4]
                    x_vec_multidim = np.reshape(x_vec, (2, 2))
                    x_new = self.sgd_opt.step(f, x_vec_multidim)
                    x_correct = x_vec_multidim - gradf(
                        x_vec_multidim) * stepsize
                    x_new_flat = x_new.flatten()
                    x_correct_flat = x_correct.flatten()
                    self.assertAllAlmostEqual(x_new_flat,
                                              x_correct_flat,
                                              delta=self.tol)

    def test_gradient_descent_optimizer_usergrad(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start, grad_fn=gradf)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_momentum_optimizer_univar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.mom_opt.reset()

                    x_onestep = self.mom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_momentum_optimizer_multivar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.mom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.mom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_univar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_nesterovmomentum_optimizer_multivar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.nesmom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.nesmom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_usergrad(self):
        """Tests that nesterov momentum optimizer takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start, grad_fn=gradf)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f,
                                                      x_onestep,
                                                      grad_fn=gradf)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_univar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adag_opt.reset()

                    x_onestep = self.adag_opt.step(f, x_start)
                    past_grads = gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_start) * gradf(x_start) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_multivar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adag_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adag_opt.step(f, x_vec)
                    past_grads = gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_vec) * gradf(x_vec) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_rmsprop_optimizer_univar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.rms_opt.reset()

                    x_onestep = self.rms_opt.step(f, x_start)
                    past_grads = (1 - gamma) * gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_start)*gradf(x_start) \
                                 + (1 - gamma) * gradf(x_onestep)*gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_rmsprop_optimizer_multivar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.rms_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.rms_opt.step(f, x_vec)
                    past_grads = (1 - gamma) * gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_vec) * gradf(x_vec) \
                                 + (1 - gamma) * gradf(x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_adam_optimizer_univar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adam_opt.reset()

                    x_onestep = self.adam_opt.step(f, x_start)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_start)
                    secondmoment = gradf(x_start) * gradf(x_start)
                    x_onestep_target = x_start - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_start) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_start) * gradf(x_start) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adam_optimizer_multivar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adam_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adam_opt.step(f, x_vec)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_vec)
                    secondmoment = gradf(x_vec) * gradf(x_vec)
                    x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_vec) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_vec) * gradf(x_vec) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)
Example #9
from itertools import chain

import numpy as np
from pennylane.init import strong_ent_layers_uniform
from sklearn import metrics

# n_layers, n_qubits, batches, n_epochs, X_train, e_train, X_test, y_test,
# cost, and circuit come from earlier parts of the original script.
theta = strong_ent_layers_uniform(n_layers, n_qubits, seed=15)

# train the variational classifier

# start of main learning loop
# build the optimizer object
pennylane_opt = NesterovMomentumOptimizer()

log = []
# split training data into batches
X_batches = np.array_split(np.arange(len(X_train)), batches)
for it, batch_index in enumerate(chain(*(n_epochs * [X_batches]))):
    # Update the weights by one optimizer step
    batch_cost = lambda t: cost(t, X_train[batch_index], e_train[batch_index])
    theta = pennylane_opt.step(batch_cost, theta)
    log.append({"theta": theta})
# end of learning loop
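
# ``sgn`` is not defined in this excerpt. A minimal stand-in, assuming it
# simply maps each real-valued score to a class label in {-1, 1}:
def sgn(scores):
    return np.where(scores >= 0, 1, -1)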

# convert scores to classes
scores = np.array([circuit(theta, x=x) for x in X_test])
y_pred = sgn(scores)

print(metrics.accuracy_score(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))


def plot(X, y, log, name="", density=23):
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages