Example #1
    def test_apply_grad(self, grad, args, tol):
        """
        Test that the gradient can be applied correctly to a set of parameters
        and that accumulation works correctly.
        """
        stepsize, gamma, delta, eps = 0.1, 0.5, 0.8, 1e-8
        sgd_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta, eps=eps)
        grad, args = np.array(grad), np.array(args, requires_grad=True)

        a1 = (1 - gamma) * grad
        b1 = (1 - delta) * grad ** 2
        a1_corrected = a1 / (1 - gamma)
        b1_corrected = b1 / (1 - delta)
        expected = args - stepsize * a1_corrected / (np.sqrt(b1_corrected) + eps)
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected, atol=tol)

        # Simulate a new step
        grad = grad + args
        args = expected

        a2 = gamma * a1 + (1 - gamma) * grad
        b2 = delta * b1 + (1 - delta) * grad ** 2
        a2_corrected = a2 / (1 - gamma ** 2)
        b2_corrected = b2 / (1 - delta ** 2)
        expected = args - stepsize * a2_corrected / (np.sqrt(b2_corrected) + eps)
        res = sgd_opt.apply_grad(grad, args)
        assert np.allclose(res, expected, atol=tol)
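For reference, the bias-corrected Adam update that the test reproduces by hand can be sketched in plain NumPy as follows (a standalone illustration; the names adam_step and state are ours, not part of the PennyLane API):

import numpy as np

def adam_step(params, grad, state, stepsize=0.1, beta1=0.5, beta2=0.8, eps=1e-8):
    """One Adam update; returns new parameters and the (m, v, t) accumulation."""
    m, v, t = state
    t += 1
    m = beta1 * m + (1 - beta1) * grad          # first-moment estimate
    v = beta2 * v + (1 - beta2) * grad ** 2     # second-moment estimate
    m_hat = m / (1 - beta1 ** t)                # bias corrections
    v_hat = v / (1 - beta2 ** t)
    return params - stepsize * m_hat / (np.sqrt(v_hat) + eps), (m, v, t)

# Two manual steps, mirroring the structure of the test above.
params, state = np.array([0.5, -0.2]), (0.0, 0.0, 0)
grad = np.array([0.1, 0.3])
params, state = adam_step(params, grad, state)
params, state = adam_step(params, grad + params, state)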
Example #2
    def train(X=[], Y=[], iterations=25, num_of_layers=NUM_OF_LAYERS):
        opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)

        var = weight_init(num_of_layers)
        for it in range(iterations):
            var = opt.step(lambda v: cost(v, X, Y), var)
            # print("Iter: {:5d} | Cost: {:0.7f} ".format(it + 1, cost(var, X, Y)))
        return var
Example #3
 def train(self, iterations=25):
     opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)
     var_init = 0.05 * np.random.randn(
         self.num_of_layers,
         5)  # initialise network weights from a normal distribution
     self.weights = var_init
     for it in range(iterations):
         self.weights = opt.step(lambda v: self.cost(v, X, Y), self.weights)
         print("Iter: {:5d} | Cost: {:0.7f} ".format(
             it + 1, self.cost(self.weights, X, Y)))
Example #4
    def test_update_stepsize(self):
        """Tests that the stepsize correctly updates"""

        eta = 0.5
        opt = AdamOptimizer(eta)
        assert opt._stepsize == eta

        eta2 = 0.1
        opt.update_stepsize(eta2)
        assert opt._stepsize == eta2
Example #5
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
        ]
        self.grad_uni_fns = [
            np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x])
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
            ]),
            lambda x: np.array([2 * x_ for x_ in x])
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x])
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                [-np.sin(x[1, 0]), 1.]]),
            lambda x: np.array([[
                np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2)
            ], [0., 0.]]),
            lambda x: np.array([[2 * x_[0], 0.] for x_ in x])
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]
Example #6
    def test_update_stepsize(self):
        """Tests that the stepsize correctly updates"""
        self.logTestName()

        eta = 0.5
        opt = AdamOptimizer(eta)
        self.assertAlmostEqual(opt._stepsize, eta)

        eta2 = 0.1
        opt.update_stepsize(eta2)
        self.assertAlmostEqual(opt._stepsize, eta2)
Example #7
 class A:
     sgd_opt = GradientDescentOptimizer(stepsize)
     mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
     nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
     adag_opt = AdagradOptimizer(stepsize)
     rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
     adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
Example #8
    def test_adam_optimizer_properties(self):
        """Test the adam property interfaces"""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        # check if None is returned when accumulation is empty
        assert adam_opt.fm is None
        assert adam_opt.sm is None
        assert adam_opt.t is None

        # Do some calculations to fill accumulation
        adam_opt.step(np.sin, np.random.rand(1))

        # Check that the properties return the same values as those stored in the accumulation
        assert adam_opt.fm == adam_opt.accumulation["fm"]
        assert adam_opt.sm == adam_opt.accumulation["sm"]
        assert adam_opt.t == adam_opt.accumulation["t"]
Example #9
    def test_adam_optimizer_univar(self, x_start, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for univariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
        grad_uni_fns = [
            lambda x: (np.cos(x),),
            lambda x: (np.exp(x / 10.0) / 10.0,),
            lambda x: (2 * x,),
        ]

        for gradf, f in zip(grad_uni_fns, univariate_funcs):
            adam_opt.reset()

            x_onestep = adam_opt.step(f, x_start)
            adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
            firstmoment = (1 - gamma) * gradf(x_start)[0]
            secondmoment = (1 - delta) * gradf(x_start)[0] * gradf(x_start)[0]
            x_onestep_target = x_start - adapted_stepsize * firstmoment / (
                np.sqrt(secondmoment) + 1e-8
            )
            assert np.allclose(x_onestep, x_onestep_target, atol=tol)

            x_twosteps = adam_opt.step(f, x_onestep)
            adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
            firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
            secondmoment = (
                delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
            )
            x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                np.sqrt(secondmoment) + 1e-8
            )
            assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
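Note that this test folds the bias corrections into an adapted step size instead of correcting the moments themselves; up to the small eps term the two formulations are algebraically identical, as this quick standalone check (illustrative, plain NumPy) confirms:

import numpy as np

stepsize, gamma, delta, g, t = 0.1, 0.5, 0.8, 0.3, 1
m, v = (1 - gamma) * g, (1 - delta) * g ** 2
corrected_moments = stepsize * (m / (1 - gamma ** t)) / np.sqrt(v / (1 - delta ** t))
adapted_stepsize = stepsize * np.sqrt(1 - delta ** t) / (1 - gamma ** t) * m / np.sqrt(v)
assert np.isclose(corrected_moments, adapted_stepsize)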
Example #10
    def test_private_stepsize(self):
        """
        Tests whether it is possible to get and set ``stepsize`` using ``_stepsize``
        and whether a ``UserWarning`` is raised while doing so.
        """
        eta = 0.5
        opt = AdamOptimizer(eta)
        assert opt._stepsize == eta

        with pytest.warns(
            UserWarning, match="'_stepsize' is deprecated. Please use 'stepsize' instead."
        ):
            opt._stepsize

        eta2 = 0.1
        opt._stepsize = eta2
        assert opt.stepsize == eta2

        with pytest.warns(
            UserWarning, match="'_stepsize' is deprecated. Please use 'stepsize' instead."
        ):
            opt._stepsize = eta2
Example #11
def optimize_circuit(params):
    """Minimize the variational circuit and return its minimum value.

    The code you write for this challenge should be completely contained within this function
    between the # QHACK # comment markers. You should create a device and convert the
    variational_circuit function into an executable QNode. Next, you should minimize the variational
    circuit using gradient-based optimization to update the input params. Return the optimized value
    of the QNode as a single floating-point number.

    Args:
        params (np.ndarray): Input parameters to be optimized, of dimension 30

    Returns:
        float: the value of the optimized QNode
    """

    optimal_value = 0.0

    # QHACK #

    # Initialize the device
    # dev = ...
    dev = qml.device('default.qubit', wires=2)

    # Instantiate the QNode
    # circuit = qml.QNode(variational_circuit, dev)
    circuit = qml.QNode(variational_circuit, dev)
    # Minimize the circuit, starting from the provided input params
    var = params
    opt = AdamOptimizer(stepsize=0.01)
    for it in range(250):
        var, optimal_value = opt.step_and_cost(circuit, var)
    #    print("Iter: {:5d} | Cost: {:0.7f} ".format(it, optimal_value))
    # print(circuit.draw())
    # QHACK #

    # Return the value of the minimized QNode
    return optimal_value
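The snippet assumes that variational_circuit is supplied by the challenge template. To run it end to end, a hypothetical stand-in circuit (illustrative only, not the actual QHACK circuit) could look like this:

import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer

NUM_PARAMS = 30  # dimension stated in the docstring above

def variational_circuit(params):
    # Hypothetical placeholder: spread the parameters over single-qubit rotations.
    for i, theta in enumerate(params):
        qml.RY(theta, wires=i % 2)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0))

params = np.array(np.random.uniform(0, 2 * np.pi, NUM_PARAMS), requires_grad=True)
print(optimize_circuit(params))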
Example #12
    def test_adam_optimizer_multivar(self, tol):
        """Tests that adam optimizer takes one and two steps correctly
        for multivariate functions."""
        stepsize, gamma, delta = 0.1, 0.5, 0.8
        adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_ ** 2 for x_ in x]),
        ]
        grad_multi_funcs = [
            lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
            lambda x: (
                np.array(
                    [
                        np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                        np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2),
                    ]
                ),
            ),
            lambda x: (np.array([2 * x_ for x_ in x]),),
        ]

        x_vals = np.linspace(-10, 10, 16, endpoint=False)

        for gradf, f in zip(grad_multi_funcs, multivariate_funcs):
            for jdx in range(len(x_vals[:-1])):
                adam_opt.reset()

                x_vec = x_vals[jdx : jdx + 2]
                x_onestep = adam_opt.step(f, x_vec)
                adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
                firstmoment = (1 - gamma) * gradf(x_vec)[0]
                secondmoment = (1 - delta) * gradf(x_vec)[0] * gradf(x_vec)[0]
                x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_onestep, x_onestep_target, atol=tol)

                x_twosteps = adam_opt.step(f, x_onestep)
                adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
                firstmoment = gamma * firstmoment + (1 - gamma) * gradf(x_onestep)[0]
                secondmoment = (
                    delta * secondmoment + (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
                )
                x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                    np.sqrt(secondmoment) + 1e-8
                )
                assert np.allclose(x_twosteps, x_twosteps_target, atol=tol)
Example #13
def opt(opt_name):
    stepsize, gamma, delta = 0.1, 0.5, 0.8

    if opt_name == "gd":
        return GradientDescentOptimizer(stepsize)

    if opt_name == "nest":
        return NesterovMomentumOptimizer(stepsize, momentum=gamma)

    if opt_name == "moment":
        return MomentumOptimizer(stepsize, momentum=gamma)

    if opt_name == "ada":
        return AdagradOptimizer(stepsize)

    if opt_name == "rms":
        return RMSPropOptimizer(stepsize, decay=gamma)

    if opt_name == "adam":
        return AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
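A short usage sketch for this helper (assuming the pennylane.numpy import and the module-level stepsize/gamma/delta constants used elsewhere in the test module):

from pennylane import numpy as np

adam = opt("adam")
params = np.array([0.1, 0.2], requires_grad=True)
params = adam.step(lambda x: np.sum(x ** 2), params)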
Example #14
    def test_update_stepsize(self):
        """
        Tests whether the stepsize value is updated correctly and whether a ``UserWarning``
        is raised when the ``update_stepsize`` method is used.
        """

        eta = 0.5
        opt = AdamOptimizer(eta)
        assert opt.stepsize == eta

        eta2 = 0.1
        opt.update_stepsize(eta2)
        assert opt.stepsize == eta2

        with pytest.warns(
            UserWarning,
            match="'update_stepsize' is deprecated. Stepsize value can be updated using "
            "the 'stepsize' attribute.",
        ):
            opt.update_stepsize(eta)
Example #15
num_training = 200
num_test = 2000

Xdata, y_train = circle(num_training)
X_train = np.hstack((Xdata, np.zeros((Xdata.shape[0], 1))))

Xtest, y_test = circle(num_test)
X_test = np.hstack((Xtest, np.zeros((Xtest.shape[0], 1))))

# Train using Adam optimizer and evaluate the classifier
num_layers = 3
learning_rate = 0.6
epochs = 20
batch_size = 32

opt = AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999)

# initialize random weights
params = np.random.uniform(size=(num_layers, 3))

predicted_train, states_train = test(params, X_train, y_train, state_labels)
accuracy_train = accuracy_score(y_train, predicted_train)

predicted_test, states_test = test(params, X_test, y_test, state_labels)
accuracy_test = accuracy_score(y_test, predicted_test)

# save predictions with random weights for comparison
initial_predictions = predicted_test

loss = cost(params, X_test, y_test, state_labels)

Example #16
def reset(opt):
    if getattr(opt, "reset", None):
        opt.reset()


@pytest.mark.parametrize(
    "opt, opt_name",
    [
        (GradientDescentOptimizer(stepsize), "gd"),
        (MomentumOptimizer(stepsize, momentum=gamma), "moment"),
        (NesterovMomentumOptimizer(stepsize, momentum=gamma), "nest"),
        (AdagradOptimizer(stepsize), "ada"),
        (RMSPropOptimizer(stepsize, decay=gamma), "rms"),
        (AdamOptimizer(stepsize, beta1=gamma, beta2=delta), "adam"),
        (RotosolveOptimizer(), "roto"),
    ],
)
class TestOverOpts:
    """Tests keywords, multiple arguements, and non-training arguments in relevent optimizers"""
    def test_kwargs(self, mocker, opt, opt_name, tol):
        """Test that the keywords get passed and alter the function"""
        class func_wrapper:
            @staticmethod
            def func(x, c=1.0):
                return (x - c)**2

        x = 1.0

        wrapper = func_wrapper()
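For context, the step method of PennyLane's gradient-based optimizers forwards extra keyword arguments to the objective function, which is presumably what the (truncated) test above goes on to verify. A minimal standalone sketch:

from pennylane.optimize import AdamOptimizer

def shifted_square(x, c=1.0):
    return (x - c) ** 2

adam_opt = AdamOptimizer(0.1)
x_new = adam_opt.step(shifted_square, 1.0, c=2.0)  # the keyword c is forwarded to the objective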
Example #17
def cost(var, features, labels):
    """Cost (error) function to be minimized.

    Args:
        var (array[float]): array of variables
        features (array[float]): 2-d array of input vectors
        labels (array[float]): 1-d array of targets
    Returns:
        float: loss
    """
    # Compute prediction for each input in data batch
    preds = [quantum_neural_net(var, x=x) for x in features]

    return square_loss(labels, preds)


# load function data
data = np.loadtxt("sine.txt")
X = data[:, 0]
Y = data[:, 1]

# initialize weights
np.random.seed(0)
num_layers = 4
var_init = 0.05 * np.random.randn(num_layers, 5)

# create optimizer
opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)

# train
var = var_init
for it in range(500):
    var = opt.step(lambda v: cost(v, X, Y), var)
    
    print("Iter: {:5d} | Cost: {:0.7f} ".format(it + 1, cost(var, X, Y)))
Example #18
    y = np.array(y)

    return X[:125], y[:125], X[125:], y[125:]  # split the data into train/test sets


X_train, y_train, X_test, y_test = load_data()

# weight initialization for our quantum layers
weights = 0.01 * np.random.randn(NUM_LAYERS, NUM_WIRES, 3)

# A simple batch loader; it is not perfect, so adapt it to your own train/test data
BATCH_SIZE = 5
EPOCH = 10
LR = 1e-3

opt = AdamOptimizer(LR)

for epoch in tqdm(range(EPOCH), desc="Epoch"):
    with tqdm(range(0,
                    len(X_train) - BATCH_SIZE, BATCH_SIZE),
              desc="Train") as pbar:

        for batch in pbar:
            X_train_batch = X_train[batch:batch + BATCH_SIZE]
            y_train_batch = y_train[batch:batch + BATCH_SIZE]

            weights = opt.step(lambda v: cost(v, X_train_batch, y_train_batch),
                               weights)

            #Compute loss and accuracy
            predictions = [circuit(weights, x) for x in X_train]
Example #19
YY = np.linspace(-2, 4., 5)   # evenly spaced values instead of a fixed step size

# Create the mesh grid for all X/Y combinations
XX, YY = np.meshgrid(XX, YY)

# Rosenbrock function with two parameters, evaluated on the NumPy arrays
ZZ = (1. - XX)**2 + 100. * (YY - XX * XX)**2
ZZ = ZZ / np.max(ZZ)

# final inputs and labels
inputs = list(zip(XX.flatten(),YY.flatten()))
labels = ZZ.flatten()

# sets initial variable value and optimizer
var = inits
opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)

losses = []
for it in range(500):
    
    # update variable with optimizer and get loss
    var = opt.step(lambda v: cost(v, inputs,labels), var)
    losses.append(cost.loss._value)
    
# get the final predictions
preds = np.array([pred._value for pred in cost.preds])
ZZ_NN = preds.reshape(ZZ.shape)

# plot
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is no longer supported in newer Matplotlib
ax.view_init(30, 35)
Example #20
class BasicTest(BaseTest):
    """Basic optimizer tests.
    """
    def setUp(self):
        self.sgd_opt = GradientDescentOptimizer(stepsize)
        self.mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
        self.nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
        self.adag_opt = AdagradOptimizer(stepsize)
        self.rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
        self.adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)

        self.fnames = ['test_function_1', 'test_function_2', 'test_function_3']
        self.univariate_funcs = [
            np.sin, lambda x: np.exp(x / 10.), lambda x: x**2
        ]
        self.grad_uni_fns = [
            np.cos, lambda x: np.exp(x / 10.) / 10., lambda x: 2 * x
        ]
        self.multivariate_funcs = [
            lambda x: np.sin(x[0]) + np.cos(x[1]),
            lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
            lambda x: np.sum([x_**2 for x_ in x])
        ]
        self.grad_multi_funcs = [
            lambda x: np.array([np.cos(x[0]), -np.sin(x[1])]),
            lambda x: np.array([
                np.exp(x[0] / 3) / 3 * np.tanh(x[1]),
                np.exp(x[0] / 3) * (1 - np.tanh(x[1])**2)
            ]),
            lambda x: np.array([2 * x_ for x_ in x])
        ]
        self.mvar_mdim_funcs = [
            lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
            lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
            lambda x: np.sum([x_[0]**2 for x_ in x])
        ]
        self.grad_mvar_mdim_funcs = [
            lambda x: np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])],
                                [-np.sin(x[1, 0]), 1.]]),
            lambda x: np.array([[
                np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
                np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1])**2)
            ], [0., 0.]]),
            lambda x: np.array([[2 * x_[0], 0.] for x_ in x])
        ]

        self.class_fun = class_fun
        self.quant_fun = quant_fun
        self.hybrid_fun = hybrid_fun
        self.hybrid_fun_nested = hybrid_fun_nested
        self.hybrid_fun_flat = hybrid_fun_flat
        self.hybrid_fun_mdarr = hybrid_fun_mdarr
        self.hybrid_fun_mdlist = hybrid_fun_mdlist

        self.mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
        self.mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
        self.nested_list = [[[0.2], 0.3], [0.1, [0.4]], -0.1]
        self.flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
        self.multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
        self.multid_list = [[0.1, 0.2], [-0.1, -0.4]]

    def test_mixed_inputs_for_hybrid_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for hybrid optimization tasks."""
        self.logTestName()

        hybrid_list = self.sgd_opt.step(self.hybrid_fun, self.mixed_list)
        hybrid_tuple = self.sgd_opt.step(self.hybrid_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(hybrid_list[0],
                                  hybrid_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[1],
                                  hybrid_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(hybrid_list[2],
                                  hybrid_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_classical_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely classical optimization tasks."""
        self.logTestName()

        class_list = self.sgd_opt.step(self.class_fun, self.mixed_list)
        class_tuple = self.sgd_opt.step(self.class_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(class_list[0],
                                  class_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[1],
                                  class_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(class_list[2],
                                  class_tuple[2],
                                  delta=self.tol)

    def test_mixed_inputs_for_quantum_optimization(self):
        """Tests that gradient descent optimizer treats parameters of mixed types the same
        for purely quantum optimization tasks."""
        self.logTestName()

        quant_list = self.sgd_opt.step(self.quant_fun, self.mixed_list)
        quant_tuple = self.sgd_opt.step(self.quant_fun, self.mixed_tuple)

        self.assertAllAlmostEqual(quant_list[0],
                                  quant_tuple[0],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[1],
                                  quant_tuple[1],
                                  delta=self.tol)
        self.assertAllAlmostEqual(quant_list[2],
                                  quant_tuple[2],
                                  delta=self.tol)

    def test_nested_and_flat_returns_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         nested and flat lists."""
        self.logTestName()

        nested = self.sgd_opt.step(self.hybrid_fun_nested, self.nested_list)
        flat = self.sgd_opt.step(self.hybrid_fun_flat, self.flat_list)

        self.assertAllAlmostEqual(flat, list(_flatten(nested)), delta=self.tol)

    def test_array_and_list_return_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         lists and arrays."""
        self.logTestName()

        array = self.sgd_opt.step(self.hybrid_fun_mdarr, self.multid_array)
        lst = self.sgd_opt.step(self.hybrid_fun_mdlist, self.multid_list)

        self.assertAllAlmostEqual(array, lst, delta=self.tol)

    def test_gradient_descent_optimizer_univar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    x_vec = x_vals[jdx:jdx + 2]
                    x_new = self.sgd_opt.step(f, x_vec)
                    x_correct = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_gradient_descent_optimizer_multivar_multidim(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        for multi-variate functions and with higher dimensional inputs."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_mvar_mdim_funcs,
                                  self.mvar_mdim_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-3])):
                    x_vec = x_vals[jdx:jdx + 4]
                    x_vec_multidim = np.reshape(x_vec, (2, 2))
                    x_new = self.sgd_opt.step(f, x_vec_multidim)
                    x_correct = x_vec_multidim - gradf(
                        x_vec_multidim) * stepsize
                    x_new_flat = x_new.flatten()
                    x_correct_flat = x_correct.flatten()
                    self.assertAllAlmostEqual(x_new_flat,
                                              x_correct_flat,
                                              delta=self.tol)

    def test_gradient_descent_optimizer_usergrad(self):
        """Tests that basic stochastic gradient descent takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    x_new = self.sgd_opt.step(f, x_start, grad_fn=gradf)
                    x_correct = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_new, x_correct, delta=self.tol)

    def test_momentum_optimizer_univar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.mom_opt.reset()

                    x_onestep = self.mom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_momentum_optimizer_multivar(self):
        """Tests that momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.mom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.mom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.mom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    x_twosteps_target = x_onestep - (gradf(x_onestep) +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_univar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_nesterovmomentum_optimizer_multivar(self):
        """Tests that nesterov momentum optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.nesmom_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.nesmom_opt.step(f, x_vec)
                    x_onestep_target = x_vec - gradf(x_vec) * stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f, x_onestep)
                    momentum_term = gamma * gradf(x_vec)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_nesterovmomentum_optimizer_usergrad(self):
        """Tests that nesterov momentum optimizer takes gradient-descent steps correctly
        using user-provided gradients."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns[::-1],
                                  self.univariate_funcs, self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.nesmom_opt.reset()

                    x_onestep = self.nesmom_opt.step(f, x_start, grad_fn=gradf)
                    x_onestep_target = x_start - gradf(x_start) * stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.nesmom_opt.step(f,
                                                      x_onestep,
                                                      grad_fn=gradf)
                    momentum_term = gamma * gradf(x_start)
                    shifted_grad_term = gradf(x_onestep -
                                              stepsize * momentum_term)
                    x_twosteps_target = x_onestep - (shifted_grad_term +
                                                     momentum_term) * stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_univar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adag_opt.reset()

                    x_onestep = self.adag_opt.step(f, x_start)
                    past_grads = gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_start) * gradf(x_start) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adagrad_optimizer_multivar(self):
        """Tests that adagrad optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adag_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adag_opt.step(f, x_vec)
                    past_grads = gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adag_opt.step(f, x_onestep)
                    past_grads = gradf(x_vec) * gradf(x_vec) + gradf(
                        x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_rmsprop_optimizer_univar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.rms_opt.reset()

                    x_onestep = self.rms_opt.step(f, x_start)
                    past_grads = (1 - gamma) * gradf(x_start) * gradf(x_start)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_start - gradf(
                        x_start) * adapt_stepsize
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_start)*gradf(x_start) \
                                 + (1 - gamma) * gradf(x_onestep)*gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_rmsprop_optimizer_multivar(self):
        """Tests that rmsprop optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.rms_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.rms_opt.step(f, x_vec)
                    past_grads = (1 - gamma) * gradf(x_vec) * gradf(x_vec)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_onestep_target = x_vec - gradf(x_vec) * adapt_stepsize
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.rms_opt.step(f, x_onestep)
                    past_grads = (1 - gamma) * gamma * gradf(x_vec) * gradf(x_vec) \
                                 + (1 - gamma) * gradf(x_onestep) * gradf(x_onestep)
                    adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
                    x_twosteps_target = x_onestep - gradf(
                        x_onestep) * adapt_stepsize
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)

    def test_adam_optimizer_univar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for uni-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_uni_fns, self.univariate_funcs,
                                  self.fnames):
            with self.subTest(i=name):
                for x_start in x_vals:
                    self.adam_opt.reset()

                    x_onestep = self.adam_opt.step(f, x_start)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_start)
                    secondmoment = gradf(x_start) * gradf(x_start)
                    x_onestep_target = x_start - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_onestep,
                                           x_onestep_target,
                                           delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_start) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_start) * gradf(x_start) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAlmostEqual(x_twosteps,
                                           x_twosteps_target,
                                           delta=self.tol)

    def test_adam_optimizer_multivar(self):
        """Tests that adam optimizer takes one and two steps correctly
        for multi-variate functions."""
        self.logTestName()

        for gradf, f, name in zip(self.grad_multi_funcs,
                                  self.multivariate_funcs, self.fnames):
            with self.subTest(i=name):
                for jdx in range(len(x_vals[:-1])):
                    self.adam_opt.reset()

                    x_vec = x_vals[jdx:jdx + 2]
                    x_onestep = self.adam_opt.step(f, x_vec)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 -
                                                                        gamma)
                    firstmoment = gradf(x_vec)
                    secondmoment = gradf(x_vec) * gradf(x_vec)
                    x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_onestep,
                                              x_onestep_target,
                                              delta=self.tol)

                    x_twosteps = self.adam_opt.step(f, x_onestep)
                    adapted_stepsize = stepsize * np.sqrt(1 - delta**2) / (
                        1 - gamma**2)
                    firstmoment = (gamma * gradf(x_vec) +
                                   (1 - gamma) * gradf(x_onestep))
                    secondmoment = (
                        delta * gradf(x_vec) * gradf(x_vec) +
                        (1 - delta) * gradf(x_onestep) * gradf(x_onestep))
                    x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
                        np.sqrt(secondmoment) + 1e-8)
                    self.assertAllAlmostEqual(x_twosteps,
                                              x_twosteps_target,
                                              delta=self.tol)
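As a side-by-side reference for the momentum and Nesterov targets computed in the tests above, here is a minimal standalone sketch (illustrative, plain NumPy, not PennyLane's implementation) of two scalar steps of each method:

import numpy as np

def two_momentum_steps(grad, x, eta, gamma):
    a1 = grad(x)                                   # first accumulated velocity
    x1 = x - eta * a1
    a2 = gamma * a1 + grad(x1)                     # plain momentum: gradient at the current point
    return x1 - eta * a2

def two_nesterov_steps(grad, x, eta, gamma):
    a1 = grad(x)
    x1 = x - eta * a1
    a2 = gamma * a1 + grad(x1 - eta * gamma * a1)  # Nesterov: gradient at the look-ahead point
    return x1 - eta * a2

print(two_momentum_steps(np.cos, 1.0, 0.1, 0.5))
print(two_nesterov_steps(np.cos, 1.0, 0.1, 0.5))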
Example #21
def classify_data(X_train, Y_train, X_test):
    """Develop and train variational quantum classifier.

    Args:
        X_train (np.ndarray): An array of floats of size (250, 3) to be used as training data.
        Y_train (np.ndarray): An array of size (250,) containing the categorical labels
            associated with the training data. The categories are labeled by -1, 0, and 1.
        X_test (np.ndarray): An array of floats of size (50, 3) to serve as testing data.

    Returns:
        str: The predicted categories of X_test, converted from a list of ints to a
            comma-separated string.
    """

    # Use this array to make a prediction for the labels of the data in X_test
    predictions = []

    NUM_WIRES = 3
    NUM_LAYERS = 2

    dev = qml.device("default.qubit", wires=NUM_WIRES)

    @qml.qnode(dev)
    def circuit(params, x):
        xEmbeded = [i * np.pi for i in x]
        for i in range(NUM_WIRES):
            qml.RX(xEmbeded[i], wires=i)
            qml.Rot(*params[0, i], wires=i)

        qml.CZ(wires=[1, 0])
        qml.CZ(wires=[1, 2])
        qml.CZ(wires=[0, 2])

        for i in range(NUM_WIRES):
            qml.Rot(*params[1, i], wires=i)

        return qml.expval(qml.PauliZ(0)), qml.expval(
            qml.PauliZ(1)), qml.expval(qml.PauliZ(2))

    def prediction(weigths, x_train):
        predictions = [circuit(weigths, f) for f in x_train]
        for i, p in enumerate(predictions):
            maxi = p[0]
            indexMax = 0
            for k in range(3):
                if p[k] > maxi:
                    maxi = p[k]
                    indexMax = k
            predictions[i] = indexMax - 1

        return predictions

    def cost(weights, x_train, labels):
        predictions = [circuit(weights, f) for f in x_train]

        loss = 0
        for i in range(len(predictions)):
            min = predictions[i][0]
            max = predictions[i][0]
            for k in range(3):
                if predictions[i][k] > max:
                    max = predictions[i][k]
                if predictions[i][k] < min:
                    min = predictions[i][k]

            x = (predictions[i][labels[i] + 1] - min) / (max - min)

            loss += (1 - x)**2  # [0.4,0.3,0.3]

        return loss / len(predictions)

    def accuracy(weights, x_train, labels):
        predictions = prediction(weights, x_train)
        loss = 0
        for i in range(len(predictions)):
            if predictions[i] == labels[i]:
                loss += 1
        loss = loss / len(predictions)
        return loss

    params = (0.01 * np.random.randn(2, NUM_WIRES, 3))
    bestparams = (0.01 * np.random.randn(2, NUM_WIRES, 3))
    bestcost = 1
    opt = AdamOptimizer(0.425)
    batch_size = 10
    Diag_Cost = []
    Diag_Acc = []
    for it in range(30):

        # Update the weights by one optimizer step
        batch_index = np.random.randint(0, 250, (batch_size, ))
        X_train_batch = X_train[batch_index]
        Y_train_batch = Y_train[batch_index]
        params = opt.step(lambda v: cost(v, X_train_batch, Y_train_batch),
                          params)

        # Compute predictions on train and validation set
        #predictions_train = prediction(params,X_train)

        cosT = cost(params, X_train, Y_train)
        # Compute accuracy on train and validation set
        acc = accuracy(params, X_train, Y_train)

        if cosT < bestcost:
            bestcost = cosT
            bestparams = params

        Diag_Cost.append(cosT.numpy())
        Diag_Acc.append(acc)
        print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.2f}% ".format(
            it + 1, cosT, acc * 100))

    predictions = prediction(bestparams, X_test)
    results = [
        1, 0, -1, 0, -1, 1, -1, -1, 0, -1, 1, -1, 0, 1, 0, -1, -1, 0, 0, 1, 1,
        0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, -1, -1, -1, 0, -1, 0, 1, 0, -1, 1,
        1, 0, -1, -1, -1, -1, 0, 0
    ]

    accResult = accuracy(bestparams, X_test, results)
    print()
    print("FINAL ACCURACY: {:0.2f}%".format(accResult * 100))
    circuit(bestparams, X_train[0])
    print()
    print(circuit.draw())
    return array_to_concatenated_string(predictions), Diag_Cost, Diag_Acc