    def test_opt_cloner(self):  # Dumb test. Just for coverage
        opt_1 = opt.create_opt(opt.SGD())
        opt_1.learning_rate = 0.5
        opt_2 = opt.create_opt(opt_1, opt_1.learning_rate)

        self.assertIsInstance(opt_1, opt.SGD)
        self.assertIsInstance(opt_2, opt.SGD)
        self.assertEqual(opt_1.learning_rate, opt_2.learning_rate)
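
# A minimal sketch of what a cloning helper like opt.create_opt might do,
# assuming it deep-copies a prototype optimizer and optionally overrides its
# learning rate. This is a hypothetical stand-in for illustration, not the
# library's actual implementation.
import copy

def create_opt_sketch(prototype, learning_rate=None):
    clone = copy.deepcopy(prototype)  # duplicate the optimizer's state
    if learning_rate is not None:
        clone.learning_rate = learning_rate  # override the copied rate
    return clone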

# Example 2

    def select_optimizer(self):
        """Return the optimizer matching self.opt_type; unknown types
        fall back to plain SGD."""
        if self.opt_type == self._Optimizer.MOMENTUM:
            return opt.Momentum(momentum=self.momentum)

        elif self.opt_type == self._Optimizer.NAG:
            return opt.NesterovMomentum(momentum=self.momentum)

        elif self.opt_type == self._Optimizer.ADAGRAD:
            return opt.AdaGrad()

        elif self.opt_type == self._Optimizer.RMSPROP:
            return opt.RMSProp(rho=self.rho)

        elif self.opt_type == self._Optimizer.ADADELTA:
            return opt.AdaDelta(rho=self.rho)

        elif self.opt_type == self._Optimizer.ADAM:
            return opt.Adam(beta1=self.beta1, beta2=self.beta2)

        elif self.opt_type == self._Optimizer.ADAMAX:
            return opt.Adamax(beta1=self.beta1, beta2=self.beta2)

        else:
            return opt.SGD()
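
    # An equivalent table-driven dispatch, sketched under the assumption
    # that the same _Optimizer enum and hyperparameter attributes
    # (momentum, rho, beta1, beta2) exist. A design alternative for
    # illustration, not code from the project.
    def select_optimizer_sketch(self):
        factories = {
            self._Optimizer.MOMENTUM:
                lambda: opt.Momentum(momentum=self.momentum),
            self._Optimizer.NAG:
                lambda: opt.NesterovMomentum(momentum=self.momentum),
            self._Optimizer.ADAGRAD: opt.AdaGrad,
            self._Optimizer.RMSPROP: lambda: opt.RMSProp(rho=self.rho),
            self._Optimizer.ADADELTA: lambda: opt.AdaDelta(rho=self.rho),
            self._Optimizer.ADAM:
                lambda: opt.Adam(beta1=self.beta1, beta2=self.beta2),
            self._Optimizer.ADAMAX:
                lambda: opt.Adamax(beta1=self.beta1, beta2=self.beta2),
        }
        # Unknown types fall back to plain SGD, mirroring the else above.
        return factories.get(self.opt_type, opt.SGD)()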

# Example 3
import orangecontrib.recommendation.optimizers as opt

import numpy as np
import collections
import unittest
import copy

__optimizers__ = [
    opt.SGD(learning_rate=0.1),
    opt.Momentum(learning_rate=0.1, momentum=0.5),
    opt.NesterovMomentum(learning_rate=0.1, momentum=0.5),
    opt.AdaGrad(learning_rate=0.1),
    opt.RMSProp(learning_rate=0.01, rho=0.9, epsilon=1e-6),
    opt.AdaDelta(learning_rate=1, rho=0.95, epsilon=1e-6),
    opt.Adam(learning_rate=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8),
    opt.Adamax(learning_rate=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8)
]
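
# A quick sanity check over the fixtures above, assuming each instance
# exposes the learning_rate attribute seen in the cloner test earlier
# (hypothetical helper, not part of the project):
def check_optimizers(optimizers=__optimizers__):
    for o in optimizers:
        assert hasattr(o, "learning_rate"), type(o).__name__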


def dxf(X):
    tensor = np.asarray([0.1, 0.2, 0.3])  # per-component scaling weights
    return tensor * X * 2  # weighted gradient of f(x) = x^2, i.e. 2x


def dxf2(X):
    return X * 2
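
# A self-contained reference for the toy problem: plain gradient descent on
# f(x) = x^2 using dxf2 above. Pure NumPy, independent of the optimizer
# classes under test; the step count and learning rate are arbitrary choices.
def sgd_reference(x0, learning_rate=0.1, steps=100):
    x = np.asarray(x0, dtype=float)
    for _ in range(steps):
        x = x - learning_rate * dxf2(x)  # x <- x - lr * f'(x)
    return x  # iterates shrink toward the minimum at x = 0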


class TestOptimizers(unittest.TestCase):

    # These tests compare results on a toy problem to values