Example #1
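These examples are excerpted from a pytest suite for the autocrit library. Each one assumes the module-level imports below; `shared` is taken to be the suite's own helper module supplying the random problem generators and the `convergence_test` harness (an inference from the calls, not a documented API):

import warnings

import autocrit
import shared
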
def test_FastNewtonTR():
    warnings.filterwarnings("ignore")
    finder = autocrit.FastNewtonTR
    finder_str = "FastNewtonTR"

    problem_str = "least squares"
    finder_kwargs = {"step_size": 0.5}
    num_iters = 25

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(finder, finder_str, finder_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)

    problem_str = "shallow regression"
    finder_kwargs = {"step_size": 0.1}
    num_iters = 250

    random_regression_problem, network, random_init = \
        shared.generate_random_shallow_regression()

    shared.convergence_test(finder,
                            finder_str,
                            finder_kwargs,
                            network.loss,
                            random_regression_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)
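
FastNewtonTR's name suggests a trust-region Newton variant; its internals are not shown here. For intuition about the step_size kwarg, the simplest reading is a damped Newton update, which moves a fixed fraction of the full Newton step each iteration; on a quadratic the gradient norm then shrinks geometrically. A minimal NumPy sketch of that textbook update (illustrative only, not FastNewtonTR's actual algorithm):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 10))
b = rng.standard_normal(30)
H = A.T @ A                # Hessian of 0.5 * ||Ax - b||^2 is constant

def grad(x):
    return A.T @ (A @ x - b)

x = rng.standard_normal(10)
step_size = 0.5
for _ in range(25):
    # take a fixed fraction of the full Newton step
    x -= step_size * np.linalg.solve(H, grad(x))
print(np.linalg.norm(grad(x)))   # shrinks by (1 - step_size) per iteration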
Example #2
def test_FastNewtonMR():
    warnings.filterwarnings("ignore")
    finder = autocrit.FastNewtonMR
    finder_str = "FastNewtonMR"

    problem_str = "least squares"
    finder_kwargs = {"alpha": 0.5, "beta": 0.99, "check_pure": True}
    num_iters = 500

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(finder, finder_str, finder_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)

    problem_str = "shallow regression"
    finder_kwargs = {"alpha": 0.5, "beta": 0.9, "rho": 1e-6}
    num_iters = 250

    random_regression_problem, network, random_init = \
        shared.generate_random_shallow_regression()

    shared.convergence_test(finder,
                            finder_str,
                            finder_kwargs,
                            network.loss,
                            random_regression_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)
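
FastNewtonMR's suffix suggests a minimum-residual Newton variant: rather than solving H p = -g exactly, the step minimizes ||H p + g||, which stays well defined even when H is singular or indefinite (common away from minima). The alpha and beta kwargs look like line-search parameters and rho like a damping term, but those readings are assumptions. A minimal NumPy sketch of the minimum-residual step itself:

import numpy as np

def minimum_residual_step(H, g):
    # p = argmin_p ||H p + g||_2, well defined even for singular H;
    # lstsq returns the minimum-norm solution of that least-squares problem
    p, *_ = np.linalg.lstsq(H, -g, rcond=None)
    return p

H = np.diag([1.0, -2.0, 0.0])       # indefinite and singular
g = np.array([1.0, 1.0, 0.0])
print(minimum_residual_step(H, g))  # -> [-1. ,  0.5,  0. ]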
Example #3
def test_NewtonMethod():
    warnings.filterwarnings("ignore")
    finder = autocrit.finders.newtons.NewtonMethod
    finder_str = "NewtonMethod"

    problem_str = "least squares"
    finder_kwargs = {}
    num_iters = 1

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(finder, finder_str, finder_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)
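
num_iters is 1 because Newton's method is exact on a quadratic: a single full step x - H⁻¹∇f(x) lands on the minimizer from any starting point. A quick check in NumPy:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 10))
b = rng.standard_normal(30)
x = rng.standard_normal(10)

H = A.T @ A                          # Hessian of 0.5 * ||Ax - b||^2
g = A.T @ (A @ x - b)                # gradient at x
x_new = x - np.linalg.solve(H, g)    # one full Newton step

# x_new matches the least-squares solution to machine precision
x_star, *_ = np.linalg.lstsq(A, b, rcond=None)
print(np.allclose(x_new, x_star))    # True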
Example #4
def test_deep_classification():
    warnings.filterwarnings("ignore")

    problem_str = "deep classification"

    finder = autocrit.FastNewtonMR
    finder_str = "FastNewtonMR"

    finder_kwargs = {"alpha": 1., "beta": 0.5, "rho": 1e-6, "check_pure": True}
    num_iters = 250

    random_classification_problem, network, random_init = \
        shared.generate_random_deep_classification()

    shared.convergence_test(finder,
                            finder_str,
                            finder_kwargs,
                            network.loss,
                            random_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_function_converge=False,
                            test_soln_converge=False)

    finder = autocrit.FastNewtonTR
    finder_str = "FastNewtonTR"

    finder_kwargs = {"step_size": 0.05}
    num_iters = 250

    random_classification_problem, network, random_init = \
        shared.generate_random_deep_classification(seed=shared.SEED + 1)

    shared.convergence_test(finder,
                            finder_str,
                            finder_kwargs,
                            network.loss,
                            random_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_function_converge=False,
                            test_soln_converge=False)
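
Both finders run on the same problem class, but the second call regenerates the problem with seed=shared.SEED + 1, so the two finders see independently drawn networks and data. Both convergence assertions (test_function_converge, test_soln_converge) are disabled: a deep, nonconvex objective has no known target value or parameter vector, so these runs presumably only smoke-test that each finder executes and makes progress.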
Example #5
def test_GradientNormMinimizer():
    warnings.filterwarnings("ignore")
    finder = autocrit.GradientNormMinimizer
    finder_str = "GradientNormMinimizer"

    problem_str = "least squares"
    finder_kwargs = {
        "minimizer_str": "momentum",
        "minimizer_params": {
            "lr": 1e-2,
            "momentum": 0.9
        }
    }
    num_iters = 1000

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(finder, finder_str, finder_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)
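
GradientNormMinimizer's name and kwargs suggest the classic reduction: critical points of f are global minima of the surrogate g(x) = 0.5 * ||∇f(x)||², so any first-order minimizer (here momentum, configured through minimizer_params) can hunt for them. For a quadratic f, ∇g(x) = H ∇f(x), so the whole idea fits in a few lines of NumPy (a sketch of the reduction, not autocrit's code):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5)) / np.sqrt(20.0)
b = rng.standard_normal(20)
H = A.T @ A

def grad_f(x):
    return A.T @ (A @ x - b)

def grad_g(x):
    # gradient of g(x) = 0.5 * ||grad_f(x)||^2 is H @ grad_f(x) for quadratic f
    return H @ grad_f(x)

x = rng.standard_normal(5)
v = np.zeros_like(x)
lr, momentum = 1e-2, 0.9
for _ in range(3000):
    v = momentum * v - lr * grad_g(x)
    x = x + v
print(np.linalg.norm(grad_f(x)))   # near zero: x is close to a critical point of f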
Example #6
def test_GradientDescentOptimizer():
    optimizer = autocrit.optimizers.GradientDescentOptimizer
    optimizer_str = "GradientDescentOptimizer"

    problem_str = "least squares"
    optimizer_kwargs = {}
    num_iters = 1000

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)

    problem_str = "shallow regression"
    optimizer_kwargs = {}
    num_iters = 10000

    random_regression_problem, network, random_init = \
        shared.generate_random_shallow_regression()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            network.loss, random_regression_problem,
                            problem_str, random_init, num_iters)

    problem_str = "shallow classification"
    optimizer_kwargs = {}
    num_iters = 12500

    random_classification_problem, network, random_init = \
        shared.generate_random_shallow_classification()

    shared.convergence_test(optimizer,
                            optimizer_str,
                            optimizer_kwargs,
                            network.loss,
                            random_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)
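
Note that test_soln_converge is disabled for the classification problem here and in the examples below, presumably because a cross-entropy objective need not have a unique or even finite minimizer (on linearly separable data the weights can grow without bound), so only the loss trajectory is checked.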
Example #7
def test_BackTrackingLineSearchOptimizer():
    optimizer = autocrit.optimizers.BackTrackingLineSearchOptimizer
    optimizer_str = "BackTrackingLineSearchOptimizer"

    problem_str = "least squares"
    optimizer_kwargs = {}
    num_iters = 1000

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)

    problem_str = "shallow regression"
    optimizer_kwargs = {"gamma": 1 - 1e-3}
    num_iters = 100

    random_regression_problem, network, random_init = \
        shared.generate_random_shallow_regression()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            network.loss, random_regression_problem,
                            problem_str, random_init, num_iters)

    problem_str = "shallow classification"
    optimizer_kwargs = {"gamma": 1 - 1e-3}
    num_iters = 100

    random_classification_problem, network, random_init = \
        shared.generate_random_shallow_classification()

    shared.convergence_test(optimizer,
                            optimizer_str,
                            optimizer_kwargs,
                            network.loss,
                            random_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)
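
Backtracking line search starts from a full step and shrinks it until the Armijo sufficient-decrease condition holds; the gamma kwarg above plausibly sets one of these constants close to 1 so the search backs off very gently, though that reading is an assumption. A generic sketch of the procedure:

import numpy as np

def backtracking_step(f, grad_f, x, alpha0=1.0, shrink=0.5, c=1e-4):
    # shrink alpha until the Armijo sufficient-decrease condition holds
    g = grad_f(x)
    alpha = alpha0
    while f(x - alpha * g) > f(x) - c * alpha * (g @ g):
        alpha *= shrink
    return x - alpha * g

# usage on a simple quadratic
f = lambda x: 0.5 * x @ x
grad_f = lambda x: x
x = np.array([3.0, -4.0])
for _ in range(20):
    x = backtracking_step(f, grad_f, x)
print(f(x))   # ~0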
Example #8
def test_MomentumOptimizer():
    warnings.filterwarnings("ignore")
    optimizer = autocrit.optimizers.MomentumOptimizer
    optimizer_str = "MomentumOptimizer"

    problem_str = "least squares"
    optimizer_kwargs = {}
    num_iters = 1000

    random_least_squares_problem, random_init = \
        shared.generate_random_least_squares()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            random_least_squares_problem.loss,
                            random_least_squares_problem, problem_str,
                            random_init, num_iters)

    problem_str = "shallow regression"
    optimizer_kwargs = {}
    num_iters = 1000

    random_regression_problem, network, random_init = \
        shared.generate_random_shallow_regression()

    shared.convergence_test(optimizer, optimizer_str, optimizer_kwargs,
                            network.loss, random_regression_problem,
                            problem_str, random_init, num_iters)

    problem_str = "shallow classification"
    optimizer_kwargs = {}
    num_iters = 1000

    random_classification_problem, network, random_init = \
        shared.generate_random_shallow_classification()

    shared.convergence_test(optimizer,
                            optimizer_str,
                            optimizer_kwargs,
                            network.loss,
                            random_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)

    problem_str = "convolutional classification"
    optimizer_kwargs = {"momentum": 0.99}
    num_iters = 1000

    test_classification_problem, network, random_init = \
        shared.generate_test_conv_classification()

    shared.convergence_test(optimizer,
                            optimizer_str,
                            optimizer_kwargs,
                            network.loss,
                            test_classification_problem,
                            problem_str,
                            random_init,
                            num_iters,
                            test_soln_converge=False)
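
With momentum 0.99 the velocity averages over on the order of 1/(1 - 0.99) = 100 past gradients, which is likely why the convolutional test raises it above the default. The textbook heavy-ball update, as a sketch (autocrit's MomentumOptimizer may differ in details):

import numpy as np

def momentum_step(x, v, grad, lr=1e-2, momentum=0.99):
    # heavy-ball: v is an exponentially decaying sum of past gradients
    v = momentum * v - lr * grad
    return x + v, v

# e.g. on f(x) = 0.5 * ||x||^2, where grad = x
x, v = np.array([1.0, -2.0]), np.zeros(2)
for _ in range(3000):
    x, v = momentum_step(x, v, grad=x)
print(np.linalg.norm(x))   # driven toward zero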