Example #1
from funkyyak import grad, kylist

from hypergrad.data import load_data_dicts
from hypergrad.nn_utils import make_nn_funs, VectorParser, logit, inv_logit, fill_parser
from hypergrad.optimizers import simple_sgd, sgd_parsed
from hypergrad.util import RandomState

import whetlab
parameters = {
    'init_log_alphas': {
        'min': -3,
        'max': 2,
        'type': 'float'
    },
    'init_invlogit_betas': {
        'min': inv_logit(0.1),
        'max': inv_logit(0.999),
        'type': 'float'
    },
    'init_log_param_scale': {
        'min': -5,
        'max': 1,
        'type': 'float'
    }
}
outcome = {'name': 'Training Gain'}
scientist = whetlab.Experiment(
    name="ICML Hypergrad paper - optimize initial values - v2 no randomness",
    description="Vanilla tuning of hyperparameters. v2",
    parameters=parameters,
    outcome=outcome)
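The snippet above only creates the Whetlab experiment; the search loop itself is not shown. A hedged sketch of how such a loop usually looks with the Whetlab client (the suggest/update calls are assumptions about that client's API, and train_and_score is a hypothetical helper standing in for the training run that produces the "Training Gain" outcome):

# Hedged sketch of the surrounding optimization loop (not from the original file).
for _ in range(20):                  # number of Whetlab trials, chosen arbitrarily
    job = scientist.suggest()        # e.g. {'init_log_alphas': -1.2, ...}
    score = train_and_score(**job)   # hypothetical: run SGD with the suggested values
    scientist.update(job, score)     # report the measured outcome back to Whetlab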
Example #2
import numpy as np
import numpy.random as npr

from hypergrad.data import load_data_dicts
from hypergrad.nn_utils import make_nn_funs, inv_logit
from hypergrad.optimizers import sgd4, rms_prop, adam

# ----- Fixed params -----
layer_sizes = [784, 10]
batch_size = 200
N_iters = 60
N_classes = 10
N_train = 1000
N_valid = 10**3
N_tests = 10**3
N_batches = N_train // batch_size  # integer number of minibatches per epoch
#N_iters = N_epochs * N_batches
# ----- Initial values of learned hyper-parameters -----
init_log_L2_reg = 0.0
init_log_alphas = -2.0
init_invlogit_betas = inv_logit(0.9)
init_log_param_scale = 0.0
# ----- Superparameters -----
meta_alpha = 0.04
N_meta_iter = 100

global_seed = npr.RandomState(3).randint(1000)

def fill_parser(parser, items):
    partial_vects = [np.full(parser[name].size, items[i])
                     for i, name in enumerate(parser.names)]
    return np.concatenate(partial_vects, axis=0)

def run():
    train_data, valid_data, tests_data = load_data_dicts(N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
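fill_parser above broadcasts one scalar per named parameter group into a vector that lines up with the flattened network parameters, which is how per-group learning rates and momenta are applied elementwise. A small self-contained illustration; ToyParser is a hypothetical stand-in that only mimics the .names and [name].size interface fill_parser relies on:

import numpy as np

class ToyParser:
    # Hypothetical stand-in for hypergrad's VectorParser: ordered group names
    # plus a zero array per group so that .size is available.
    def __init__(self, shapes):
        self._shapes = dict(shapes)
        self.names = [name for name, _ in shapes]
    def __getitem__(self, name):
        return np.zeros(self._shapes[name])

def fill_parser(parser, items):
    partial_vects = [np.full(parser[name].size, items[i])
                     for i, name in enumerate(parser.names)]
    return np.concatenate(partial_vects, axis=0)

toy = ToyParser([(('weights', 0), (784, 10)), (('biases', 0), (10,))])
alphas = fill_parser(toy, np.array([0.1, 0.2]))   # one learning rate per group
print(alphas.shape)   # (7850,): 7840 entries of 0.1 for the weights, 10 of 0.2 for the biases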
Example #3
import numpy as np

from hypergrad.nn_utils import inv_logit

# ----- Fixed params -----
layer_sizes = [784, 100, 10]
batch_size = 200
N_iters = 100
N_classes = 10
N_train = 1000
N_valid = 10000
N_tests = 10000
N_learning_checkpoint = 2
thin = np.ceil(N_iters / N_learning_checkpoint)

# ----- Initial values of learned hyper-parameters -----
init_log_L2_reg = 0.0
init_log_L2_reg_noise = 0.0
init_log_alphas = -1.0
init_invlogit_betas = inv_logit(0.5)
init_log_param_scale = -3.0

# ----- Superparameters -----
meta_alpha = 0.4
N_meta_iter = 50

seed = 0


def fill_parser(parser, items):
    partial_vects = [
        np.full(parser[name].size, items[i])
        for i, name in enumerate(parser.names)
    ]
    return np.concatenate(partial_vects, axis=0)
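These scalar initial values are only starting points; in the hypergrad experiment scripts they are typically expanded into per-iteration, per-group schedules that the meta-optimizer then adjusts elementwise. A hedged sketch of that expansion, continuing the snippet above (the 2*(len(layer_sizes)-1) grouping, one weight and one bias block per layer, and the variable names below are assumptions):

# Hedged sketch: expand the scalars into elementwise hyperparameter schedules.
N_param_groups = 2 * (len(layer_sizes) - 1)   # assumed: one weight + one bias block per layer

log_alphas      = np.full((N_iters, N_param_groups), init_log_alphas)      # per-iteration learning rates
invlogit_betas  = np.full((N_iters, N_param_groups), init_invlogit_betas)  # per-iteration momenta
log_param_scale = np.full(N_param_groups, init_log_param_scale)            # per-group init scales
log_L2_reg      = np.full(N_param_groups, init_log_L2_reg)                 # per-group L2 strength

# During training the unconstrained values are mapped back to their natural
# ranges, e.g. alphas = np.exp(log_alphas) and, under the naming convention in
# these examples (logit as the sigmoid), betas = logit(invlogit_betas) in (0, 1).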
Example #4
import numpy as np

from hypergrad.nn_utils import logit, inv_logit


def test_inv_logit():
    assert np.allclose(inv_logit(logit(0.5)), 0.5, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.6)), 0.6, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.1)), 0.1, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.2)), 0.2, rtol=1e-3, atol=1e-4)
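The test only requires logit and inv_logit to be mutual inverses. A minimal sketch of definitions under which it passes, assuming the convention used in these examples that inv_logit maps (0, 1) to the real line (so values like init_invlogit_betas live on an unconstrained scale) and logit is the sigmoid mapping back:

import numpy as np

# Assumed definitions (hedged), chosen to be mutual inverses.
def logit(x):
    return 1.0 / (1.0 + np.exp(-x))       # real line -> (0, 1)

def inv_logit(y):
    return np.log(y) - np.log(1.0 - y)    # (0, 1) -> real line

for p in (0.1, 0.2, 0.5, 0.6):
    assert np.allclose(inv_logit(logit(p)), p, rtol=1e-3, atol=1e-4)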
Example #5
"""Find good initial values using Whetlab."""
import numpy as np
import pickle
from collections import defaultdict

from funkyyak import grad, kylist

from hypergrad.data import load_data_dicts
from hypergrad.nn_utils import make_nn_funs, VectorParser, logit, inv_logit, fill_parser
from hypergrad.optimizers import simple_sgd, sgd_parsed
from hypergrad.util import RandomState

import whetlab
parameters = { 'init_log_alphas':{'min':-3, 'max':2, 'type':'float'},
               'init_invlogit_betas':{'min':inv_logit(0.1), 'max':inv_logit(0.999),'type':'float'},
               'init_log_param_scale':{'min':-5, 'max':1,'type':'float'}}
outcome = {'name':'Training Gain'}
scientist = whetlab.Experiment(name="ICML Hypergrad paper - optimize initial values",
                               description="Vanilla tuning of hyperparameters.",
                               parameters=parameters,
                               outcome=outcome)

# ----- Fixed params -----
layer_sizes = [784, 50, 50, 50, 10]
batch_size = 200
N_iters = 100
N_classes = 10
N_train = 10000
N_valid = 10000
N_tests = 10000
N_learning_checkpoint = 10
"""Find good initial values using Whetlab."""
import pickle
from collections import defaultdict

import numpy as np
import whetlab

from funkyyak import grad, kylist
from hypergrad.data import load_data_dicts
from hypergrad.nn_utils import make_nn_funs, VectorParser, logit, inv_logit, fill_parser
from hypergrad.optimizers import sgd_parsed
from hypergrad.util import RandomState

parameters = {
    "init_log_alphas": {"min": -3, "max": 2, "type": "float"},
    "init_invlogit_betas": {"min": inv_logit(0.1), "max": inv_logit(0.999), "type": "float"},
    "init_log_param_scale": {"min": -5, "max": 1, "type": "float"},
}
outcome = {"name": "Training Gain"}
scientist = whetlab.Experiment(
    name="ICML Hypergrad paper - optimize initial values",
    description="Vanilla tuning of hyperparameters.",
    parameters=parameters,
    outcome=outcome,
)

# ----- Fixed params -----
layer_sizes = [784, 50, 50, 50, 10]
batch_size = 200
N_iters = 100
N_classes = 10
Example #7
import numpy as np

from hypergrad.nn_utils import logit, inv_logit


def test_inv_logit():
    assert np.allclose(inv_logit(logit(0.5)), 0.5, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.6)), 0.6, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.1)), 0.1, rtol=1e-3, atol=1e-4)
    assert np.allclose(inv_logit(logit(0.2)), 0.2, rtol=1e-3, atol=1e-4)