Code Example #1
import numpy as np
from deepobs.tuner import GridSearch
from probprec import Preconditioner
from sorunner import SORunner

optimizer_class = Preconditioner
hyperparams = {"lr": {"type": float}, "est_rank": {"type": int}}

# The discrete values to construct a grid for.
grid = {'lr': np.logspace(-5, 2, 10), 'est_rank': [2, 3]}

# Make sure to set the amount of resources to the grid size (here 10 * 2 = 20).
# For grid search, this is just a sanity check. (The DeepOBS keyword itself is
# spelled 'ressources'.)
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=SORunner,
                   ressources=20)

# Tune (i.e. evaluate every grid point) and rerun the best setting with 10 different seeds.
# tuner.tune('quadratic_deep', rerun_best_setting=True, num_epochs=2, output_dir='./grid_search')

# Optionally, generate commands for a parallelized execution
tuner.generate_commands_script('mnist_vae',
                               run_script='/home/bald/pre_fmnist/runscript.py',
                               output_dir='./grid_search',
                               generation_dir='./grid_search_commands')
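
# The exact files written by generate_commands_script depend on the DeepOBS
# version; they are meant to be handed to whatever scheduler is available.
# Minimal sketch, assuming generation_dir ends up holding plain-text files
# with one shell command per line (an assumption; inspect the directory):
import subprocess
from pathlib import Path

for script in sorted(Path('./grid_search_commands').glob('*')):
    if not script.is_file():
        continue
    for command in script.read_text().splitlines():
        if command.strip():
            subprocess.run(command, shell=True, check=True)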
Code Example #2
import numpy as np
from torch.optim import SGD
from deepobs.pytorch.runners import StandardRunner
from deepobs.tuner import GridSearch

# define optimizer (the snippet was truncated here; this head is
# reconstructed from the surrounding examples)
optimizer_class = SGD
hyperparams = {
    "lr": {"type": float},
    "momentum": {"type": float},
    "nesterov": {"type": bool},
}

# The discrete values to construct a grid for.
grid = {
    "lr": np.logspace(-5, 2, 6),
    "momentum": [0.5, 0.7, 0.9],
    "nesterov": [False, True],
}

# Make sure to set the amount of resources to the grid size. For grid search, this is just a sanity check.
tuner = GridSearch(
    optimizer_class,
    hyperparams,
    grid,
    runner=StandardRunner,
    ressources=6 * 3 * 2,
)

# Tune (i.e. evaluate every grid point) and rerun the best setting with 10 different seeds.
# tuner.tune('quadratic_deep', rerun_best_setting=True, num_epochs=2, output_dir='./grid_search')

# Optionally, generate commands for a parallelized execution
tuner.generate_commands_script(
    "quadratic_deep",
    run_script="../runner_momentum_pytorch.py",
    num_epochs=2,
    output_dir="./grid_search",
    generation_dir="./grid_search_commands",
)
Code Example #3
import numpy as np
from torch.optim import SGD
from deepobs.pytorch.runners import StandardRunner
from deepobs.tuner import GridSearch

# define optimizer (the snippet was truncated here; this head is
# reconstructed from the surrounding examples)
optimizer_class = SGD
hyperparams = {
    "lr": {
        "type": float
    },
    "momentum": {
        "type": float
    },
    "nesterov": {
        "type": bool
    }
}

# The discrete values to construct a grid for.
grid = {
    'lr': np.logspace(-5, 2, 6),
    'momentum': [0.5, 0.7, 0.9],
    'nesterov': [False, True]
}

# Make sure to set the amount of resources to the grid size. For grid search, this is just a sanity check.
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=StandardRunner,
                   ressources=6 * 3 * 2)

# Tune (i.e. evaluate every grid point) and rerun the best setting with 10 different seeds.
# tuner.tune('quadratic_deep', rerun_best_setting=True, num_epochs=2, output_dir='./grid_search')

# Optionally, generate commands for a parallelized execution
tuner.generate_commands_script('quadratic_deep',
                               run_script='../SGD.py',
                               num_epochs=2,
                               output_dir='./grid_search',
                               generation_dir='./grid_search_commands')
Code Example #4
File: testset_globals.py Project: jotaf98/DeepOBS
from deepobs.tuner import GridSearch
from torch.optim import SGD
import numpy as np
from deepobs.pytorch.runners import StandardRunner
from deepobs.config import get_small_test_set

# define optimizer
optimizer_class = SGD
hyperparams = {"lr": {"type": float}}

### Grid Search ###
# The discrete values to construct a grid for.
grid = {'lr': np.logspace(-5, 2, 6)}

# init tuner class
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=StandardRunner,
                   ressources=6)

# get the small test set and automatically tune on each of the contained test problems
small_testset = get_small_test_set()
tuner.tune_on_testset(
    small_testset,
    rerun_best_setting=True)  # kwargs are passed on to the tune() method
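
# Since all keyword arguments are forwarded to tune(), per-run settings such
# as the epoch count and output directory can be supplied in the same call
# (illustrative values, mirroring the tune() calls in the other examples):
# tuner.tune_on_testset(small_testset,
#                       rerun_best_setting=True,
#                       num_epochs=2,
#                       output_dir='./small_testset_grid_search')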
Code Example #5
from deepobs.tuner import GridSearch
from torch.optim import SGD
import numpy as np
from deepobs.pytorch.runners import StandardRunner

# define optimizer
optimizer_class = SGD
hyperparams = {"lr": {"type": float}}

### Grid Search ###
# The discrete values to construct a grid for.
grid = {'lr': np.logspace(-5, 2, 6)}

# init tuner class
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=StandardRunner,
                   ressources=6)

# tune on quadratic test problem and automatically rerun the best instance with 10 different seeds.
tuner.tune('quadratic_deep', rerun_best_setting=True)
Code Example #6
import numpy as np
from scipy.stats import binom, uniform
from torch.optim import SGD
from deepobs.pytorch.runners import StandardRunner
from deepobs.tuner import GridSearch, RandomSearch
from deepobs.tuner.tuner_utils import log_uniform  # assumed import path

# define optimizer (the snippet was truncated here; this head is
# reconstructed from the surrounding examples)
optimizer_class = SGD
hyperparams = {
    "lr": {
        "type": float
    },
    "momentum": {
        "type": float
    },
    "nesterov": {
        "type": bool
    }
}

### Grid Search ###
# The discrete values to construct a grid for.
grid = {
    'lr': np.logspace(-5, 2, 6),
    'momentum': [0.5, 0.7, 0.9],
    'nesterov': [False, True]
}

# Make sure to set the amount of resources to the grid size. For grid search, this is just a sanity check.
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=StandardRunner,
                   ressources=6 * 3 * 2)

### Random Search ###
# Define the distributions to sample from
distributions = {
    'lr': log_uniform(-5, 2),
    'momentum': uniform(0.5, 0.5),
    'nesterov': binom(1, 0.5)
}

# Allow 36 random evaluations.
tuner = RandomSearch(optimizer_class,
                     hyperparams,
                     distributions,
                     runner=StandardRunner,
                     ressources=36)  # closing arguments reconstructed; the snippet was truncated here
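
# log_uniform is imported from deepobs.tuner.tuner_utils above. If only a
# scipy-style .rvs() interface is needed (an assumption about what the tuner
# calls), a stand-in covering the same exponent range as the grid's
# np.logspace(-5, 2, 6) could look like this:
class LogUniformSketch:
    """Sample 10**x with x drawn uniformly from [low, high]."""

    def __init__(self, low, high):
        self._exponent = uniform(loc=low, scale=high - low)

    def rvs(self, size=None, random_state=None):
        return 10.0 ** self._exponent.rvs(size=size, random_state=random_state)

# e.g. LogUniformSketch(-5, 2) for a learning rate between 1e-5 and 1e2.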
Code Example #7
File: runscript3.py Project: ludwigbald/probprec

# (Snippet starts mid-script; the imports below and the runner instances
# `prunner` and `runner` are assumed to be defined earlier in the file.)
import numpy as np
import torch.optim as optim
from deepobs import pytorch as pyt
from deepobs.tuner import GridSearch

for rs in range(10):  # assumed loop over ten random seeds; the original header is not shown
    prunner.run(testproblem='quadratic_deep',
                num_epochs=20,
                batch_size=32,
                random_seed=rs)
    runner.run(testproblem='quadratic_deep',
               num_epochs=20,
               batch_size=32,
               random_seed=rs)

## SGD on quadratic deep

optimizer_class = optim.SGD
hyperparams = {"lr": {"type": float}}

# The discrete values to construct a grid for.
grid = {'lr': np.logspace(-5, 2, 10)}

# Make sure to set the amount of resources to the grid size. For grid search, this is just a sanity check.
tuner = GridSearch(optimizer_class,
                   hyperparams,
                   grid,
                   runner=pyt.runners.StandardRunner,
                   ressources=10)  # grid size is 10 here, not 6 * 3 * 2 as in the momentum examples

# Tune (i.e. evaluate every grid point) and rerun the best setting with 10 different seeds.
tuner.tune('quadratic_deep',
           rerun_best_setting=True,
           num_epochs=20,
           batch_size=32,
           output_dir='./results')