コード例 #1
0
# Example use case:
an_optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id=args['template_task_id'],
    # here we define the hyper-parameters to optimize
    hyper_parameters=[
        UniformIntegerParameterRange('General/layer_1',
                                     min_value=128,
                                     max_value=512,
                                     step_size=128),
        UniformIntegerParameterRange('General/layer_2',
                                     min_value=128,
                                     max_value=512,
                                     step_size=128),
        DiscreteParameterRange('General/batch_size', values=[96, 128, 160]),
        DiscreteParameterRange('General/epochs', values=[30]),
    ],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='epoch_accuracy',
    objective_metric_series='epoch_accuracy',
    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments,
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
    max_number_of_concurrent_tasks=2,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
    # more are coming soon...
    optimizer_class=aSearchStrategy,
コード例 #2
0
# Example use case:
an_optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id=args['template_task_id'],
    # here we define the hyper-parameters to optimize
    hyper_parameters=[
        UniformIntegerParameterRange('layer_1',
                                     min_value=128,
                                     max_value=512,
                                     step_size=128),
        UniformIntegerParameterRange('layer_2',
                                     min_value=128,
                                     max_value=512,
                                     step_size=128),
        DiscreteParameterRange('batch_size', values=[96, 128, 160]),
        DiscreteParameterRange('epochs', values=[30]),
    ],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='val_acc',
    objective_metric_series='val_acc',
    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments,
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
    max_number_of_concurrent_tasks=2,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
    # more are coming soon...
    optimizer_class=Our_SearchStrategy,
コード例 #3
0
ファイル: hp_search.py プロジェクト: thisis-nkul/roc-star
# MODIFY THIS, MAKE SURE TO CHOOSE THE CORRECT TEMPLATE TASK ID
# experiment template to optimize in the hyper-parameter optimization
args = {
    'template_task_id': '8d8ff6167a334ff59c3001c06e996eda',
    'run_as_service': False,
}
args = task.connect(args)

# Example use case:
an_optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id=args['template_task_id'],
    # here we define the hyper-parameters to optimize
    hyper_parameters=[
        DiscreteParameterRange('lstm_units', values=[64, 96, 128]),
        DiscreteParameterRange('dense_hidden_units', values=[512, 1024, 2048]),
        DiscreteParameterRange('use_roc_star', values=[True,False])
    ],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='Accuracy',
    objective_metric_series='validation accuracy',
    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments,
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
    max_number_of_concurrent_tasks=4,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
    # more are coming soon...