Code example #1
File: driver.py  Project: prijatelj/shadho
import math

# Import the driver and random search from SHADHO
from shadho import Shadho, spaces


# Define the function to optimize, which returns a single floating-point value
# to optimize on. Hyperparameters are passed in as a dictionary with the
# same structure as `space` below.
def sin(params):
    return math.sin(params['x'])


if __name__ == '__main__':
    # Set up the search space, in this case a uniform distribution over the
    # domain [0, pi]
    space = {'x': spaces.uniform(0, math.pi)}

    # Pass the `sin` function, the search space, and a timeout into the SHADHO
    # driver and configure SHADHO to run locally.
    opt = Shadho('sin_local_example', sin, space, timeout=30)
    opt.config.manager = 'local'

    # Run SHADHO, and the optimal `x` value will be printed after 30s.
    opt.run()
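
Code example #8 below notes that SHADHO writes its results to 'results.json'. A minimal sketch for inspecting that file after a run like the one above, assuming only that it contains valid JSON (the exact schema is not shown in these excerpts):

import json

# Inspect whatever SHADHO wrote out; the schema is not documented in
# these excerpts, so we simply pretty-print it.
with open('results.json') as f:
    results = json.load(f)
print(json.dumps(results, indent=2))
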
Code example #2
    )

    dense_layers = spaces.repeat(dense_layer, 3)

    # The full search space is compressed into these two entries: one list
    # of convolutional layers and one list of dense layers.

    search_space = {
        'conv_layers': conv_layers,
        'dense_layers': dense_layers
    }

    # The optimizer is set up as in previous examples.

    opt = Shadho(
        'nas-tutorial',      # The experiment key
        'bash evaluate.sh',  # The command to run on the worker
        search_space,        # The search space
        method='random',     # The sampling method to use
        timeout=120          # The amount of time to run (s)
    )

    # Here we add the files to send to every worker, including the bash
    # script that sets up the environment, the Python training script,
    # and the file containing the dataset.

    opt.add_input_file('evaluate.sh')
    opt.add_input_file('train_cnn.py')
    opt.add_input_file('')  # the dataset file; its path is elided in this excerpt

    opt.run()
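
The excerpt above opens mid-definition, so the `dense_layer` scope that `spaces.repeat` duplicates is not shown. A purely hypothetical sketch of what such a scope could look like, built only from `spaces` constructors that appear elsewhere in these examples; the parameter names and ranges are illustrative, not the tutorial's actual ones:

# Hypothetical: one dense layer's search space, repeated 3x above.
dense_layer = spaces.scope(
    units=spaces.log2_randint(5, 11),           # layer width (illustrative range)
    activation=spaces.choice(['relu', 'tanh'])  # nonlinearity (illustrative set)
)
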
Code example #3
import math

# Import the driver and random search from SHADHO
from shadho import Shadho, spaces


# Define the function to optimize, which returns a single floating-point value
# to optimize on. Hyperparameters are passed in as a dictionary with the
# same structure as `space` below.
def sin(params):
    return math.sin(params['x'])


if __name__ == '__main__':
    # Set up the search space, in this case a uniform distribution over the
    # domain [0, pi]
    space = {'x': spaces.uniform(0, math.pi)}

    # Pass the `sin` function, the search space, and a timeout into the SHADHO
    # driver and configure SHADHO to run locally.
    opt = Shadho(sin, space, timeout=30)
    opt.config.manager = 'local'

    # Run SHADHO, and the optimal `x` value will be printed after 30s.
    opt.run()
Code example #4
File: driver.py  Project: prijatelj/shadho_helper
    # Set up the SHADHO driver like usual
    if args.pyrameter_model_sort in ['uniform_random', 'perceptron']:
        use_complexity = False
        use_priority = False
    else:
        use_complexity = True
        use_priority = True

    opt = Shadho(
        'bash svm_task.sh',
        space,
        use_complexity=use_complexity,
        use_priority=use_priority,
        timeout=args.timeout,
        backend=args.result_file,
        update_frequency=args.update_frequency,
        checkpoint_frequency=args.checkpoint_frequency,
        model_sort=args.model_sort,
        init_model_sort=args.init_model_sort,
        pyrameter_model_sort=args.pyrameter_model_sort,
    )
    #opt = Shadho('bash svm_task.sh', space, timeout=args.timeout, backend=args.result_file)
    # TODO implement the frequency and model sort arguments throughout pyrameter and shadho
    opt.config.workqueue.name = args.master_name
    opt.config.workqueue.port = 0

    # Add the task files to the optimizer
    opt.add_input_file('svm_task.sh')
    opt.add_input_file('svm.py')
    opt.add_input_file('mnist.npz')
Code example #5
File: driver.py  Project: prijatelj/shadho
"""This script shows an example of setting up a distributed Shadho search."""
import math

from shadho import Shadho, spaces

if __name__ == '__main__':
    # Set up the search space for sin(x)
    space = {'x': spaces.uniform(0, math.pi)}

    # Create a SHADHO driver. Unlike the local example, distributed SHADHO
    # requires a shell command to run on the worker.
    opt = Shadho('sin_distributed_example',
                 'bash ./sin_task.sh',
                 space,
                 timeout=60)

    # Add the files necessary to run the task
    opt.add_input_file('sin_task.sh')
    opt.add_input_file('sin.py')

    # Optionally, provide a name for the Work Queue master that tracks the
    # distributed workers.
    opt.config.workqueue.name = 'shadho_sin_ex'

    # Run the search, and the optimal observed value will be output after 60s.
    opt.run()
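
The driver above ships `sin_task.sh` and `sin.py` to each worker, but neither file is shown. A minimal sketch of what the worker-side `sin.py` could look like; the JSON file names here are assumptions standing in for however SHADHO's Work Queue tasks actually exchange parameters and results (check the SHADHO docs for the real protocol):

# sin.py -- hypothetical worker-side task script.
import json
import math

# ASSUMPTION: placeholder file names; the real input/output files are
# defined by SHADHO's task protocol, not by this sketch.
with open('hyperparameters.json') as f:
    params = json.load(f)

loss = math.sin(params['x'])

with open('performance.json', 'w') as f:
    json.dump({'loss': loss}, f)
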
Code example #6
optimizers = spaces.scope(
    exclusive=True,
    sgd=spaces.scope(
        lr=spaces.log10_uniform(-4, -1),
        momentum=spaces.uniform(0, 1),
        decay=spaces.log10_uniform(-4, -1)),
    rmsprop='rmsprop',
    adagrad='adagrad',
    adadelta='adadelta',
    adam='adam',
    adamax='adamax',
    nadam='nadam')

# Set up the full search space over the U-Net down- and upsampling blocks
space = spaces.scope(
    optimizer=optimizers,
    min_filters=spaces.log2_randint(5, 8),
    down1=spaces.scope(conv1=conv, conv2=conv),
    down2=spaces.scope(conv1=conv, conv2=conv),
    down3=spaces.scope(conv1=conv, conv2=conv),
    down4=spaces.scope(conv1=conv, conv2=conv),
    up1=spaces.scope(conv1=conv, conv2=conv),
    up2=spaces.scope(conv1=conv, conv2=conv),
    up3=spaces.scope(conv1=conv, conv2=conv),
    up4=spaces.scope(conv1=conv, conv2=conv),
    out=spaces.scope(conv1=conv, conv2=conv))


if __name__ == '__main__':
    opt = Shadho()
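
With `exclusive=True`, each trial should draw exactly one entry from the `optimizer` scope: either the nested `sgd` scope or one of the plain strings. A sketch of how an objective might branch on that, assuming the sampled value arrives as a dict for the nested scope and as a string otherwise (an assumption about the sampled structure, not documented in this excerpt):

# Hypothetical consumer of the exclusive `optimizer` scope above.
def build_optimizer(params):
    opt = params['optimizer']
    if isinstance(opt, dict):
        # The nested 'sgd' scope was sampled: keys lr, momentum, decay.
        return 'sgd', opt
    # One of the string-valued entries ('adam', 'nadam', ...) was sampled.
    return opt, {}
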
Code example #7
File: benchmarks.py  Project: jeffkinnison/shadho
def driver(benchmark,
           dataset,
           exp_key,
           method,
           inner_method='random',
           timeout=600,
           max_tasks=500,
           seed=None):
    """Run an HPOBench benchmark through a SHADHO optimizer.
    Parameters
    ----------
    benchmark : str
        The name of the HPOBench benchmark to run; must be a key of
        ``BENCHMARKS``.
    dataset : str or int
        The name of the HPOBench dataset to use, passed to the benchmark
        as its HPOBench ``task_id``.
    exp_key : str
        Name of the session provided to the driver and workers.
    method : str or `pyrameter.methods.Method`
        The optimization method to use.
    inner_method : str or `pyrameter.methods.Method`, optional
        The inner optimization method to use in a bilevel optimization.
        Ignored if ``method`` is not bilevel or is an instance of
        `pyrameter.methods.BilevelMethod`.
    timeout : int
        The amount of time to run the search in seconds. Default 600.
    max_tasks : int
        The maximum number of hyperparameter sets to evaluate. Default: 500.
    seed : int, optional
        The random seed to apply to SHADHO and HPOBench. If not supplied, uses
        the default RNG protocol for each.
    """
    # Grab the benchmark object here with importlib
    b = BENCHMARKS[benchmark](task_id=dataset, rng=seed)
    obj = functools.partial(run_benchmark, b)

    # Grab the configuration space here
    config = b.get_configuration_space(seed=seed)

    # Convert the HPOBench config to a SHADHO search space
    space = convert_config_to_shadho(config)

    # Create the SHADHO object
    if isinstance(method, str):
        try:
            if re.search('^(ncqs|hom)', method):
                method = METHODS[method](METHODS[inner_method]())
            else:
                method = METHODS[method]()
        except KeyError:
            raise ValueError(
                f'Invalid optimization method {method} requested. ' + \
                f'Re-run with one of {list(METHODS.keys())}'
            )
    opt = Shadho(exp_key,
                 obj,
                 space,
                 method=method,
                 timeout=timeout,
                 max_tasks=max_tasks,
                 seed=seed)

    # Run the SHADHO search
    opt.run()
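
A hypothetical invocation of `driver`; the benchmark key and dataset id below are placeholders, since the valid values are whatever `BENCHMARKS` and HPOBench actually define:

if __name__ == '__main__':
    driver(
        benchmark='xgboost',      # placeholder: must be a key of BENCHMARKS
        dataset=167149,           # placeholder: an HPOBench task id
        exp_key='hpobench-demo',
        method='random',
        seed=42,
    )
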
Code example #8

if __name__ == '__main__':
    # We set up the search space for the objective with two domains:
    #    x: a continuous uniform distribution over [0, 2*pi]
    #    y: a discrete set of 1000 evenly-spaced numbers in [0, 2*pi]
    #
    # Note that the dictionary passed to the objective retains the structure
    # defined here.
    search_space = {
        'x': spaces.uniform(0, 2 * math.pi),
        'y': spaces.choice(list(np.linspace(0, 2 * math.pi, 1000)))
    }

    # We next set up the optimizer, which will attempt to minimize the
    # objective locally. It takes an experiment key, the objective function,
    # the search space, a search method, and a timeout.

    opt = Shadho(
        'convex-tutorial',  # Name of this experiment
        objective,  # The function to optimize
        search_space,  # The search space to sample
        method='random',    # The sampling method: one of 'random', 'bayes', 'tpe', 'smac'
        timeout=30  # The time to run the search, in seconds.
    )

    # We then run the optimization, and SHADHO records the results.
    # Results are written to 'results.json'.
    opt.run()
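
This excerpt begins after the objective is defined, so the function being minimized is not shown. A stand-in consistent with the two-domain search space, purely illustrative (a simple convex bowl, matching the 'convex-tutorial' experiment name):

import math

# Hypothetical stand-in objective: a convex bowl over (x, y),
# minimized at (pi, pi).
def objective(params):
    return (params['x'] - math.pi) ** 2 + (params['y'] - math.pi) ** 2
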
Code example #9
            'kernel': 'sigmoid',  # add the kernel name for convenience
            'C': C,
            'gamma': gamma,
            'coef0': coef0
        },
        'poly': {
            'kernel': 'poly',  # add the kernel name for convenience
            'C': C,
            'gamma': gamma,
            'coef0': coef0,
            'degree': spaces.randint(2, 15)
        },
    }

    # Set up the SHADHO driver like usual
    opt = Shadho('bash svm_task.sh', space, timeout=600)
    opt.config.workqueue.name = 'shadho_svm_ex'

    # Add the task files to the optimizer
    opt.add_input_file('svm_task.sh')
    opt.add_input_file('svm.py')
    opt.add_input_file('mnist.npz')

    # We can divide the work over different compute classes, or sets of workers
    # with common hardware resources, if such resources are available. SHADHO
    # will attempt to divide work across hardware in a way that balances the
    # search.
    # For example, in a cluster with 20 16-core, 25 8-core, and 50 4-core
    # nodes, we can specify:
    # opt.add_compute_class('16-core', 'cores', 16, max_tasks=20)
    # opt.add_compute_class('8-core', 'cores', 8, max_tasks=25)
    # opt.add_compute_class('4-core', 'cores', 4, max_tasks=50)
Code example #10
            'kernel': 'sigmoid',  # add the kernel name for convenience
            'C': C,
            'gamma': gamma,
            'coef0': coef0
        },
        'poly': {
            'kernel': 'poly',  # add the kernel name for convenience
            'C': C,
            'gamma': gamma,
            'coef0': coef0,
            'degree': spaces.randint(2, 15)
        },
    }

    # Set up the SHADHO driver like usual
    opt = Shadho('shadho_svm_example', 'bash svm_task.sh', space, timeout=3600)
    opt.config.workqueue.name = 'shadho_svm_ex'

    # Add the task files to the optimizer
    opt.add_input_file('svm_task.sh')
    opt.add_input_file('svm.py')
    opt.add_input_file('mnist.npz')

    # We can divide the work over different compute classes, or sets of workers
    # with common hardware resources, if such resources are available. SHADHO
    # will attempt to divide work across hardware in a way that balances the
    # search.
    # For example, in a cluster with 20 16-core, 25 8-core, and 50 4-core
    # nodes, we can specify:
    # opt.add_compute_class('16-core', 'cores', 16, max_tasks=20)
    # opt.add_compute_class('8-core', 'cores', 8, max_tasks=25)
    # opt.add_compute_class('4-core', 'cores', 4, max_tasks=50)
Code example #11
"""This script shows an example of setting up a distributed Shadho search."""
import math

from shadho import Shadho, spaces


if __name__ == '__main__':
    # Set up the search space for sin(x)
    space = {'x': spaces.uniform(0, math.pi)}

    # Create a SHADHO driver. Unlike the local example, distributed SHADHO
    # requires a shell command to run on the worker.
    opt = Shadho('bash ./sin_task.sh', space, timeout=60)

    # Add the files necessary to run the task
    opt.add_input_file('sin_task.sh')
    opt.add_input_file('sin.py')

    # Optionally, provide a name for the Work Queue master that tracks the
    # distributed workers.
    opt.config.workqueue.name = 'shadho_sin_ex'

    # Run the search, and the optimal observed value will be output after 60s.
    opt.run()
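
To actually execute tasks, Work Queue workers must connect to the master named above. A sketch of launching one worker from Python, assuming cctools is installed and `work_queue_worker` is on PATH (-M matches a worker to a master by project name via the catalog server):

import subprocess

# ASSUMPTION: cctools is installed; this simply shells out to the
# standard work_queue_worker executable.
subprocess.run(['work_queue_worker', '-M', 'shadho_sin_ex'])
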
Code example #12
        'coef0': coef0,
    },
    'poly': {
        'kernel': 'poly',
        'C': C,
        'gamma': gamma,
        'coef0': coef0,
        'degree': degree,
    },
}

files = ['svm.py',
         'svm.sh',
         'mnist.npz']


if __name__ == "__main__":
    #start = time.time()
    opt = Shadho('./svm.sh', spec,
                 use_priority=True,
                 use_complexity=True,
                 timeout=3600,
                 max_tasks=150,
                 max_resubmissions=3)
    
    for i in files:
        opt.add_input_file(i)
    
    #opt.add_compute_class('smp16', 'cores', 16, max_tasks=50)
    #opt.add_compute_class('smp8', 'cores', 8, max_tasks=50)
    #opt.add_compute_class('smp4', 'cores', 4, max_tasks=50)
    
    opt.run()
    #with open('timing.log', 'w') as f:
    #    f.write(str(start) + ',' + str(time.time() - start))
Code example #13
"""This script runs the hyperparameter search on remote workers.
"""

# These are imported, same as before
from shadho import Shadho, spaces

import math

# The space is also defined exactly the same.
space = {'x': spaces.uniform(0.0, 2.0 * math.pi)}

if __name__ == '__main__':
    # This time, instead of configuring shadho to run locally,
    # we direct it to the input files that run the optimization task.

    # Instead of the objective function, shadho is given a command that gets
    # run on the remote worker.
    opt = Shadho('shadho-wq-packaging-test',
                 'bash run_sin.sh',
                 space,
                 timeout=60)

    # Two input files are also added: the first is run directly by the worker
    # and can be used to set up your runtime environment (module load, anyone?)
    # The second is the script we're trying to optimize.
    opt.add_input_file('run_sin.sh')
    opt.add_input_file('sin.py')
    opt.run()