Example #1
    )

    dense_layers = spaces.repeat(dense_layer, 3)

    # The full search space is compressed into these two entries: one list
    # of convolutional layers and one list of dense layers.

    search_space = {
        'conv_layers': conv_layers,
        'dense_layers': dense_layers
    }

    # The optimizer is set up as in previous examples.

    opt = Shadho(
        'nas-tutorial',      # The experiment key
        'bash evaluate.sh',  # The command to run on the worker
        search_space,        # The search space
        method='random',     # The sampling method to use
        timeout=120          # The amount of time to run (s)
    )

    # Here we add the files to send to every worker, including the bash
    # script that sets up the environment, the Python training script,
    # and the file containing the dataset.

    opt.add_input_file('evaluate.sh')
    opt.add_input_file('train_cnn.py')
    opt.add_input_file('')  # The dataset file (path left empty here)

    opt.run()
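
The top of this snippet is truncated: the definitions of the conv_layer and
dense_layer scopes are missing. In SHADHO, such a scope is just a nested
dictionary of spaces, which spaces.repeat then stacks into a list. A hedged
reconstruction of the missing dense_layer, where the hyperparameter names and
ranges are illustrative assumptions rather than the tutorial's actual values:

from shadho import spaces

# Hypothetical dense-layer scope; 'units', 'dropout', and their ranges
# are assumptions for illustration.
dense_layer = {
    'units': spaces.uniform(64, 512),     # cast to int inside train_cnn.py
    'dropout': spaces.uniform(0.0, 0.5),
}

# As in the snippet above: a list of three copies of the dense-layer scope.
dense_layers = spaces.repeat(dense_layer, 3)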
Example #2
        use_priority=use_priority,
        timeout=args.timeout,
        backend=args.result_file,
        update_frequency=args.update_frequency,
        checkpoint_frequency=args.checkpoint_frequency,
        model_sort=args.model_sort,
        init_model_sort=args.init_model_sort,
        pyrameter_model_sort=args.pyrameter_model_sort,
    )
    #opt = Shadho('bash svm_task.sh', space, timeout=args.timeout, backend=args.result_file)
    # TODO implement the frequency and model sort arguments throughout pyrameter and shadho
    opt.config.workqueue.name = args.master_name
    opt.config.workqueue.port = 0

    # Add the task files to the optimizer
    opt.add_input_file('svm_task.sh')
    opt.add_input_file('svm.py')
    opt.add_input_file('mnist.npz')

    # We can divide the work over different compute classes, or sets of workers
    # with common hardware resources, if such resources are available. SHADHO
    # will attempt to divide work across hardware in a way that balances the
    # search.
    # For example, in a cluster with 20 16-core, 25 8-core, and 50 4-core
    # nodes, we can specify:
    opt.add_compute_class('16-core', 'cores', 16, max_tasks=20)
    opt.add_compute_class('8-core', 'cores', 8, max_tasks=25)
    opt.add_compute_class('4-core', 'cores', 4, max_tasks=50)
    #opt.add_compute_class('2-core', 'cores', 2, max_tasks=20)

    opt.run()
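
The head of this example is cut off, but the visible code reads several
attributes from a parsed args object. A hedged reconstruction of the argument
parsing it implies is below; the option names mirror the attributes used above
(args.timeout, args.master_name, and so on), while every default value is an
illustrative assumption:

import argparse

def parse_args():
    # Each option maps to an attribute the truncated code reads; argparse
    # converts dashes in option names to underscores automatically.
    parser = argparse.ArgumentParser(description='SHADHO SVM search driver')
    parser.add_argument('--timeout', type=int, default=600)
    parser.add_argument('--master-name', default='shadho-svm')
    parser.add_argument('--result-file', default='results.json')
    parser.add_argument('--update-frequency', type=int, default=10)
    parser.add_argument('--checkpoint-frequency', type=int, default=10)
    parser.add_argument('--model-sort', default='random')
    parser.add_argument('--init-model-sort', default='random')
    parser.add_argument('--pyrameter-model-sort', default='random')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()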
Example #3
"""This script shows an example of setting up a distributed Shadho search."""
import math

from shadho import Shadho, spaces

if __name__ == '__main__':
    # Set up the search space for sin(x)
    space = {'x': spaces.uniform(0, math.pi)}

    # Create a SHADHO driver. Unlike the local example, distributed SHADHO
    # requires a shell command to run on the worker.
    opt = Shadho('sin_distributed_example',
                 'bash ./sin_task.sh',
                 space,
                 timeout=60)

    # Add the files necessary to run the task
    opt.add_input_file('sin_task.sh')
    opt.add_input_file('sin.py')

    # Optionally, provide a name for the Work Queue master that tracks the
    # distributed workers.
    opt.config.workqueue.name = 'shadho_sin_ex'

    # Run the search, and the optimal observed value will be output after 60s.
    opt.run()
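
For this search to make progress, Work Queue workers have to connect to the
master named above. A sketch of launching one worker from Python, typically on
another machine or in another terminal; it assumes CCTools is installed so the
work_queue_worker binary is on the PATH and that a catalog server is available
for lookup by project name:

import subprocess

# Start one Work Queue worker that finds the master by the project name
# set via opt.config.workqueue.name above.
subprocess.run([
    'work_queue_worker',
    '-M', 'shadho_sin_ex',  # match the master/project name
    '--timeout', '60',      # exit after 60 seconds of idling
])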
Example #4
            'coef0': coef0,
            'degree': degree
        },
        'exclusive': True  # Tells SHADHO to sample from one kernel at a time
    }

    # The optimizer is set up as in previous examples.

    opt = Shadho(
        'svm-tutorial',  # The experiment key
        'bash evaluate.sh',  # The command to run on the worker
        search_space,  # The search space
        method='random',  # The sampling method to use
        timeout=120  # The amount of time to run (s)
    )

    # Here we add the files to send to every worker, including the bash
    # script that sets up the environment, the Python training script,
    # and the file containing the dataset.
    opt.add_input_file('evaluate.sh')
    opt.add_input_file('train_svm.py')
    opt.add_input_file('mnist.npz')

    # We can also add compute classes, groups of expected workers with
    # similar available hardware.
    # opt.add_compute_class('16-core', 'cores', 16)
    # opt.add_compute_class('8-core', 'cores', 8)
    # opt.add_compute_class('4-core', 'cores', 4)

    opt.run()
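
The search space at the top of this example is truncated; its visible tail
(coef0, degree, and the exclusive flag) matches the multi-kernel SVM space
laid out in Example #5. A hedged reconstruction follows, with every numeric
range an illustrative assumption:

from shadho import spaces

# Shared hyperparameter spaces; the ranges are assumptions for illustration.
C = spaces.uniform(0.1, 10.0)
gamma = spaces.uniform(0.0001, 1.0)
coef0 = spaces.uniform(-1.0, 1.0)
degree = spaces.uniform(2, 5)  # cast to int inside train_svm.py

search_space = {
    'linear': {'kernel': 'linear', 'C': C},
    'rbf': {'kernel': 'rbf', 'C': C, 'gamma': gamma},
    'sigmoid': {'kernel': 'sigmoid', 'C': C, 'gamma': gamma, 'coef0': coef0},
    'poly': {
        'kernel': 'poly',
        'C': C,
        'gamma': gamma,
        'coef0': coef0,
        'degree': degree
    },
    'exclusive': True  # Tells SHADHO to sample from one kernel at a time
}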
Example #5
        'coef0': coef0,
    },
    'poly': {
        'kernel': 'poly',
        'C': C,
        'gamma': gamma,
        'coef0': coef0,
        'degree': degree,
    },
}

files = ['svm.py',
         'svm.sh',
         'mnist.npz']


if __name__ == "__main__":
    #start = time.time()
    opt = Shadho('./svm.sh', spec,
                 use_priority=True,
                 use_complexity=True,
                 timeout=3600,
                 max_tasks=150,
                 max_resubmissions=3)
    
    for fname in files:
        opt.add_input_file(fname)
    
    #opt.add_compute_class('smp16', 'cores', 16, max_tasks=50)
    #opt.add_compute_class('smp8', 'cores', 8, max_tasks=50)
    #opt.add_compute_class('smp4', 'cores', 4, max_tasks=50)
    
    opt.run()
    #with open('timing.log', 'w') as f:
    #    f.write(str(start) + ',' + str(time.time() - start))
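
The commented-out lines above sketch wall-clock timing around the search. A
working version of that idea, reusing spec and files from this example and
keeping the comma-separated timing.log format from the comments:

import time

from shadho import Shadho

if __name__ == '__main__':
    start = time.time()

    # spec and files are the search space and input files defined above.
    opt = Shadho('./svm.sh', spec,
                 use_priority=True,
                 use_complexity=True,
                 timeout=3600,
                 max_tasks=150,
                 max_resubmissions=3)
    for fname in files:
        opt.add_input_file(fname)
    opt.run()

    # Write the start timestamp and the elapsed seconds, comma-separated.
    with open('timing.log', 'w') as f:
        f.write(str(start) + ',' + str(time.time() - start))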
Example #6
"""This script runs the hyperparameter search on remote workers.
"""

# These are imported, same as before
from shadho import Shadho, spaces

import math

# The space is also defined exactly the same.
space = {'x': spaces.uniform(0.0, 2.0 * math.pi)}

if __name__ == '__main__':
    # This time, instead of configuring shadho to run locally,
    # we direct it to the input files that run the optimization task.

    # Instead of the objective function, shadho is given a command that gets
    # run on the remote worker.
    opt = Shadho('shadho-wq-packaging-test',
                 'bash run_sin.sh',
                 space,
                 timeout=60)

    # Two input files are also added: the first is run directly by the worker
    # and can be used to set up your runtime environment (module load, anyone?).
    # The second is the script we're trying to optimize.
    opt.add_input_file('run_sin.sh')
    opt.add_input_file('sin.py')
    opt.run()
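
For contrast with the comment above about replacing the objective function
with a command: a local run hands SHADHO the Python callable directly, so no
worker command or input files are needed. A minimal sketch of that local
counterpart, assuming the same positional signature as the distributed
examples with the callable in place of the command string:

import math

from shadho import Shadho, spaces

def objective(params):
    # The returned value is what SHADHO minimizes.
    return math.sin(params['x'])

if __name__ == '__main__':
    space = {'x': spaces.uniform(0.0, 2.0 * math.pi)}

    # Local mode: the callable replaces the worker shell command.
    opt = Shadho('sin-local-example', objective, space, timeout=60)
    opt.run()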