Example #1
def main(benchmark_name, dataset_name, dimensions, method_name, num_runs,
         run_start, num_iterations, eta, min_budget, max_budget, input_dir,
         output_dir):

    benchmark = make_benchmark(benchmark_name,
                               dimensions=dimensions,
                               dataset_name=dataset_name,
                               input_dir=input_dir)
    name = make_name(benchmark_name,
                     dimensions=dimensions,
                     dataset_name=dataset_name)

    output_path = Path(output_dir).joinpath(name, method_name)
    output_path.mkdir(parents=True, exist_ok=True)

    options = dict(eta=eta, min_budget=min_budget, max_budget=max_budget)
    with output_path.joinpath("options.yaml").open('w') as f:
        yaml.dump(options, f)

    for run_id in range(run_start, num_runs):

        NS = hpns.NameServer(run_id=run_id, host='localhost', port=0)
        ns_host, ns_port = NS.start()

        num_workers = 1

        workers = []
        for worker_id in range(num_workers):
            w = BenchmarkWorker(benchmark=benchmark,
                                nameserver=ns_host,
                                nameserver_port=ns_port,
                                run_id=run_id,
                                id=worker_id)
            w.run(background=True)
            workers.append(w)

        rs = RandomSearch(configspace=benchmark.get_config_space(),
                          run_id=run_id,
                          nameserver=ns_host,
                          nameserver_port=ns_port,
                          ping_interval=10,
                          **options)

        results = rs.run(num_iterations, min_n_workers=num_workers)

        rs.shutdown(shutdown_workers=True)
        NS.shutdown()

        data = HpBandSterLogs(results).to_frame()
        data.to_csv(output_path.joinpath(f"{run_id:03d}.csv"))

    return 0
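# The snippet above relies on project-specific helpers (make_benchmark, make_name,
# BenchmarkWorker, HpBandSterLogs) that are not shown. As a rough sketch only,
# BenchmarkWorker is presumably an HpBandSter Worker subclass along these lines;
# the benchmark.evaluate call is an assumption, not the project's actual API:
from hpbandster.core.worker import Worker

class BenchmarkWorker(Worker):
    def __init__(self, benchmark, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.benchmark = benchmark

    def compute(self, config, budget, **kwargs):
        # Evaluate one configuration at the given budget and report its loss.
        value = self.benchmark.evaluate(config, budget)  # hypothetical method
        return {'loss': float(value), 'info': {}}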
Example #2
def get_parameters(train_data, kFold, iterations, save=False, filepath='./result/loss_time_rs.csv'):
    parser = argparse.ArgumentParser(description='Example 1 - sequential and local execution.')
    parser.add_argument('--min_budget', type=float, help='Minimum budget used during the optimization.', default=1)
    parser.add_argument('--max_budget', type=float, help='Maximum budget used during the optimization.', default=1)
    parser.add_argument('--n_iterations', type=int, help='Number of iterations performed by the optimizer', default=iterations) # max value = 4
    # parser.add_argument('--worker', help='Flag to turn this into a worker process', action='store_true')
    parser.add_argument('--shared_directory', type=str, help='A directory that is accessible for all processes, e.g. a NFS share.', default='./result')
    # parser.add_argument('--nic_name', type=str, default='lo')
    args = parser.parse_args()

    result_logger = hpres.json_result_logger(directory=args.shared_directory, overwrite=True)

    NS = hpns.NameServer(run_id='RandomSearch', host='127.0.0.1', port=None)
    NS.start()

    w = worker(train_data, kFold, nameserver='127.0.0.1', run_id='RandomSearch')
    w.run(background=True)

    randomSearch = RandomSearch(configspace=w.get_configspace(),
                                run_id='RandomSearch', nameserver='127.0.0.1',
                                eta=3,
                                min_budget=args.min_budget, max_budget=args.max_budget,
                                result_logger=result_logger)
    res = randomSearch.run(n_iterations=args.n_iterations)

    randomSearch.shutdown(shutdown_workers=True)
    NS.shutdown()

    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()
    info = res.get_runs_by_id(incumbent)

    parameter = id2config[incumbent]['config']
    min_error = info[0]['loss']

    if save:
        all_info = res.get_all_runs()
        timepoint_dic = []
        loss_dic = []

        for i in all_info:
            timepoint_dic.append(i['time_stamps']['finished'])
            loss_dic.append(i['loss'])

        save_to_csv.save(filepath, timepoint_dic, loss_dic)

    return parameter, min_error
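# A minimal usage sketch (not part of the original example). `train_data` and the
# helper modules (worker, save_to_csv) are assumed to come from the surrounding
# project; load_training_data below is a hypothetical placeholder.
if __name__ == '__main__':
    train_data = load_training_data()  # hypothetical helper
    best_config, best_loss = get_parameters(train_data, kFold=5, iterations=4, save=True)
    print('Best configuration:', best_config)
    print('Best loss:', best_loss)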
Example #3
# Step 2: Start a worker
# Now we can instantiate a worker, providing the mandatory information:
# the nameserver details and the same run_id as above. After that, we can
# start the worker in the background, where it will wait for incoming
# configurations to evaluate.
w = MyWorker(nameserver='127.0.0.1', run_id='example1')
w.run(background=True)

# Step 3: Run an optimizer
# Now we can create an optimizer object and start the run.
# Here, we run RandomSearch, but that is not essential.
# The run method returns a `Result` object that contains all performed runs.

rs = RandomSearch(configspace=w.get_configspace(),
                  run_id='example1', nameserver='127.0.0.1',
                  min_budget=int(args.budget), max_budget=int(args.budget))
res = rs.run(n_iterations=args.n_iterations)

# Step 4: Shutdown
# After the optimizer run, we must shut down the master and the nameserver.
rs.shutdown(shutdown_workers=True)
NS.shutdown()

# Step 5: Analysis
# Each optimizer returns a hpbandster.core.result.Result object.
# It holds information about the optimization run, such as the incumbent (i.e. best) configuration.
# For further details about the Result object, see its documentation.
# Here we simply print out the best config and some statistics about the performed runs.
id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
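# The snippet ends here; a typical continuation (following the standard
# HpBandSter examples) prints the incumbent and a few run statistics:
print('Best found configuration:', id2config[incumbent]['config'])
print('A total of %i unique configurations were sampled.' % len(id2config.keys()))
print('A total of %i runs were executed.' % len(res.get_all_runs()))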
Example #4
else:
    isKeras = False

# Step 3: Run an optimizer
result_logger = hpres.json_result_logger(directory='.',
                                         overwrite=True)
if not isKeras:
    if OPTIMIZER == 'BOHB':
        optimizer = BOHB(configspace=worker.get_warmstart_configspace(), run_id='example1',
                         nameserver='127.0.0.1', min_budget=10, max_budget=10, eta=3.0,
                         result_logger=result_logger)
        res = optimizer.run(n_iterations=1)

    elif OPTIMIZER == 'RandomSearch':
        optimizer = RandomSearch(configspace=worker.get_configspace(), run_id='example1',
                                 nameserver='127.0.0.1', min_budget=1, max_budget=9, eta=3.0,
                                 result_logger=result_logger)
        res = optimizer.run(n_iterations=10)
else:
    if OPTIMIZER == 'BOHB':
        optimizer = BOHB(configspace=worker.get_configspace(), run_id='example1',
                         nameserver='127.0.0.1', min_budget=3, max_budget=100, eta=3.0,
                         result_logger=result_logger)
        res = optimizer.run(n_iterations=5)
    elif OPTIMIZER == 'RandomSearch':
        optimizer = RandomSearch(configspace=worker.get_configspace(), run_id='example1',
                                 nameserver='127.0.0.1', min_budget=3, max_budget=100, eta=3.0,
                                 result_logger=result_logger)
        res = optimizer.run(n_iterations=5)

# Step 4: Shutdown
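# The snippet is cut off after this comment. Based on the shutdown pattern used in
# the other examples (and assuming the nameserver object is named NS), the missing
# step would look roughly like this:
optimizer.shutdown(shutdown_workers=True)
NS.shutdown()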
Example #5
def run_opt(args):
    # import PyTorchWorker as worker

    # Every process has to lookup the hostname
    host = hpns.nic_name_to_host(args.nic_name)

    result_logger = hpres.json_result_logger(directory=args.shared_directory,
                                             overwrite=True)

    # Start a nameserver:
    NS = hpns.NameServer(run_id=args.run_id,
                         host=host,
                         port=0,
                         working_directory=args.shared_directory)
    ns_host, ns_port = NS.start()

    # Start local worker
    w = worker(run_id=args.run_id,
               host=host,
               nameserver=ns_host,
               nameserver_port=ns_port,
               timeout=120)
    w.run(background=True)

    if args.method == "BOHB":
        print("[RUNNER] method: BOHB")
        opt = BOHB(
            configspace=worker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            result_logger=result_logger,
            min_budget=args.min_budget,
            max_budget=args.max_budget,
        )
    elif args.method == "random":
        print("[RUNNER] method: random")
        opt = RandomSearch(
            configspace=worker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            result_logger=result_logger,
            min_budget=args.min_budget,
            max_budget=args.max_budget,
        )
    elif args.method == "BO":
        print("[RUNNER] method: BO")
        opt = BO_Search(
            configspace=worker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            result_logger=result_logger,
            min_budget=args.min_budget,
            max_budget=args.max_budget,
        )

    elif args.method == "HB":
        opt = HyperBand(
            configspace=worker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            result_logger=result_logger,
            min_budget=args.min_budget,
            max_budget=args.max_budget,
        )

    else:
        raise ValueError(f"Unknown method: {args.method}")

    res = opt.run(n_iterations=args.n_iterations)

    # store results
    with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh:
        pickle.dump(res, fh)

    # shutdown
    opt.shutdown(shutdown_workers=True)
    NS.shutdown()
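
# Not part of the original example: a minimal sketch showing how the pickled
# Result object could be reloaded later for analysis (assumes os and pickle
# are imported as above).
def load_results(shared_directory):
    with open(os.path.join(shared_directory, 'results.pkl'), 'rb') as fh:
        res = pickle.load(fh)
    id2config = res.get_id2config_mapping()
    print('Best found configuration:', id2config[res.get_incumbent_id()]['config'])
    return res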
Example #6
    # Continue previous runs
    if args.previous_run_dir:
        previous_run = hpres.logged_results_to_HBS_result(
            args.previous_run_dir)
    else:
        previous_run = None

    # Random search

    if hpo_method == 'rs':
        hpo_worker = RS(configspace=worker.get_configspace(
            args.default_config, args.test_mode, args.leaf, args.lr,
            args.tree),
                        run_id=hpo_run_id,
                        nameserver=ns_host,
                        nameserver_port=ns_port,
                        result_logger=result_logger,
                        min_budget=args.max_budget,  # random search only evaluates at the full budget
                        max_budget=args.max_budget,
                        previous_result=previous_run)
        res = hpo_worker.run(n_iterations=args.n_iterations,
                             min_n_workers=args.n_workers)

        # store results
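        # (The storage code itself is omitted in this snippet. A minimal sketch,
        #  mirroring Example #5 and assuming pickle is imported and a hypothetical
        #  output file name, might be:)
        with open('rs_results.pkl', 'wb') as fh:
            pickle.dump(res, fh)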

    elif hpo_method == 'bohb':
        hpo_worker = BOHB(configspace=worker.get_configspace(
            args.default_config, args.test_mode, args.leaf, args.lr,
            args.tree),
                          run_id=hpo_run_id,
                          nameserver=ns_host,
Example #7
    # Step 2: Start a worker
    # Now we can instantiate a worker, providing the mandatory information:
    # the nameserver details and the same run_id as above. After that, we can
    # start the worker in the background, where it will wait for incoming
    # configurations to evaluate.
    w = MyWorker(nameserver='127.0.0.1', run_id='example1')
    w.run(background=True)

    # Step 3: Run an optimizer
    # Now we can create an optimizer object and start the run.
    # Here, we run RandomSearch, but that is not essential.
    # The run method returns a `Result` object that contains all performed runs.

    rs = RandomSearch(configspace=w.get_configspace(exercise),
                      run_id='example1',
                      nameserver='127.0.0.1',
                      min_budget=int(budget),
                      max_budget=int(budget),
                      result_logger=logger)
    res = rs.run(n_iterations=iterations)

    # Step 4: Shutdown
    # After the optimizer run, we must shut down the master and the nameserver.
    rs.shutdown(shutdown_workers=True)

    NS.shutdown()

    # Step 5: Analysis
    # Each optimizer returns a hpbandster.core.result.Result object.
    # It holds information about the optimization run, such as the incumbent (i.e. best) configuration.
    # For further details about the Result object, see its documentation.
    # Here we simply print out the best config and some statistics about the performed runs.
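    # The snippet ends with the comment above; a typical continuation (cf. Example #2)
    # extracts the incumbent configuration and its recorded loss:
    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()
    incumbent_runs = res.get_runs_by_id(incumbent)
    print('Best found configuration:', id2config[incumbent]['config'])
    print('Loss of the incumbent:', incumbent_runs[-1]['loss'])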