Example #1
def runBOHB(cfg):
    run_id = "0"

    # assign a random port in the 30000-40000 range to avoid reusing a port left blocked by a previous improper BOHB shutdown
    port = int(30000 + random.random() * 10000)

    ns = hpns.NameServer(run_id=run_id, host="127.0.0.1", port=port)
    ns.start()

    w = BOHBWorker(cfg=cfg,
                   nameserver="127.0.0.1",
                   run_id=run_id,
                   nameserver_port=port)
    w.run(background=True)

    result_logger = hpres.json_result_logger(directory=cfg["bohb_log_dir"],
                                             overwrite=True)

    bohb = BOHB(
        configspace=get_configspace(cfg['model']),
        run_id=run_id,
        min_budget=cfg["bohb_min_budget"],
        max_budget=cfg["bohb_max_budget"],
        eta=cfg["bohb_eta"],
        nameserver="127.0.0.1",
        nameserver_port=port,
        result_logger=result_logger,
    )

    res = bohb.run(n_iterations=cfg["bohb_iterations"])
    bohb.shutdown(shutdown_workers=True)
    ns.shutdown()

    return res
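This snippet (like most in this collection) omits its imports. A minimal set, assuming the standard HpBandSter module aliases used throughout these examples; BOHBWorker and get_configspace are project-specific:

import random

import hpbandster.core.nameserver as hpns
import hpbandster.core.result as hpres
from hpbandster.optimizers import BOHB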
Example #2
def optimize(cfg):
    logger = logging.getLogger(__name__)
    
    NS = hpns.NameServer(run_id=cfg.bohb.run_id, host=cfg.bohb.nameserver, port=None)
    NS.start()

    w = SAC_Worker(cfg.worker, nameserver=cfg.bohb.nameserver, run_id=cfg.bohb.run_id)
    w.run(background=True)

    bohb = BOHB(configspace=w.get_configspace(),
                run_id=cfg.bohb.run_id, nameserver=cfg.bohb.nameserver,
                min_budget=cfg.bohb.min_budget, max_budget=cfg.bohb.max_budget)
            
    res = bohb.run(n_iterations=cfg.bohb.n_iterations)

    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()

    # Store optimization results
    if not os.path.exists("optimization_results/"): 
        os.makedirs("optimization_results/")
    with open(os.path.join("optimization_results/", "%s.pkl" % cfg.bohb.run_id), 'wb') as fh:
        pickle.dump(res, fh)
    
    logger.info('Best found configuration: %s' % id2config[incumbent]['config'])
    logger.info('A total of %i unique configurations were sampled.' % len(id2config.keys()))
    logger.info('A total of %i runs were executed.' % len(res.get_all_runs()))
    logger.info('Total budget corresponds to %.1f full function evaluations.' % (sum([r.budget for r in res.get_all_runs()]) / cfg.bohb.max_budget))
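Since optimize() pickles the full Result object, it can be reloaded later for offline analysis; a minimal sketch, assuming a run_id of "my_run" (hypothetical):

import pickle

with open("optimization_results/my_run.pkl", "rb") as fh:
    res = pickle.load(fh)

id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
print("Best configuration:", id2config[incumbent]["config"])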
Example #3
def run_master(args):
    NS = hpns.NameServer(run_id=args.run_id,
                         nic_name=args.nic_name,
                         working_directory=args.bohb_root_path)
    ns_host, ns_port = NS.start()

    # Start a background worker for the master node
    if args.optimize_generalist:
        w = AggregateWorker(run_id=args.run_id,
                            host=ns_host,
                            nameserver=ns_host,
                            nameserver_port=ns_port,
                            working_directory=args.bohb_root_path,
                            n_repeat=args.n_repeat,
                            has_repeats_as_budget=args.n_repeat is None,
                            time_budget=args.time_budget,
                            time_budget_approx=args.time_budget_approx,
                            performance_matrix=args.performance_matrix)
    else:
        w = SingleWorker(run_id=args.run_id,
                         host=ns_host,
                         nameserver=ns_host,
                         nameserver_port=ns_port,
                         working_directory=args.bohb_root_path,
                         n_repeat=args.n_repeat,
                         dataset=args.dataset,
                         time_budget=args.time_budget,
                         time_budget_approx=args.time_budget_approx)
    w.run(background=True)

    # Create an optimizer
    result_logger = hpres.json_result_logger(directory=args.bohb_root_path,
                                             overwrite=False)

    if args.previous_run_dir is not None:
        previous_result = hpres.logged_results_to_HBS_result(
            args.previous_run_dir)
    else:
        previous_result = None

    logger = logging.getLogger(__file__)
    logging_level = getattr(logging, args.logger_level)
    logger.setLevel(logging_level)

    optimizer = BOHB(configspace=get_configspace(),
                     run_id=args.run_id,
                     host=ns_host,
                     nameserver=ns_host,
                     nameserver_port=ns_port,
                     min_budget=args.n_repeat_lower_budget,
                     max_budget=args.n_repeat_upper_budget,
                     result_logger=result_logger,
                     logger=logger,
                     previous_result=previous_result)

    res = optimizer.run(n_iterations=args.n_iterations)

    # Shutdown
    optimizer.shutdown(shutdown_workers=True)
    NS.shutdown()
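run_master() starts only one background worker, so additional workers are expected to join from other nodes. A hedged sketch of a matching worker-only entry point, reusing load_nameserver_credentials as in the cluster examples further below; the exact SingleWorker arguments are assumptions copied from the master branch:

def run_worker(args):
    # resolve this node's address from its network interface
    host = hpns.nic_name_to_host(args.nic_name)

    w = SingleWorker(run_id=args.run_id,
                     host=host,
                     working_directory=args.bohb_root_path,
                     n_repeat=args.n_repeat,
                     dataset=args.dataset,
                     time_budget=args.time_budget,
                     time_budget_approx=args.time_budget_approx)
    # read the nameserver host/port the master wrote into the shared directory
    w.load_nameserver_credentials(args.bohb_root_path)
    w.run(background=False)  # serve in the foreground until shut down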
Example #4
def run_bohb(exp_name, log_dir='EXP', iterations=20):
    
    run_dir = 'bohb-{}-{}'.format(log_dir, exp_name)
    if not os.path.exists(run_dir):
        utils.create_exp_dir(run_dir, scripts_to_save=glob.glob('*.py'))

    # log_format = '%(asctime)s %(message)s'
    # logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    #     format=log_format, datefmt='%m/%d %I:%M:%S %p')
    # fh = logging.FileHandler(os.path.join(run_dir, 'log.txt'))
    # fh.setFormatter(logging.Formatter(log_format))
    # logging.getLogger().addHandler(fh)

    result_logger = hpres.json_result_logger(directory=run_dir, overwrite=True)

    # Start a nameserver
    NS = hpns.NameServer(run_id=exp_name, host='127.0.0.1', port=0)
    ns_host, ns_port = NS.start()

    # Start a local worker
    worker = TorchWorker(run_id=exp_name, host='127.0.0.1', nameserver=ns_host, nameserver_port=ns_port,
                        timeout=120, run_dir=run_dir)
    worker.run(background=True)

    # Initialise optimiser
    bohb = BOHB(configspace=worker.get_configspace(),
                run_id=exp_name,
                host='127.0.0.1',
                nameserver=ns_host,
                nameserver_port=ns_port,
                result_logger=result_logger,
                min_budget=2, max_budget=5,
                )
    print('Worker running')
    res = bohb.run(n_iterations=iterations)
    # Store the results
    with open(os.path.join(run_dir, 'result.pkl'), 'wb') as file:
        pickle.dump(res, file)
    
    # Shutdown
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    # get all runs
    all_runs = res.get_all_runs()

    # get id to configuration mapping as dictionary
    id2conf = res.get_id2config_mapping()

    # get the best (incumbent) run
    best_run = res.get_incumbent_id()
    best_config = id2conf[best_run]['config']
    
    print(f"Best run id:{best_run}, \n Config:{best_config}")

    # Store all run info
    with open(os.path.join(run_dir, 'summary.txt'), 'w') as file:
        file.write(f"{all_runs}")
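Several of these examples call worker.get_configspace() on a custom worker class. A minimal sketch of what such a worker looks like, assuming hpbandster's Worker base class; train_and_evaluate is a hypothetical helper:

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from hpbandster.core.worker import Worker


class MinimalWorker(Worker):
    def compute(self, config, budget, **kwargs):
        # train with `config` for `budget` epochs and return a loss to minimize
        loss = train_and_evaluate(config, epochs=int(budget))  # hypothetical helper
        return {'loss': loss, 'info': {}}

    @staticmethod
    def get_configspace():
        cs = CS.ConfigurationSpace()
        cs.add_hyperparameter(CSH.UniformFloatHyperparameter(
            'lr', lower=1e-4, upper=1e-1, log=True))
        return cs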
Example #5
    def start(self):
        # type: () -> ()
        """
        Start the Optimizer controller function loop()
        If the calling process is stopped, the controller will stop as well.

        .. important::
            This function returns only after optimization is completed or :meth:`stop` was called.

        """
        # Step 1: Start a NameServer
        fake_run_id = 'OptimizerBOHB_{}'.format(time())
        # the default port is 9090; a port is required because this is how BOHB workers communicate (even locally)
        self._namespace = hpns.NameServer(run_id=fake_run_id,
                                          host='127.0.0.1',
                                          port=self._nameserver_port)
        self._namespace.start()

        # we have to scale the budget to the iterations per job, otherwise numbers might be too high
        budget_iteration_scale = self._max_iteration_per_job

        # Step 2: Start the workers
        workers = []
        for i in range(self._num_concurrent_workers):
            w = _TrainsBandsterWorker(
                optimizer=self,
                sleep_interval=int(self.pool_period_minutes * 60),
                budget_iteration_scale=budget_iteration_scale,
                base_task_id=self._base_task_id,
                objective=self._objective_metric,
                queue_name=self._execution_queue,
                nameserver='127.0.0.1',
                nameserver_port=self._nameserver_port,
                run_id=fake_run_id,
                id=i)
            w.run(background=True)
            workers.append(w)

        # Step 3: Run an optimizer
        self._bohb = BOHB(
            configspace=self._convert_hyper_parameters_to_cs(),
            run_id=fake_run_id,
            # num_samples=self.total_max_jobs, # will be set by self._bohb_kwargs
            min_budget=float(self._min_iteration_per_job) /
            float(self._max_iteration_per_job),
            **self._bohb_kwargs)
        # scale the budget according to the successive halving iterations
        if self.budget.jobs.limit:
            self.budget.jobs.limit *= len(self._bohb.budgets)
        if self.budget.iterations.limit:
            self.budget.iterations.limit *= len(self._bohb.budgets)
        # start optimization
        self._res = self._bohb.run(n_iterations=self.total_max_jobs,
                                   min_n_workers=self._num_concurrent_workers)

        # Step 4: if we get here, Shutdown
        self.stop()
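The length of self._bohb.budgets used for the scaling above follows Hyperband's geometric budget spacing; a small sketch of that geometry, written independently of hpbandster:

import math

def hyperband_budgets(min_budget, max_budget, eta=3):
    # budgets are spaced by factors of eta, ending exactly at max_budget
    s_max = int(math.floor(math.log(max_budget / min_budget) / math.log(eta)))
    return [max_budget * eta ** (-i) for i in range(s_max, -1, -1)]

print(hyperband_budgets(1, 9, eta=3))  # [1.0, 3.0, 9.0]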
Example #6
def get_parameters(train_data, kFold, iterations, save=False, filepath='./result/loss_time_bohb.csv'):
    parser = argparse.ArgumentParser(description='Example 1 - sequential and local execution.')
    parser.add_argument('--min_budget', type=float, help='Minimum budget used during the optimization.', default=1)
    parser.add_argument('--max_budget', type=float, help='Maximum budget used during the optimization.', default=1)
    parser.add_argument('--n_iterations', type=int, help='Number of iterations performed by the optimizer', default=iterations) # max value = 4
    # parser.add_argument('--worker', help='Flag to turn this into a worker process', action='store_true')
    parser.add_argument('--shared_directory', type=str, help='A directory that is accessible for all processes, e.g. a NFS share.', default='./result')
    # parser.add_argument('--nic_name', type=str, default='lo')
    args = parser.parse_args()

    result_logger = hpres.json_result_logger(directory=args.shared_directory, overwrite=True)

    NS = hpns.NameServer(run_id='BOHB', host='127.0.0.1', port=None)
    NS.start()

    w = worker(train_data, kFold, nameserver='127.0.0.1', run_id='BOHB')
    w.run(background=True)


    bohb = BOHB(configspace=w.get_configspace(),
                run_id='BOHB', nameserver='127.0.0.1',
                min_budget=args.min_budget, max_budget=args.max_budget,
                result_logger=result_logger
                )
    res = bohb.run(n_iterations=args.n_iterations)

    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()
    info = res.get_runs_by_id(incumbent)

    parameter = id2config[incumbent]['config']
    min_error = info[0]['loss']

    if save:
        all_info = res.get_all_runs()
        timepoints = []
        losses = []

        for run in all_info:
            timepoints.append(run['time_stamps']['finished'])
            losses.append(run['loss'])

        save_to_csv.save(filepath, timepoints, losses)


    return parameter, min_error
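The timepoint/loss pairs collected above are raw per-run values; for a convergence curve it is common to reduce them to a running best. A small sketch over the same get_all_runs() output (run objects support item access, as used above):

def best_loss_trajectory(all_runs):
    # sort finished runs by completion time and track the best loss so far
    runs = sorted((r for r in all_runs if r['loss'] is not None),
                  key=lambda r: r['time_stamps']['finished'])
    best, trajectory = float('inf'), []
    for r in runs:
        best = min(best, r['loss'])
        trajectory.append((r['time_stamps']['finished'], best))
    return trajectory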
Example #7
def generate_bohb_data():
    import warnings
    import hpbandster.core.nameserver as hpns
    import hpbandster.core.result as hpres
    from hpbandster.optimizers import BOHB as BOHB

    run_id = '0'  # Every run has to have a unique (at runtime) id.
    NS = hpns.NameServer(run_id=run_id, host='localhost', port=0)
    ns_host, ns_port = NS.start()

    from neural_opt import MyWorker, get_configspace

    w = MyWorker(
        nameserver=ns_host,
        nameserver_port=ns_port,
        run_id=run_id,  # same as nameserver's
    )
    w.run(background=True)

    # Log the optimization results for later analysis
    result_logger = hpres.json_result_logger(
        directory='test/general_example/results/bohb_full_configspace',
        overwrite=True)

    bohb = BOHB(
        configspace=get_configspace(),
        run_id=run_id,  # same as nameserver's
        eta=2,
        min_budget=5,
        max_budget=100,  # Hyperband parameters
        nameserver=ns_host,
        nameserver_port=ns_port,
        result_logger=result_logger,
    )

    # Then start the optimizer. The n_iterations parameter specifies
    # the number of iterations to be performed in this run
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = bohb.run(n_iterations=2)

    # After the run is finished, the services started above need to be shutdown.
    # This ensures that the worker, the nameserver and the master all properly exit
    # and no (daemon) threads keep running afterwards.
    # In particular we shutdown the optimizer (which shuts down all workers) and the nameserver.
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()
Example #8
def select_model(worker, tmnt_config_space, total_iterations, result_logger,
                 id_str, ns_port):
    tmnt_config = TMNTConfig(tmnt_config_space)
    worker.run(background=True)
    cs = tmnt_config.get_configspace()
    config = cs.sample_configuration().get_dictionary()
    logging.info(config)
    bohb = BOHB(configspace=cs,
                run_id=id_str,
                nameserver='127.0.0.1',
                result_logger=result_logger,
                nameserver_port=ns_port,
                min_budget=2,
                max_budget=worker.max_budget)
    res = bohb.run(n_iterations=total_iterations)
    bohb.shutdown(shutdown_workers=True)
    return res
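select_model() only receives ns_port, so the nameserver is created by the caller. A hedged sketch of that surrounding setup; log_dir and the worker construction are assumptions:

NS = hpns.NameServer(run_id=id_str, host='127.0.0.1', port=0)
ns_host, ns_port = NS.start()

result_logger = hpres.json_result_logger(directory=log_dir, overwrite=True)  # hypothetical log_dir
# `worker` is a project-specific hpbandster Worker already pointed at this nameserver

res = select_model(worker, tmnt_config_space, total_iterations,
                   result_logger, id_str, ns_port)
NS.shutdown()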
Example #9
def start_hpbandster_process(method,
                             configspace,
                             loss,
                             total_budget,
                             max_budget_per_config,
                             eta=3):
    """
    Starts a server and a worker object needed for the HpBandSter optimization process

    :param method: (str) Specifies if we use BOHB or hyperband
    :param configspace: Hyper-parameter search space
    :param loss: Loss function to minimize
    :param total_budget: Total budget (in number of epochs) allowed for optimization
    :param max_budget_per_config: Maximal number of epochs allowed for one config
    :param eta: ratio between successive budgets used by successive halving
    :return: NameServer and HpBandSter optimizer
    """

    # Start a nameserver:
    NS = hpns.NameServer(run_id=method)
    ns_host, ns_port = NS.start()

    # Start local worker
    w = MyWorker(run_id=method,
                 nameserver=ns_host,
                 nameserver_port=ns_port,
                 timeout=120,
                 loss_function=loss)
    w.run(background=True)

    if method == 'BOHB':

        optimizer = BOHB(
            configspace=configspace,
            run_id=method,
            nameserver=ns_host,
            nameserver_port=ns_port,
            min_budget=1,
            max_budget=max_budget_per_config,
        )
    else:

        optimizer = HyperBand(
            configspace=configspace,
            run_id=method,
            nameserver=ns_host,
            nameserver_port=ns_port,
            min_budget=1,
            max_budget=max_budget_per_config,
        )

    # We compute the maximal number of iterations to match the original paper
    # (we divide the total budget by the fixed budget per successive halving iteration: (Smax + 1) * bmax)
    max_iter = total_budget / (
        int(-1 * (log(1 / max_budget_per_config)) / log(eta) + 1) *
        max_budget_per_config)

    return NS, max_iter, optimizer
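A usage sketch for start_hpbandster_process, assuming configspace and loss are defined elsewhere; the returned max_iter feeds run(), and both services are shut down afterwards:

ns, max_iter, optimizer = start_hpbandster_process(
    method='BOHB',
    configspace=configspace,  # assumed to exist
    loss=loss,                # assumed loss function to minimize
    total_budget=100,
    max_budget_per_config=9)

res = optimizer.run(n_iterations=int(max_iter))

optimizer.shutdown(shutdown_workers=True)
ns.shutdown()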
Example #10
    def optimize(self):
        """
        The main optimization procedure
        Should return the best arm as and array/ list
        :return:

        THe instance to be optimized is the self.objective value
        """
        try:
            if self.algorithm_name == 'BOHB':
                algo = BOHB(
                    configspace=self.worker.get_configspace(),
                    run_id='example1',
                    nameserver='127.0.0.1',
                    min_budget=1,
                    max_budget=1
                    # budget is the number of epochs that will run so it is 1 for min and max
                )
            else:
                algo = HyperBand(
                    configspace=self.worker.get_configspace(),
                    run_id='example1',
                    nameserver='127.0.0.1',
                    min_budget=1,
                    max_budget=1
                    # budget is the number of epochs that will run so it is 1 for min and max
                )

            res = algo.run(n_iterations=self.maxfeval)
            # We shut down the server
            algo.shutdown(shutdown_workers=True)
            self.NS.shutdown()
            id2config = res.get_id2config_mapping()
            incumbent = res.get_incumbent_id()
            best = id2config[incumbent]['config']
            best_arm = convertToArray(best)
            success = True
        except Exception:
            logger.warning('Optimization for ' + str(self.algorithm_name) +
                           ' failed with cost function: ' +
                           str(self.objective.GetCostFunctionName()))
            best_arm = NAN
            success = False
        return best_arm, success
Example #11
def run_optimization(args):
    """Runs the optimization process."""
    print("Starting name server.")
    date_time = datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S')

    # First start nameserver
    NS = hpns.NameServer(run_id=date_time, host='127.0.0.1', port=None)
    NS.start()

    print("Preparing result logger and loading previous run, if it exists.")

    # Also start result logger
    output_dir = os.path.join(
        args.output_dir,
        datetime.datetime.now().strftime('%Y_%m_%d--%H_%M_%S'))
    result_logger_path = os.path.join(output_dir, 'results_log.json')
    best_result_path = os.path.join(output_dir, 'best_config.txt')

    print("Result logger will be written to %s" % result_logger_path)
    if os.path.exists(result_logger_path):
        previous_run = hpres.logged_results_to_HBS_result(result_logger_path)
    else:
        previous_run = None

    result_logger = hpres.json_result_logger(directory=output_dir,
                                             overwrite=True)

    print("Starting search worker.\n")

    # Then start worker
    w = SearchWorker(args.data_path,
                     iaa,
                     os.path.join(output_dir, "logging"),
                     nameserver='127.0.0.1',
                     run_id=date_time)
    w.run(background=True)

    print("Initializing optimizer.")
    # Run the optimizer
    bohb = BOHB(configspace=w.get_configspace(),
                run_id=date_time,
                nameserver='127.0.0.1',
                result_logger=result_logger,
                min_budget=args.min_budget,
                max_budget=args.max_budget,
                previous_result=previous_run)

    print("Initialization complete. Starting optimization run.")

    res = bohb.run(n_iterations=args.iterations)

    print("Optimization complete.")
    output_fp = os.path.join(output_dir, 'results.pkl')

    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()

    print("Results will be saved at:\n{}".format(output_fp))
    print("Best found configuration: ", id2config[incumbent]['config'])

    Slacker.send_message(
        "AutoML Optimization finished with minimum "
        "budget {}, maximum budget {}, and {} "
        "iterations.\n"
        "Output file has been written in {}    \n".format(
            args.min_budget, args.max_budget, args.iterations, output_dir),
        "AutoML Optimization Finished!")

    sleep(2)
    Slacker.send_code("Best found configuration:",
                      "{}".format(id2config[incumbent]['config']))

    with open(best_result_path, mode='w') as file:
        lines = [
            "Best results are as follows:\n",
            "{}".format(id2config[incumbent]['config'])
        ]
        file.writelines(lines)

    with open(output_fp, mode='wb') as file:
        pickle.dump(res, file)

    # Shutdown after completion
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()
Example #12
def main():
    parser = argparse.ArgumentParser(
        description=
        'Tensorforce hyperparameter tuner, using BOHB optimizer (Bayesian Optimization '
        'and Hyperband)')
    # Environment arguments (from run.py)
    parser.add_argument(
        '-e',
        '--environment',
        type=str,
        help='Environment (name, configuration JSON file, or library module)')
    parser.add_argument(
        '-l',
        '--level',
        type=str,
        default=None,
        help='Level or game id, like `CartPole-v1`, if supported')
    parser.add_argument('-m',
                        '--max-episode-timesteps',
                        type=int,
                        default=None,
                        help='Maximum number of timesteps per episode')
    parser.add_argument(
        '--import-modules',
        type=str,
        default=None,
        help='Import comma-separated modules required for environment')
    # Runner arguments (from run.py)
    parser.add_argument('-n',
                        '--episodes',
                        type=int,
                        help='Number of episodes')
    parser.add_argument(
        '-p',
        '--num-parallel',
        type=int,
        default=None,
        help='Number of environment instances to execute in parallel')
    # Tuner arguments
    parser.add_argument(
        '-r',
        '--runs-per-round',
        type=str,
        default='1,2,5,10',
        help=
        'Comma-separated number of runs per optimization round, each with a successively '
        'smaller number of candidates')
    parser.add_argument(
        '-s',
        '--selection-factor',
        type=int,
        default=3,
        help=
        'Selection factor n, meaning that one out of n candidates in each round advances to '
        'the next optimization round')
    parser.add_argument(
        '-i',
        '--num-iterations',
        type=int,
        default=1,
        help=
        'Number of optimization iterations, each consisting of a series of optimization '
        'rounds with an increasingly reduced candidate pool')
    parser.add_argument('-d',
                        '--directory',
                        type=str,
                        default='tuner',
                        help='Output directory')
    parser.add_argument('--restore',
                        type=str,
                        default=None,
                        help='Restore from given directory')
    parser.add_argument('--id',
                        type=str,
                        default='worker',
                        help='Unique worker id')
    args = parser.parse_args()

    if args.import_modules is not None:
        for module in args.import_modules.split(','):
            importlib.import_module(name=module)

    environment = dict(environment=args.environment)
    if args.level is not None:
        environment['level'] = args.level

    # Run everything locally; for a distributed setup, the host could instead be
    # resolved via nic_name_to_host(nic_name=...) together with a fixed port.
    host = 'localhost'
    port = None

    runs_per_round = tuple(int(x) for x in args.runs_per_round.split(','))
    print('Bayesian Optimization and Hyperband optimization')
    print(
        f'{args.num_iterations} iterations of each {len(runs_per_round)} rounds:'
    )
    for n, num_runs in enumerate(runs_per_round, start=1):
        num_candidates = round(
            math.pow(args.selection_factor,
                     len(runs_per_round) - n))
        print(f'round {n}: {num_candidates} candidates, each {num_runs} runs')
    print()

    server = NameServer(run_id=args.id,
                        working_directory=args.directory,
                        host=host,
                        port=port)
    nameserver, nameserver_port = server.start()

    worker = TensorforceWorker(
        environment=environment,
        max_episode_timesteps=args.max_episode_timesteps,
        num_episodes=args.episodes,
        base=args.selection_factor,
        runs_per_round=runs_per_round,
        num_parallel=args.num_parallel,
        run_id=args.id,
        nameserver=nameserver,
        nameserver_port=nameserver_port,
        host=host)
    worker.run(background=True)

    if args.restore is None:
        previous_result = None
    else:
        previous_result = logged_results_to_HBS_result(directory=args.restore)

    result_logger = json_result_logger(directory=args.directory,
                                       overwrite=True)

    optimizer = BOHB(configspace=worker.get_configspace(),
                     eta=args.selection_factor,
                     min_budget=0.9,
                     max_budget=math.pow(args.selection_factor,
                                         len(runs_per_round) - 1),
                     run_id=args.id,
                     working_directory=args.directory,
                     nameserver=nameserver,
                     nameserver_port=nameserver_port,
                     host=host,
                     result_logger=result_logger,
                     previous_result=previous_result)
    # BOHB(configspace=None, eta=3, min_budget=0.01, max_budget=1, min_points_in_model=None,
    # top_n_percent=15, num_samples=64, random_fraction=1 / 3, bandwidth_factor=3,
    # min_bandwidth=1e-3, **kwargs)
    # Master(run_id, config_generator, working_directory='.', ping_interval=60,
    # nameserver='127.0.0.1', nameserver_port=None, host=None, shutdown_workers=True,
    # job_queue_sizes=(-1,0), dynamic_queue_size=True, logger=None, result_logger=None,
    # previous_result = None)
    # logger: logging.logger like object, the logger to output some (more or less meaningful)
    # information

    results = optimizer.run(n_iterations=args.num_iterations)
    # optimizer.run(n_iterations=1, min_n_workers=1, iteration_kwargs={})
    # min_n_workers: int, minimum number of workers before starting the run

    optimizer.shutdown(shutdown_workers=True)
    server.shutdown()

    with open(os.path.join(args.directory, 'results.pkl'), 'wb') as filehandle:
        pickle.dump(results, filehandle)

    print('Best found configuration: {}'.format(
        results.get_id2config_mapping()[results.get_incumbent_id()]['config']))
    print('Runs:',
          results.get_runs_by_id(config_id=results.get_incumbent_id()))
    print('A total of {} unique configurations were sampled.'.format(
        len(results.get_id2config_mapping())))
    print('A total of {} runs were executed.'.format(
        len(results.get_all_runs())))
Example #13
def a2c_bohb_wrapper(**params):
    # Setup directories where live data is logged
    logdir = params["logdir"]
    a2c_output_dir = os.path.join(logdir, 'a2c_output')
    params["logdir"] = a2c_output_dir
    bohb_output_dir = os.path.join(logdir, 'bohb_output')

    logging.basicConfig(level=logging.INFO)  # logging.DEBUG for debug output
    logger = logging.getLogger()
    logger.propagate = False  # no duplicate logging outputs
    fh = logging.FileHandler(os.path.join(logdir, 'bohb.log'))
    fh.setLevel(logging.INFO)
    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(fh)

    # Build configuration space and define all hyperparameters
    cs = ConfigurationSpace()
    lr = UniformFloatHyperparameter("lr", 1e-4, 1e-2, default_value=1e-3)
    units_shared_layer1 = UniformIntegerHyperparameter("units_shared_layer1", 8, 100, default_value=24)
    units_shared_layer2 = UniformIntegerHyperparameter("units_shared_layer2", 8, 100, default_value=24)
    units_policy_layer = UniformIntegerHyperparameter("units_policy_layer", 8, 100, default_value=24)
    vf_coeff = UniformFloatHyperparameter("vf_coeff", 1e-2, 0.5, default_value=0.1)
    ent_coeff = UniformFloatHyperparameter("ent_coeff", 5e-6, 1e-4, default_value=1e-5)
    gamma = UniformFloatHyperparameter("gamma", 0.6, 1., default_value=0.90)
    activ_fcn = CategoricalHyperparameter("activ_fcn", ['relu6', 'elu', 'mixed'], default_value='relu6')
    cs.add_hyperparameters([units_shared_layer1, units_shared_layer2, units_policy_layer,
                            vf_coeff, ent_coeff, gamma, lr, activ_fcn])  # batch_size

    logger.info('##############################################')
    logger.info('Run Optimization')
    logger.info('##############################################')
    if params["array_id"] == 1:
        # Setup directories where live data is logged
        if not os.path.isdir(a2c_output_dir):
            os.makedirs(a2c_output_dir)
        if not os.path.isdir(bohb_output_dir):
            os.makedirs(bohb_output_dir)

        # start nameserver
        NS = hpns.NameServer(run_id=params["instance_id"], nic_name=params["nic_name"],
                             working_directory=bohb_output_dir)
        ns_host, ns_port = NS.start()  # stores information for workers to find in working directory

        # BOHB is usually so cheap that we can afford to run a worker on the master node, too.
        worker = A2CWorker(nameserver=ns_host, nameserver_port=ns_port, run_id=params["instance_id"], **params)
        worker.run(background=True)

        # Create scenario object
        logger.info('##############################################')
        logger.info('Setup BOHB instance')
        logger.info('##############################################')

        logger.info('Output_dir: %s' % bohb_output_dir)
        HB = BOHB(configspace=cs,
                  run_id=params["instance_id"],
                  eta=3,
                  min_budget=params["min_resource"],
                  max_budget=params["max_resource"],
                  host=ns_host,
                  nameserver=ns_host,
                  nameserver_port=ns_port,
                  ping_interval=3600)

        res = HB.run(n_iterations=4,
                     min_n_workers=4)  # BOHB can wait until a minimum number of workers is online before starting

        # pickle result here for later analysis
        with open(os.path.join(bohb_output_dir, 'results.pkl'), 'wb') as f:
            pickle.dump(res, f)

        id2config = res.get_id2config_mapping()
        print('A total of %i unique configurations were sampled.' % len(id2config.keys()))
        print('A total of %i runs were executed.' % len(res.get_all_runs()))

        # shutdown all workers
        HB.shutdown(shutdown_workers=True)

        # shutdown nameserver
        NS.shutdown()

    else:
        host = hpns.nic_name_to_host(params["nic_name"])

        # workers only instantiate the MyWorker, find the nameserver and start serving
        w = A2CWorker(run_id=params["instance_id"], host=host, **params)
        w.load_nameserver_credentials(bohb_output_dir)
        # run the worker in the foreground
        w.run(background=False)
Example #14
    def pre_train(self, dataset):
        """ Pre-trains the model based on one or more datasets.

        """
        run_id = 'first_run'
        host = '127.0.0.1'
        n_workers = 4
        n_iterations = 300

        data = DataLoader(dataset, 1, cuda=False)
        data = data.data_loader
        np.random.shuffle(data)
        X_ = []
        y_ = []
        for point in data:
            X_.append(point[0].tolist()[0])
            y_.append(np.argmax(point[1].tolist()))
        global X
        global y
        X = np.array(X_)
        y = np.array(y_)

        # shuffle the data
        indices = np.arange(X.shape[0])
        np.random.shuffle(indices)

        X = X[indices]
        y = y[indices]

        min_budget = len(X) * 0.01
        max_budget = len(X) * 0.25

        NS = hpns.NameServer(run_id=run_id, host=host, port=None)
        NS.start()

        workers = []
        print('starting workers:')
        for i in range(n_workers):
            print(i)
            w = RFWorker(nameserver=host, run_id=run_id, id=i)
            w.run(background=True)
            workers.append(w)

        bohb = BOHB(configspace=workers[0].get_configspace(),
                    run_id=run_id,
                    min_budget=min_budget,
                    max_budget=max_budget)
        print('running BOHB')
        res = bohb.run(n_iterations=n_iterations, min_n_workers=n_workers)
        bohb.shutdown(shutdown_workers=True)
        NS.shutdown()

        id2config = res.get_id2config_mapping()
        incumbent = res.get_incumbent_id()

        all_runs = res.get_all_runs()

        print('Best found configuration:', id2config[incumbent]['config'])
        print('A total of %i unique configurations were sampled.' %
              len(id2config.keys()))
        print('A total of %i runs were executed.' % len(all_runs))
        print('Total budget corresponds to %.1f full function evaluations.' %
              (sum([r.budget for r in all_runs]) / max_budget))
        print('The run took %.1f seconds to complete.' %
              (all_runs[-1].time_stamps['finished'] -
               all_runs[0].time_stamps['started']))

        self.max_depth = id2config[incumbent]['config']['max_depth']
        self.n_trees = id2config[incumbent]['config']['n_trees']
        self.rf_classifier = RandomForestClassifier(n_estimators=self.n_trees,
                                                    max_depth=self.max_depth)
        self.rf_classifier.fit(X, y)
        with open('config.txt', 'a') as f:
            print('Best config: n_trees: {}, max_depth: {}'.format(
                self.n_trees, self.max_depth),
                  file=f)
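The budgets here are dataset sizes (1% to 25% of the data) rather than epochs. A hypothetical reconstruction of what RFWorker.compute plausibly does with them, using the config keys read back above (n_trees, max_depth):

from hpbandster.core.worker import Worker
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score


class RFWorkerSketch(Worker):  # hypothetical reconstruction, not the original RFWorker
    def compute(self, config, budget, **kwargs):
        n = int(budget)  # budget is a number of training points, per min/max_budget above
        clf = RandomForestClassifier(n_estimators=config['n_trees'],
                                     max_depth=config['max_depth'])
        score = cross_val_score(clf, X[:n], y[:n], cv=3).mean()  # X, y are the globals above
        return {'loss': 1.0 - score, 'info': {'n_points': n}}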
Example #15
           nameserver_port=ns_port,
           timeout=120)
w.run(background=True)

# Let us load the old run now to use its results to warmstart a new run with slightly
# different budgets in terms of datapoints and epochs.
# Note that the search space has to be identical though!
previous_run = hpres.logged_results_to_HBS_result(args.previous_run_dir)

# Run an optimizer
bohb = BOHB(
    configspace=worker.get_configspace(),
    run_id=args.run_id,
    host=host,
    nameserver=ns_host,
    nameserver_port=ns_port,
    result_logger=result_logger,
    min_budget=args.min_budget,
    max_budget=args.max_budget,
    previous_result=previous_run,  # this is how you tell any optimizer about previous runs
)
res = bohb.run(n_iterations=args.n_iterations)

# store results
with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh:
    pickle.dump(res, fh)

# shutdown
bohb.shutdown(shutdown_workers=True)
NS.shutdown()
Example #16
        config_space.add_hyperparameters([l3, l4, l5])

        latent = CSH.UniformIntegerHyperparameter('latent',
                                                  lower=20,
                                                  upper=100)
        #latent = CSH.CategoricalHyperparameter('latent', [33])
        config_space.add_hyperparameters([latent])

        return config_space


BUDGET = 10
ITERATIONS = 100

NS = hpns.NameServer(run_id='Search', host='127.0.0.1', port=None)
NS.start()
WK = MyWorker(nameserver='127.0.0.1', run_id='Search')
WK.run(background=True)

BS = BOHB(configspace=WK.get_configspace(),
          run_id='Search',
          nameserver='127.0.0.1',
          min_budget=BUDGET,
          max_budget=BUDGET)
BSP = BS.run(n_iterations=ITERATIONS)

BS.shutdown(shutdown_workers=True)
NS.shutdown()

print('Best found configuration:',
      setting(BSP.get_id2config_mapping()[BSP.get_incumbent_id()]['config']))
Example #17
def run_bohb(args):

    scorer = TrueRougeScorer()

    run_id = infer_run_id_from_args(args)

    run_dir = os.path.join(args.hpb_runs_dir, run_id)

    if args.hpb_worker:
        w = BeamWorker(args, scorer, nameserver='127.0.0.1', run_id=run_id)
        w.run(background=False)
        exit(0)

    result_logger = hpres.json_result_logger(directory=run_dir,
                                             overwrite=getattr(
                                                 args, 'hpb_overwrite_run',
                                                 False))

    # Step 1: Start a nameserver
    # Every run needs a nameserver. It could be a 'static' server with a
    # permanent address, but here it will be started for the local machine with the default port.
    # The nameserver manages the concurrently running workers across all possible threads or cluster nodes.
    # Note the run_id argument. This uniquely identifies a run of any HpBandSter optimizer.
    NS = hpns.NameServer(run_id=run_id, host='127.0.0.1', port=None)
    NS.start()

    # Step 2: Start workers
    # In this script the workers are launched as separate processes through the
    # --hpb_worker branch above; each finds the nameserver, uses the same run_id,
    # and serves in the foreground, waiting for configurations to evaluate.

    # Step 3: Run an optimizer
    # Now we can create an optimizer object and start the run.
    # Here, we run BOHB, but that is not essential.
    # The run method will return the `Result` that contains all runs performed.
    bohb = BOHB(configspace=get_conf_space(args),
                run_id=run_id,
                nameserver='127.0.0.1',
                result_logger=result_logger,
                min_budget=args.hpb_min_budget,
                max_budget=args.hpb_max_budget)
    res = bohb.run(n_iterations=args.hpb_n_iterations,
                   min_n_workers=args.hpb_n_workers)

    # Step 4: Shutdown
    # After the optimizer run, we must shutdown the master and the nameserver.
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    # Step 5: Analysis
    # Each optimizer returns a hpbandster.core.result.Result object.
    # It holds information about the optimization run, like the incumbent (= best) configuration.
    # For further details about the Result object, see its documentation.
    # Here we simply print out the best config and some statistics about the performed runs.
    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()

    print('Best found configuration:', id2config[incumbent]['config'])
    print('A total of %i unique configurations were sampled.' %
          len(id2config.keys()))
    print('A total of %i runs were executed.' % len(res.get_all_runs()))
    print('Total budget corresponds to %.1f full function evaluations.' %
          (sum([r.budget for r in res.get_all_runs()]) / args.hpb_max_budget))
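Because run() waits for args.hpb_n_workers workers, the workers for this run have to be launched as separate processes that take the --hpb_worker branch above. A hedged launcher sketch; the script name is hypothetical:

import subprocess
import sys

# start N worker processes of this same script; each enters the
# `args.hpb_worker` branch and serves in the foreground
procs = [subprocess.Popen([sys.executable, 'run_bohb.py', '--hpb_worker'])
         for _ in range(4)]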
Example #18
    isKeras = False

elif ALGORITHM == 'Keras':
    worker = KerasRegressor(X_train, X_val, y_train, y_val, nameserver='127.0.0.1', run_id='example1')
    worker.run(background=True)
    isKeras = True
else:
    isKeras = False

# Step 3: Run an optimizer
result_logger = hpres.json_result_logger(directory='.',
                                         overwrite=True)
if not isKeras:
    if OPTIMIZER == 'BOHB':
        optimizer = BOHB(configspace=worker.get_warmstart_configspace(), run_id='example1',
                         nameserver='127.0.0.1', min_budget=10, max_budget=10, eta=3.0,
                         result_logger=result_logger)
        res = optimizer.run(n_iterations=1)

    elif OPTIMIZER == 'RandomSearch':
        optimizer = RandomSearch(configspace=worker.get_configspace(), run_id='example1',
                                 nameserver='127.0.0.1', min_budget=1, max_budget=9, eta=3.0,
                                 result_logger=result_logger)
        res = optimizer.run(n_iterations=10)
else:
    if OPTIMIZER == 'BOHB':
        optimizer = BOHB(configspace=worker.get_configspace(), run_id='example1',
                         nameserver='127.0.0.1', min_budget=3, max_budget=100, eta=3.0,
                         result_logger=result_logger)
        res = optimizer.run(n_iterations=5)
    elif OPTIMIZER == 'RandomSearch':
Example #19
			parser.add_argument('--shared_directory', type=str, help='A directory that is accessible for all processes, e.g. a NFS share.', default='.')

			args = parser.parse_args()

			#Define a realtime result logger
			result_logger = hpres.json_result_logger(directory=result_dir, overwrite=True)


			#Start a nameserver
			NS = hpns.NameServer(run_id='MAXWEL', host='127.0.0.1', port=None)
			NS.start()

			#Start the workers
			workers=[]
			for i in range(args.n_workers):
				w = worker(nameserver='127.0.0.1', run_id='MAXWEL', id=i)
				w.run(background=True)
				workers.append(w)

			#Define and run an optimizer
			bohb = BOHB(configspace=w.get_configspace(),
						run_id='MAXWEL',
						result_logger=result_logger,
						min_budget=args.min_budget,
						max_budget=args.max_budget)

			res = bohb.run(n_iterations=args.n_iterations, min_n_workers=args.n_workers)

			#Shutdown the optimizer and the nameserver
			bohb.shutdown(shutdown_workers=True)
			NS.shutdown()
Example #20
        dataset = {'train': train_set, 'val': val_set, 'test': test_set}

        name_server = hpns.NameServer(run_id=args.name,
                                      host=args.nameserver,
                                      port=None)
        name_server.start()

        worker = COVIDWorker(sleep_interval=0,
                             nameserver=args.nameserver,
                             run_id=args.name)
        worker.set_dataset(args, dataset)
        worker.run(background=True)

        bohb = BOHB(configspace=worker.get_configspace(),
                    run_id=args.name,
                    nameserver=args.nameserver,
                    min_budget=args.min_budget,
                    max_budget=args.max_budget)

        result = bohb.run(n_iterations=args.n_iterations)

        bohb.shutdown(shutdown_workers=True)
        name_server.shutdown()

        id2config = result.get_id2config_mapping()
        incumbent = result.get_incumbent_id()

        print('Best found configuration:', id2config[incumbent]['config'])
        print('A total of %i unique configurations were sampled.' %
              len(id2config.keys()))
        print('A total of %i runs were executed.' % len(result.get_all_runs()))
Example #21
	# start nameserver
	NS = hpns.NameServer(run_id=args.run_id, nic_name=args.nic_name,
							working_directory=args.working_dir)


	ns_host, ns_port = NS.start()	# stores information for workers to find in working_directory

	# BOHB is usually so cheap that we can afford to run a worker on the master node, too.
	worker = MyWorker(nameserver=ns_host, nameserver_port=ns_port, run_id=args.run_id)
	worker.run(background=True)


	HB = BOHB(configspace=config_space,
			  run_id=args.run_id,
			  eta=3, min_budget=27, max_budget=243,
			  host=ns_host,
			  nameserver=ns_host,
			  nameserver_port=ns_port,
			  ping_interval=3600)
	
	res = HB.run(n_iterations=4,
				 min_n_workers=4)  # BOHB can wait until a minimum number of workers is online before starting
	
	# pickle result here for later analysis
	with open(os.path.join(args.working_dir, 'results.pkl'), 'wb') as fh:
		pickle.dump(res, fh)
	
	# shutdown all workers
	HB.shutdown(shutdown_workers=True)
	
Example #22
    w = MyWorker(sleep_interval=0.05,
                 nameserver='127.0.0.1',
                 run_id='example2',
                 id=i,
                 n_jobs=args.n_jobs)
    w.run(background=True)
    workers.append(w)

# Step 3: Run an optimizer
# Now we can create an optimizer object and start the run.
# We add the min_n_workers argument to the run methods to make the optimizer wait
# for all workers to start. This is not mandatory, and workers can be added
# at any time, but if the timing of the run is essential, this can be used to
# synchronize all workers right at the start.
bohb = BOHB(configspace=w.get_configspace(),
            run_id='example2',
            min_budget=args.min_budget,
            max_budget=args.max_budget)
res = bohb.run(n_iterations=args.n_iterations, min_n_workers=args.n_workers)

# Step 4: Shutdown
# After the optimizer run, we must shutdown the master and the nameserver.
bohb.shutdown(shutdown_workers=True)
NS.shutdown()

# Step 5: Analysis
# Each optimizer returns a hpbandster.core.result.Result object.
# It holds information about the optimization run, like the incumbent (= best) configuration.
# For further details about the Result object, see its documentation.
# Here we simply print out the best config and some statistics about the performed runs.
id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
Example #23
    ns_host, ns_port = NS.start()

    # Start local worker
    worker = TrainEvalWorker(run_id=RUN_ID,
                             nameserver=ns_host,
                             nameserver_port=ns_port)
    worker.run(background=True)
    result_logger = hpres.json_result_logger(
        directory="data/hpbandster/{}".format(RUN_ID), overwrite=False)
    optim = BOHB(
        configspace=worker.get_configspace(),
        run_id=RUN_ID,
        nameserver=ns_host,
        nameserver_port=ns_port,
        result_logger=result_logger,
        eta=HPO_PARAMS["eta"],
        min_budget=HPO_PARAMS["min_budget"],
        max_budget=HPO_PARAMS["max_budget"],
        num_samples=HPO_PARAMS["num_samples"],
        top_n_percent=HPO_PARAMS["top_n_percent"],
        min_bandwidth=HPO_PARAMS["min_bandwidth"],
        bandwidth_factor=HPO_PARAMS["bandwidth_factor"],
    )
    study = optim.run(n_iterations=HPO_PARAMS["n_calls"])

    results = hpbandster2skopt(study)

    best_auc = -1.0 * results.fun
    best_params = results.x
    print(results)
    # log metrics
    print("Best Validation AUC: {}".format(best_auc))
Example #24
def get_parameters(data, target_feature_index):
    parser = argparse.ArgumentParser(
        description='Example 1 - sequential and local execution.')
    parser.add_argument('--min_budget',
                        type=float,
                        help='Minimum budget used during the optimization.',
                        default=9)
    parser.add_argument('--max_budget',
                        type=float,
                        help='Maximum budget used during the optimization.',
                        default=243)
    parser.add_argument('--n_iterations',
                        type=int,
                        help='Number of iterations performed by the optimizer',
                        default=4)
    parser.add_argument('--n_workers',
                        type=int,
                        help='Number of workers to run in parallel.',
                        default=2)
    parser.add_argument('--worker',
                        help='Flag to turn this into a worker process',
                        action='store_true')
    parser.add_argument(
        '--run_id',
        type=str,
        help=
        'A unique run id for this optimization run. An easy option is to use the job id of the clusters scheduler.'
    )
    parser.add_argument(
        '--nic_name',
        type=str,
        help='Which network interface to use for communication.',
        default='lo')
    parser.add_argument(
        '--shared_directory',
        type=str,
        help=
        'A directory that is accessible for all processes, e.g. a NFS share.',
        default='/home/lchen/parameters/result')

    args = parser.parse_args()

    host = hpns.nic_name_to_host(args.nic_name)

    if args.worker:
        time.sleep(5)  # short artificial delay to make sure the nameserver is already running
        w = worker(0.5,
                   data,
                   target_feature_index,
                   run_id=args.run_id,
                   host=host)
        w.load_nameserver_credentials(working_directory=args.shared_directory)
        w.run(background=False)
        exit(0)

    result_logger = hpres.json_result_logger(directory=args.shared_directory,
                                             overwrite=True)

    # Step 1: Start a nameserver
    # Every run needs a nameserver. It could be a 'static' server with a
    # permanent address, but here it will be started for the local machine with the default port.
    # The nameserver manages the concurrently running workers across all possible threads or cluster nodes.
    # Note the run_id argument. This uniquely identifies a run of any HpBandSter optimizer.
    NS = hpns.NameServer(run_id='test1',
                         host=host,
                         port=0,
                         working_directory=args.shared_directory)
    ns_host, ns_port = NS.start()
    # Step 2: Start a worker
    # Now we can instantiate a worker, providing the mandatory information
    # Besides the sleep_interval, we need to define the nameserver information and
    # the same run_id as above. After that, we can start the worker in the background,
    # where it will wait for incoming configurations to evaluate.

    w = worker(0.5,
               data,
               target_feature_index,
               run_id='test1',
               host=host,
               nameserver=ns_host,
               nameserver_port=ns_port)
    w.run(background=True)
    # Step 3: Run an optimizer
    # Now we can create an optimizer object and start the run.
    # Here, we run BOHB, but that is not essential.
    # The run method will return the `Result` that contains all runs performed.
    bohb = BOHB(configspace=worker.get_configspace(),
                run_id='test1',  # must match the run_id used when starting the nameserver and worker above
                host=host,
                nameserver=ns_host,
                nameserver_port=ns_port,
                result_logger=result_logger,
                min_budget=args.min_budget,
                max_budget=args.max_budget)
    print("daozhele5")
    res = bohb.run(n_iterations=args.n_iterations)
    print("daozhele6")
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    # Step 5: Analysis
    # Each optimizer returns a hpbandster.core.result.Result object.
    # It holds information about the optimization run, like the incumbent (= best) configuration.
    # For further details about the Result object, see its documentation.
    # Here we simply print out the best config and some statistics about the performed runs.
    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()
    info = res.get_runs_by_id(incumbent)

    parameter = id2config[incumbent]['config']
    min_error = info[0]['loss']
    feature_importance_dict = info[0]['info']

    with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh:
        pickle.dump(res, fh)

    return parameter, min_error, feature_importance_dict
Example #25
def run_experiment(out_path, on_travis):

    settings = {
        'min_budget': 1,
        'max_budget': 9,  # number of repetitions; this is the fidelity for this benchmark
        'num_iterations': 10,  # set this to a low number for demonstration
        'eta': 3,
        'output_dir': Path(out_path)
    }
    if on_travis:
        settings.update(get_travis_settings('bohb'))

    b = Benchmark(rng=1)

    b.get_configuration_space(seed=1)
    settings.get('output_dir').mkdir(exist_ok=True)

    cs = b.get_configuration_space()
    seed = get_rng(rng=0)
    run_id = 'BOHB_on_cartpole'

    result_logger = hpres.json_result_logger(
        directory=str(settings.get('output_dir')), overwrite=True)

    ns = hpns.NameServer(run_id=run_id,
                         host='localhost',
                         working_directory=str(settings.get('output_dir')))
    ns_host, ns_port = ns.start()

    worker = CustomWorker(seed=seed,
                          nameserver=ns_host,
                          nameserver_port=ns_port,
                          run_id=run_id,
                          max_budget=settings.get('max_budget'))
    worker.run(background=True)

    master = BOHB(configspace=cs,
                  run_id=run_id,
                  host=ns_host,
                  nameserver=ns_host,
                  nameserver_port=ns_port,
                  eta=settings.get('eta'),
                  min_budget=settings.get('min_budget'),
                  max_budget=settings.get('max_budget'),
                  result_logger=result_logger)

    result = master.run(n_iterations=settings.get('num_iterations'))
    master.shutdown(shutdown_workers=True)
    ns.shutdown()

    with open(settings.get('output_dir') / 'results.pkl', 'wb') as f:
        pickle.dump(result, f)

    id2config = result.get_id2config_mapping()
    incumbent = result.get_incumbent_id()
    inc_value = result.get_runs_by_id(incumbent)[-1]['loss']
    inc_cfg = id2config[incumbent]['config']

    logger.info(f'Inc Config:\n{inc_cfg}\n'
                f'with Performance: {inc_value:.2f}')

    if not on_travis:
        benchmark = Benchmark(container_source='library://phmueller/automl')
        incumbent_result = benchmark.objective_function_test(
            configuration=inc_cfg, fidelity={"budget": settings['max_budget']})
        print(incumbent_result)
Example #26
def MAXWEL_bandster_station(raw_name, width):

    name = raw_name.split('.')
    if name[-1] != "csv":
        return

    name = name[0]
    df = pd.read_csv('Train_Data/Width_' + width + '/' + name +
                     '.csv').drop(columns=['time'])

    result_dir = 'Results/' + name
    os.makedirs(result_dir, exist_ok=True)

    assert df.isna().sum().sum() == 0, name + " has NANs"

    data_info = {}
    data_info['primary'] = name
    data_info['num_features'] = df.shape[1]
    data_info['train_time'] = 12
    data_info['predict_time'] = 4
    data_info['num_samples'] = df.shape[0]

    X = np.zeros(
        (data_info['num_samples'], data_info['train_time'], df.shape[1]))
    Y = np.zeros((data_info['num_samples'], data_info['predict_time']))
    for i in range(data_info['num_samples'] -
                   (data_info['train_time'] + data_info['predict_time'] + 1)):
        X[i, :, :] = df.iloc[i:i + data_info['train_time']].values
        Y[i, :] = np.asarray(
            df[name + '_scaled_demand'][i + data_info['train_time']:i +
                                        data_info['train_time'] +
                                        data_info['predict_time']].values)

    idx = sample(range(len(X)), 9000)
    train_idx = idx[:6000]
    valid_idx = idx[6000:8000]
    test_idx = idx[8000:]

    data_info['num_train_samples'] = len(train_idx)
    data_info['train_idx'] = train_idx

    data_info['num_valid_samples'] = len(valid_idx)
    data_info['valid_idx'] = valid_idx

    data_info['num_test_samples'] = len(test_idx)
    data_info['test_idx'] = test_idx

    data_info['fit_cnt'] = 1

    # data_info['station_cnt']=station_cnt
    # data_info['num_stations']=len(names)

    dill.dump(data_info, open("Temp_Data/data_info.pkl", 'wb'))

    X_train = np.zeros((data_info['num_train_samples'],
                        data_info['train_time'], data_info['num_features']))
    Y_train = np.zeros(
        (data_info['num_train_samples'], data_info['predict_time']))

    X_valid = np.zeros((data_info['num_valid_samples'],
                        data_info['train_time'], data_info['num_features']))
    Y_valid = np.zeros(
        (data_info['num_valid_samples'], data_info['predict_time']))

    X_test = np.zeros((data_info['num_test_samples'], data_info['train_time'],
                       data_info['num_features']))
    Y_test = np.zeros(
        (data_info['num_test_samples'], data_info['predict_time']))

    for i, ind in enumerate(data_info['train_idx']):
        X_train[i, :, :] = X[ind, :, :]
        Y_train[i, :] = Y[ind, :]

    for i, ind in enumerate(data_info['valid_idx']):
        X_valid[i, :, :] = X[ind, :, :]
        Y_valid[i, :] = Y[ind, :]

    for i, ind in enumerate(data_info['test_idx']):
        X_test[i, :, :] = X[ind, :, :]
        Y_test[i, :] = Y[ind, :]

    dill.dump(X_train, open("Temp_Data/X_train.pkl", 'wb'))
    dill.dump(Y_train, open("Temp_Data/Y_train.pkl", 'wb'))

    dill.dump(X_valid, open("Temp_Data/X_valid.pkl", 'wb'))
    dill.dump(Y_valid, open("Temp_Data/Y_valid.pkl", 'wb'))

    dill.dump(X_test, open("Temp_Data/X_test.pkl", 'wb'))
    dill.dump(Y_test, open("Temp_Data/Y_test.pkl", 'wb'))

    # Import a worker class
    from MAXWEL_worker import MAXWEL_worker as worker

    # Build an argument parser
    parser = argparse.ArgumentParser(
        description='MAXWEL - sequential execution.')
    parser.add_argument('--min_budget',
                        type=float,
                        help='Minimum budget used during the optimization.',
                        default=5)
    parser.add_argument('--max_budget',
                        type=float,
                        help='Maximum budget used during the optimization.',
                        default=20)
    parser.add_argument('--n_iterations',
                        type=int,
                        help='Number of iterations performed by the optimizer',
                        default=10)
    parser.add_argument('--n_workers',
                        type=int,
                        help='Number of workers to run in parallel.',
                        default=1)
    parser.add_argument(
        '--shared_directory',
        type=str,
        help=
        'A directory that is accessible for all processes, e.g. a NFS share.',
        default='.')

    args = parser.parse_args()

    # Define a realtime result logger
    result_logger = hpres.json_result_logger(directory=result_dir,
                                             overwrite=True)

    # Start a nameserver
    NS = hpns.NameServer(run_id='MAXWEL', host='127.0.0.1', port=None)
    NS.start()

    # Start the workers
    workers = []
    for i in range(args.n_workers):
        w = worker(nameserver='127.0.0.1', run_id='MAXWEL', id=i)
        w.run(background=True)
        workers.append(w)

    # Define and run an optimizer
    bohb = BOHB(configspace=w.get_configspace(),
                run_id='MAXWEL',
                result_logger=result_logger,
                min_budget=args.min_budget,
                max_budget=args.max_budget)

    res = bohb.run(n_iterations=args.n_iterations,
                   min_n_workers=args.n_workers)

    # Shut down the workers and the nameserver
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()
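Since the example above logs live results to result_dir with json_result_logger, a finished run can be inspected offline. A minimal sketch, assuming hpbandster's standard result helper and the result_dir created inside the function:

import hpbandster.core.result as hpres

# Load the results.json / configs.json pair written by the live logger
res = hpres.logged_results_to_HBS_result(result_dir)

id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
print('Best found configuration:', id2config[incumbent]['config'])
print('A total of {} runs were executed.'.format(len(res.get_all_runs())))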
Example #27
             dataID = args.openml_dataid)
w.run(background=True)

#if True:
#    time.sleep(5)   # short artificial delay to make sure the nameserver is already running
#    w = MyWorker(sleep_interval = 0.5,run_id=args.run_id, host=host, dataID = args.openml_dataid)
#    w.load_nameserver_credentials(working_directory=args.shared_directory)
#    w.run(background=False)
#    exit(0)

# Run an optimizer
# We now have to specify the host, and the nameserver information
bohb = BOHB(configspace=MyWorker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            min_budget=args.min_budget,
            max_budget=args.max_budget)

res = bohb.run(n_iterations=args.n_iterations, min_n_workers=args.n_workers)

# get_pandas_dataframe() returns the configuration dataframe and the losses; unpack it once
d1, loss = res.get_pandas_dataframe()

d1['loss'] = loss

d1.to_csv(os.path.join(args.shared_directory, outputName + ".csv"))

# In a cluster environment, you usually want to store the results for later analysis.
# One option is to simply pickle the Result object
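A minimal sketch of that pickling step, reusing res, args.shared_directory, and args.run_id from the example above:

import os
import pickle

# Persist the Result object so the run can be analyzed later
with open(os.path.join(args.shared_directory, '{}.pkl'.format(args.run_id)), 'wb') as fh:
    pickle.dump(res, fh)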
Example #28
def main():
    parser = argparse.ArgumentParser(description='Tensorforce hyperparameter tuner')
    parser.add_argument(
        'environment', help='Environment (name, configuration JSON file, or library module)'
    )
    parser.add_argument(
        '-l', '--level', type=str, default=None,
        help='Level or game id, like `CartPole-v1`, if supported'
    )
    parser.add_argument(
        '-m', '--max-repeats', type=int, default=1, help='Maximum number of repetitions'
    )
    parser.add_argument(
        '-n', '--num-iterations', type=int, default=1, help='Number of BOHB iterations'
    )
    parser.add_argument(
        '-d', '--directory', type=str, default='tuner', help='Output directory'
    )
    parser.add_argument(
        '-r', '--restore', type=str, default=None, help='Restore from given directory'
    )
    parser.add_argument('--id', type=str, default='worker', help='Unique worker id')
    args = parser.parse_args()

    if args.level is None:
        environment = Environment.create(environment=args.environment)
    else:
        environment = Environment.create(environment=args.environment, level=args.level)

    # For a distributed setup, the host could instead be resolved from a
    # network interface via nic_name_to_host(nic_name=...); here everything
    # runs locally and the nameserver picks its own port.
    host = 'localhost'
    port = None

    server = NameServer(run_id=args.id, working_directory=args.directory, host=host, port=port)
    nameserver, nameserver_port = server.start()

    worker = TensorforceWorker(
        environment=environment, run_id=args.id, nameserver=nameserver,
        nameserver_port=nameserver_port, host=host
    )
    # TensorforceWorker(run_id, nameserver=None, nameserver_port=None, logger=None, host=None, id=None, timeout=None)
    # logger: logging.logger instance, logger used for debugging output
    # id: anything with a __str__ method; if multiple workers are started in the same process, you MUST provide a unique id for each one of them using the `id` argument.
    # timeout: int or float, specifies how long a worker waits for a new job after finishing a computation before shutting down. Towards the end of a long run with multiple workers, this helps to shut down idling workers. We recommend a timeout that is roughly half the time it would take for the second largest budget to finish. The default (None) means that the worker will wait indefinitely and never shut down on its own.
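    # Illustration (not from the original comments): with max_repeats=9 and the
    # default eta=3, the second largest budget is 3 repeats, so a reasonable
    # timeout is roughly half the wall-clock time of a 3-repeat evaluation.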

    worker.run(background=True)

    # config = cs.sample_configuration().get_dictionary()
    # print(config)
    # res = worker.compute(config=config, budget=1, working_directory='.')
    # print(res)

    if args.restore is None:
        previous_result = None
    else:
        previous_result = logged_results_to_HBS_result(directory=args.restore)

    result_logger = json_result_logger(directory=args.directory, overwrite=True)  # log live results to the output directory

    optimizer = BOHB(
        configspace=worker.get_configspace(), min_budget=0.5, max_budget=float(args.max_repeats),
        run_id=args.id, working_directory=args.directory,
        nameserver=nameserver, nameserver_port=nameserver_port, host=host,
        result_logger=result_logger, previous_result=previous_result
    )
    # BOHB(configspace=None, eta=3, min_budget=0.01, max_budget=1, min_points_in_model=None, top_n_percent=15, num_samples=64, random_fraction=1 / 3, bandwidth_factor=3, min_bandwidth=1e-3, **kwargs)
    # Master(run_id, config_generator, working_directory='.', ping_interval=60, nameserver='127.0.0.1', nameserver_port=None, host=None, shutdown_workers=True, job_queue_sizes=(-1,0), dynamic_queue_size=True, logger=None, result_logger=None, previous_result = None)
    # logger: logging.logger like object, the logger to output some (more or less meaningful) information

    results = optimizer.run(n_iterations=args.num_iterations)
    # optimizer.run(n_iterations=1, min_n_workers=1, iteration_kwargs={})
    # min_n_workers: int, minimum number of workers before starting the run

    optimizer.shutdown(shutdown_workers=True)
    server.shutdown()
    environment.close()

    with open(os.path.join(args.directory, 'results.pkl'), 'wb') as filehandle:
        pickle.dump(results, filehandle)

    print('Best found configuration:', results.get_id2config_mapping()[results.get_incumbent_id()]['config'])
    print('Runs:', results.get_runs_by_id(config_id=results.get_incumbent_id()))
    print('A total of {} unique configurations were sampled.'.format(len(results.get_id2config_mapping())))
    print('A total of {} runs were executed.'.format(len(results.get_all_runs())))
    print('Total budget corresponds to {:.1f} full function evaluations.'.format(
        sum([r.budget for r in results.get_all_runs()]) / args.max_repeats)
    )
Example #29
class OptimizerBOHB(SearchStrategy, RandomSeed):
    def __init__(
            self,
            base_task_id,  # type: str
            hyper_parameters,  # type: Sequence[Parameter]
            objective_metric,  # type: Objective
            execution_queue,  # type: str
            num_concurrent_workers,  # type: int
            min_iteration_per_job,  # type: Optional[int]
            max_iteration_per_job,  # type: Optional[int]
            total_max_jobs,  # type: Optional[int]
            pool_period_min=2.,  # type: float
            time_limit_per_job=None,  # type: Optional[float]
            local_port=9090,  # type: int
            **bohb_kwargs,  # type: Any
    ):
        # type: (...) -> None
        """
        Initialize a BOHB search strategy optimizer
        BOHB performs robust and efficient hyperparameter optimization at scale by combining
        the speed of Hyperband searches with the guidance and guarantees of convergence of Bayesian
        Optimization. Instead of sampling new configurations at random,
        BOHB uses kernel density estimators to select promising candidates.

        For reference: ::

            @InProceedings{falkner-icml-18,
              title =        {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
              author =       {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
              booktitle =    {Proceedings of the 35th International Conference on Machine Learning},
              pages =        {1436--1445},
              year =         {2018},
            }

        :param str base_task_id: Task ID (str)
        :param list hyper_parameters: list of Parameter objects to optimize over
        :param Objective objective_metric: Objective metric to maximize / minimize
        :param str execution_queue: execution queue to use for launching Tasks (experiments).
        :param int num_concurrent_workers: Limit number of concurrent running Tasks (machines)
        :param int min_iteration_per_job: minimum number of iterations for a job to run.
            'iterations' are the reported iterations for the specified objective,
            not the maximum reported iteration of the Task.
        :param int max_iteration_per_job: number of iteration per job
            'iterations' are the reported iterations for the specified objective,
            not the maximum reported iteration of the Task.
        :param int total_max_jobs: total maximum job for the optimization process.
            Must be provided in order to calculate the total budget for the optimization process.
            The total budget is measured by "iterations" (see above)
            and will be set to `max_iteration_per_job * total_max_jobs`
            This means more than total_max_jobs jobs could be created, as long as the cumulative iterations
            (summed over all created jobs) do not exceed `max_iteration_per_job * total_max_jobs`
        :param float pool_period_min: time in minutes between two consecutive pools
        :param float time_limit_per_job: Optional, maximum execution time per single job in minutes,
            when time limit is exceeded job is aborted
        :param int local_port: default port 9090/tcp; a port is required for the BOHB workers to communicate, even locally.
        :param bohb_kwargs: arguments passed directly to the BOHB object
        """
        if not max_iteration_per_job or not min_iteration_per_job or not total_max_jobs:
            raise ValueError(
                "OptimizerBOHB is missing a defined budget.\n"
                "The following arguments must be defined: "
                "max_iteration_per_job, min_iteration_per_job, total_max_jobs.\n"
                "Maximum optimization budget is: max_iteration_per_job * total_max_jobs\n"
            )

        super(OptimizerBOHB,
              self).__init__(base_task_id=base_task_id,
                             hyper_parameters=hyper_parameters,
                             objective_metric=objective_metric,
                             execution_queue=execution_queue,
                             num_concurrent_workers=num_concurrent_workers,
                             pool_period_min=pool_period_min,
                             time_limit_per_job=time_limit_per_job,
                             max_iteration_per_job=max_iteration_per_job,
                             total_max_jobs=total_max_jobs)
        self._max_iteration_per_job = max_iteration_per_job
        self._min_iteration_per_job = min_iteration_per_job
        self._bohb_kwargs = bohb_kwargs or {}
        self._param_iterator = None
        self._namespace = None
        self._bohb = None
        self._res = None
        self._nameserver_port = local_port

    def set_optimization_args(
            self,
            eta=3,  # type: float
            min_budget=None,  # type: Optional[float]
            max_budget=None,  # type: Optional[float]
            min_points_in_model=None,  # type: Optional[int]
            top_n_percent=15,  # type: Optional[int]
            num_samples=None,  # type: Optional[int]
            random_fraction=1 / 3.,  # type: Optional[float]
            bandwidth_factor=3,  # type: Optional[float]
            min_bandwidth=1e-3,  # type: Optional[float]
    ):
        # type: (...) -> ()
        """
        Defaults copied from BOHB constructor, see details in BOHB.__init__

            BOHB performs robust and efficient hyperparameter optimization
            at scale by combining the speed of Hyperband searches with the
            guidance and guarantees of convergence of Bayesian
            Optimization. Instead of sampling new configurations at random,
            BOHB uses kernel density estimators to select promising candidates.

            .. highlight:: none

            For reference: ::

                @InProceedings{falkner-icml-18,
                  title =        {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},
                  author =       {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
                  booktitle =    {Proceedings of the 35th International Conference on Machine Learning},
                  pages =        {1436--1445},
                  year =         {2018},
                }

            Parameters
            ----------
            eta : float (3)
                In each iteration, a complete run of sequential halving is executed. In it,
                after evaluating each configuration on the same subset size, only a fraction of
                1/eta of them 'advances' to the next round.
                Must be greater or equal to 2.
            min_budget : float (0.01)
                The smallest budget to consider. Needs to be positive!
            max_budget : float (1)
                The largest budget to consider. Needs to be larger than min_budget!
                The budgets will be geometrically distributed
                :math:`\sim \eta^k` for :math:`k \in [0, 1, ..., num\_subsets - 1]`.
            min_points_in_model: int (None)
                number of observations to start building a KDE. Default 'None' means
                dim+1, the bare minimum.
            top_n_percent: int (15)
                percentage ( between 1 and 99, default 15) of the observations that are considered good.
            num_samples: int (64)
                number of samples to optimize EI (default 64)
            random_fraction: float (1/3.)
                fraction of purely random configurations that are sampled from the
                prior without the model.
            bandwidth_factor: float (3.)
                to encourage diversity, the points proposed to optimize EI, are sampled
                from a 'widened' KDE where the bandwidth is multiplied by this factor (default: 3)
            min_bandwidth: float (1e-3)
                to keep diversity, even when all (good) samples have the same value for one of the parameters,
                a minimum bandwidth (Default: 1e-3) is used instead of zero.
        """
        if min_budget:
            self._bohb_kwargs['min_budget'] = min_budget
        if max_budget:
            self._bohb_kwargs['max_budget'] = max_budget
        if num_samples:
            self._bohb_kwargs['num_samples'] = num_samples
        self._bohb_kwargs['eta'] = eta
        self._bohb_kwargs['min_points_in_model'] = min_points_in_model
        self._bohb_kwargs['top_n_percent'] = top_n_percent
        self._bohb_kwargs['random_fraction'] = random_fraction
        self._bohb_kwargs['bandwidth_factor'] = bandwidth_factor
        self._bohb_kwargs['min_bandwidth'] = min_bandwidth

    def start(self):
        # type: () -> ()
        """
        Start the Optimizer controller function loop()
        If the calling process is stopped, the controller will stop as well.

        Notice: this function returns only after the optimization is completed, or after stop() was called.
        """
        # Step 1: Start a NameServer
        fake_run_id = 'OptimizerBOHB_{}'.format(time())
        # the default port is 9090/tcp; a fixed port is required because this is how BOHB workers communicate (even locally)
        self._namespace = hpns.NameServer(run_id=fake_run_id,
                                          host='127.0.0.1',
                                          port=self._nameserver_port)
        self._namespace.start()

        # we have to scale the budget to the iterations per job, otherwise numbers might be too high
        budget_iteration_scale = self._max_iteration_per_job

        # Step 2: Start the workers
        workers = []
        for i in range(self._num_concurrent_workers):
            w = _TrainsBandsterWorker(
                optimizer=self,
                sleep_interval=int(self.pool_period_minutes * 60),
                budget_iteration_scale=budget_iteration_scale,
                base_task_id=self._base_task_id,
                objective=self._objective_metric,
                queue_name=self._execution_queue,
                nameserver='127.0.0.1',
                nameserver_port=self._nameserver_port,
                run_id=fake_run_id,
                id=i)
            w.run(background=True)
            workers.append(w)

        # Step 3: Run an optimizer
        # min_budget is derived from the iteration limits below, so drop any
        # min_budget stored by set_optimization_args() to avoid passing the
        # keyword twice to the BOHB constructor.
        self._bohb_kwargs.pop('min_budget', None)
        self._bohb = BOHB(configspace=self._convert_hyper_parameters_to_cs(),
                          run_id=fake_run_id,
                          num_samples=self.total_max_jobs,
                          min_budget=float(self._min_iteration_per_job) /
                          float(self._max_iteration_per_job),
                          **self._bohb_kwargs)
        self._res = self._bohb.run(n_iterations=self.total_max_jobs,
                                   min_n_workers=self._num_concurrent_workers)

        # Step 4: if we get here, Shutdown
        self.stop()

    def stop(self):
        # type: () -> ()
        """
        Stop the currently running optimization loop.
        Called from a different thread than start().
        """
        # After the optimizer run, we must shutdown the master and the nameserver.
        self._bohb.shutdown(shutdown_workers=True)
        self._namespace.shutdown()

        if not self._res:
            return

        # Step 5: Analysis
        id2config = self._res.get_id2config_mapping()
        incumbent = self._res.get_incumbent_id()
        all_runs = self._res.get_all_runs()

        # Step 6: Print Analysis
        print('Best found configuration:', id2config[incumbent]['config'])
        print('A total of {} unique configurations were sampled.'.format(
            len(id2config.keys())))
        print('A total of {} runs were executed.'.format(len(all_runs)))
        print('Total budget corresponds to {:.1f} full function evaluations.'.
              format(
                  sum([r.budget for r in all_runs]) /
                  self._bohb_kwargs.get('max_budget', 1.0)))
        print('The run took {:.1f} seconds to complete.'.format(
            all_runs[-1].time_stamps['finished'] -
            all_runs[0].time_stamps['started']))

    def _convert_hyper_parameters_to_cs(self):
        # type: () -> CS.ConfigurationSpace
        cs = CS.ConfigurationSpace(seed=self._seed)
        for p in self._hyper_parameters:
            if isinstance(p, UniformParameterRange):
                hp = CSH.UniformFloatHyperparameter(p.name,
                                                    lower=p.min_value,
                                                    upper=p.max_value,
                                                    log=False,
                                                    q=p.step_size)
            elif isinstance(p, UniformIntegerParameterRange):
                hp = CSH.UniformIntegerHyperparameter(p.name,
                                                      lower=p.min_value,
                                                      upper=p.max_value,
                                                      log=False,
                                                      q=p.step_size)
            elif isinstance(p, DiscreteParameterRange):
                hp = CSH.CategoricalHyperparameter(p.name, choices=p.values)
            else:
                raise ValueError(
                    "HyperParameter type {} not supported yet with OptimizerBOHB"
                    .format(type(p)))
            cs.add_hyperparameter(hp)

        return cs
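A hedged usage sketch for OptimizerBOHB as documented above; the UniformParameterRange / DiscreteParameterRange signatures and the prepared Objective instance are assumptions inferred from the names this example references, not a confirmed API:

# Hypothetical wiring; keyword names follow the constructor documented above.
optimizer = OptimizerBOHB(
    base_task_id='<template-task-id>',  # placeholder for the task cloned per trial
    hyper_parameters=[
        UniformParameterRange('lr', min_value=1e-4, max_value=1e-1),  # assumed signature
        DiscreteParameterRange('batch_size', values=[32, 64, 128]),   # assumed signature
    ],
    objective_metric=objective,  # an Objective instance, assumed prepared elsewhere
    execution_queue='default',
    num_concurrent_workers=2,
    min_iteration_per_job=10,
    max_iteration_per_job=100,
    total_max_jobs=30,
)
optimizer.set_optimization_args(eta=3)  # budgets are derived from the iteration limits
optimizer.start()  # blocks until the loop completes or stop() is called from another thread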
Example #30
def main():
    # Check quantities of train, validation and test images
    train_images = np.array(glob("data/train/*/*"))
    valid_images = np.array(glob("data/valid/*/*"))
    test_images = np.array(glob("data/test/*/*"))

    # Check relative percentages of image types
    train_images_mel = np.array(glob("data/train/melanoma/*"))
    train_images_nev = np.array(glob("data/train/nevus/*"))
    train_images_seb = np.array(glob("data/train/seborrheic_keratosis/*"))

    valid_images_mel = np.array(glob("data/valid/melanoma/*"))
    valid_images_nev = np.array(glob("data/valid/nevus/*"))
    valid_images_seb = np.array(glob("data/valid/seborrheic_keratosis/*"))

    test_images_mel = np.array(glob("data/test/melanoma/*"))
    test_images_nev = np.array(glob("data/test/nevus/*"))
    test_images_seb = np.array(glob("data/test/seborrheic_keratosis/*"))

    print(
        "There are {} training images, {} validation images and {} test images."
        .format(len(train_images), len(valid_images), len(test_images)))
    print(
        "For the training images, {mel:=.1f}% ({mel2}) are of melanoma, {nev:=.1f}% ({nev2}) are of nevus and {seb:=.1f}% ({seb2}) are for seborrheic keratosis."
        .format(mel=len(train_images_mel) / len(train_images) * 100,
                mel2=len(train_images_mel),
                nev=len(train_images_nev) / len(train_images) * 100,
                nev2=len(train_images_nev),
                seb=len(train_images_seb) / len(train_images) * 100,
                seb2=len(train_images_seb)))
    print(
        "For the validation images, {mel:=.1f}% ({mel2}) are of melanoma, {nev:=.1f}% ({nev2}) are of nevus and {seb:=.1f}% ({seb2}) are for seborrheic keratosis."
        .format(mel=len(valid_images_mel) / len(valid_images) * 100,
                mel2=len(valid_images_mel),
                nev=len(valid_images_nev) / len(valid_images) * 100,
                nev2=len(valid_images_nev),
                seb=len(valid_images_seb) / len(valid_images) * 100,
                seb2=len(valid_images_seb)))
    print(
        "For the test images, {mel:=.1f}% ({mel2}) are of melanoma, {nev:=.1f}% ({nev2}) are of nevus and {seb:=.1f}% ({seb2}) are for seborrheic keratosis."
        .format(mel=len(test_images_mel) / len(test_images) * 100,
                mel2=len(test_images_mel),
                nev=len(test_images_nev) / len(test_images) * 100,
                nev2=len(test_images_nev),
                seb=len(test_images_seb) / len(test_images) * 100,
                seb2=len(test_images_seb)))

    # Set HpBandSter logging
    logging.basicConfig(level=logging.DEBUG)

    # Define the parser. Note that the key parameters are min_budget, max_budget, shared_directory and n_iterations.
    parser = argparse.ArgumentParser(
        description='ISIC2017 - CNN on Derm Dataset')
    parser.add_argument('--min_budget',
                        type=float,
                        help='Minimum number of epochs for training.',
                        default=1)
    parser.add_argument('--max_budget',
                        type=float,
                        help='Maximum number of epochs for training.',
                        default=3)
    parser.add_argument('--n_iterations',
                        type=int,
                        help='Number of iterations performed by the optimizer',
                        default=16)
    parser.add_argument('--worker',
                        help='Flag to turn this into a worker process',
                        action='store_true')
    parser.add_argument(
        '--run_id',
        type=str,
        help=
        'A unique run id for this optimization run. An easy option is to use the job id of the cluster scheduler.'
    )
    parser.add_argument(
        '--nic_name',
        type=str,
        help='Which network interface to use for communication.',
        default='lo')
    parser.add_argument(
        '--shared_directory',
        type=str,
        help=
        'A directory that is accessible for all processes, e.g. a NFS share.',
        default='/home/ubuntu/src/derm-ai/data')
    parser.add_argument(
        '--backend',
        help=
        'Toggles which worker is used. Choose between a pytorch and a keras implementation.',
        choices=['pytorch', 'keras'],
        default='pytorch')
    args = parser.parse_args()

    host = hpns.nic_name_to_host(args.nic_name)
    # This example shows how to log live results. This is most useful
    # for really long runs, where intermediate results could already be
    # interesting. The core.result submodule contains the functionality to
    # read the two generated files (results.json and configs.json) and
    # create a Result object.
    result_logger = hpres.json_result_logger(directory=args.shared_directory,
                                             overwrite=True)
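    # The results.json / configs.json pair written by this logger can later be
    # loaded back into a Result object, e.g. with hpbandster's standard helper:
    #   res = hpres.logged_results_to_HBS_result(args.shared_directory)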
    # Start a nameserver:
    NS = hpns.NameServer(run_id=args.run_id,
                         host=host,
                         port=0,
                         working_directory=args.shared_directory)
    ns_host, ns_port = NS.start()

    # Start local worker
    w = worker(run_id=args.run_id,
               host=host,
               nameserver=ns_host,
               nameserver_port=ns_port,
               timeout=120)
    w.run(background=True)

    bohb = BOHB(
        configspace=w.get_configspace(),
        run_id=args.run_id,
        host=host,
        nameserver=ns_host,
        nameserver_port=ns_port,
        result_logger=result_logger,
        min_budget=args.min_budget,
        max_budget=args.max_budget,
    )

    # Run an optimizer

    res = bohb.run(n_iterations=args.n_iterations)
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()