def main(): """ Main Function. """ # Choose which objective to minimise and the configuration file objective_to_min, config_file, fidel_cost_func = _CHOOSER_DICT[(PROBLEM, IS_MF)] config = load_config_file(config_file) log_stream = open(LOG_FILE, 'w') # Call the optimiser if IS_MF: opt_val, opt_pt, history = minimise_multifidelity_function( objective_to_min, config.fidel_space, config.domain, config.fidel_to_opt, fidel_cost_func, MAX_CAPITAL, capital_type='realtime', config=config, reporter=log_stream) else: opt_val, opt_pt, history = minimise_function(objective_to_min, config.domain, MAX_CAPITAL, capital_type='realtime', config=config, reporter=log_stream) # Print out result log_stream.close() print('Optimum Value found in %02.f time (%d evals): %0.4f' % (MAX_CAPITAL, len(history.curr_opt_points), opt_val)) print('Optimum Point: %s.' % (str(opt_pt)))
def _preprocess_arguments(domain, funcs, config):
  """ Preprocess domain arguments and configuration file. """
  # Preprocess config argument
  converted_cp_to_euclidean = False
  if isinstance(config, str):
    config = load_config_file(config)
  if domain is None:
    domain = config.domain
  # The function
  if config is not None:
    proc_funcs = [get_processed_func_from_raw_func_for_cp_domain(
        f, config.domain, config.domain_orderings.index_ordering,
        config.domain_orderings.dim_ordering) for f in funcs]
  else:
    proc_funcs = funcs
  ret_funcs = proc_funcs
  # Preprocess domain argument
  if isinstance(domain, (list, tuple)):
    domain = EuclideanDomain(domain)
  elif domain.get_type() == 'euclidean':
    pass
  elif domain.get_type() == 'cartesian_product':
    if domain.num_domains == 1 and \
       domain.list_of_domains[0].get_type() == 'euclidean':
      domain = domain.list_of_domains[0]
      config.domain_orderings.dim_ordering = config.domain_orderings.dim_ordering[0]
      config.domain_orderings.index_ordering = config.domain_orderings.index_ordering[0]
      config.domain_orderings.kernel_ordering = config.domain_orderings.kernel_ordering[0]
      config.domain_orderings.name_ordering = config.domain_orderings.name_ordering[0]
      config.domain = domain
      converted_cp_to_euclidean = True
      # The function
      def _get_ret_func_from_proc_func_for_euc_domains(_proc_func):
        """ Get function to return. """
        return lambda x: _proc_func([x])
      ret_funcs = [_get_ret_func_from_proc_func_for_euc_domains(pf) for pf in proc_funcs]
  else:
    raise ValueError('domain should be an instance of EuclideanDomain or ' +
                     'CartesianProductDomain.')
  return domain, ret_funcs, config, converted_cp_to_euclidean
def _get_cpfc_args_from_config(config):
  """ Return arguments as a dict. """
  # pylint: disable=maybe-no-member
  if isinstance(config, str):
    from dragonfly.exd.cp_domain_utils import load_config_file
    config = load_config_file(config)
  ret = {'domain': config.domain,
         'domain_orderings': config.domain_orderings,
         'fidel_space': config.fidel_space,
         'fidel_to_opt': config.fidel_to_opt,
         'fidel_space_orderings': config.fidel_space_orderings}
  return ret
def main(): """ Main Function""" # Choose configuration file wp = os.getcwd() config_file = wp+'\\examples\\dm\\diabetes'+'\\config_skrfr_mf.json' config = load_config_file(config_file) log_stream = open(LOG_FILE,'w') # Call the optimiser opt_val, opt_pt, history = minimise_multifidelity_function(rfr_mf_obj, config.fidel_space, config.domain, config.fidel_to_opt, rfr_mf_cost, MAX_CAPITAL,capital_type='realtime', config=config, reporter=log_stream) log_stream.close() print('Optimum Value found in %02.f time (%d evals): %0.4f'%( MAX_CAPITAL, len(history.curr_opt_points), opt_val)) print('Optimum Point: %s.'%(str(opt_pt)))
      total += labels.size(0)
      correct += (predicted == labels.cuda()).sum().item()
    accuracy = 100 * correct / total
    return accuracy


def parallel_exec(draw):
  train_instance = Trainable_cifar10()
  train_instance.reset(*draw)
  return train_instance.train_and_eval()


if __name__ == "__main__":
  DEBUG = True
  config = load_config_file('cifar10-dom.json')
  domain, domain_orderings = config.domain, config.domain_orderings
  func_caller = CPFunctionCaller(None, domain, domain_orderings=domain_orderings)
  opt = gp_bandit.CPGPBandit(func_caller, ask_tell_mode=True)
  opt.initialise()
  parallel_jobs = 10
  train_instance = Trainable_cifar10()
  while True:
    results = {}
    if parallel_jobs > 0:
      draws = [opt.ask() for _ in range(parallel_jobs)]
      with Pool(parallel_jobs) as p:
        accuracies = p.map(parallel_exec, draws)
      # accuracies = [parallel_exec(d) for d in draws]
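      # --- Hedged continuation sketch (not from the source): one way the loop above
      # could feed the evaluated accuracies back to the optimiser. Dragonfly's
      # ask-tell interface accepts a list of (point, value) pairs via opt.tell();
      # the bookkeeping in `results` and the progress print are assumptions added
      # for illustration only.
      for draw, acc in zip(draws, accuracies):
        opt.tell([(draw, acc)])
        results[str(draw)] = acc
      print('Best accuracy in this batch: %0.2f' % max(accuracies))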
def _preprocess_multifidelity_arguments(fidel_space, domain, funcs, fidel_cost_func,
                                        fidel_to_opt, config):
  """ Preprocess fidel_space, domain arguments and configuration file. """
  # Preprocess config argument
  converted_cp_to_euclidean = False
  if isinstance(config, str):
    config = load_config_file(config)
  if fidel_space is None:
    fidel_space = config.fidel_space
  if domain is None:
    domain = config.domain
  if fidel_to_opt is None:
    fidel_to_opt = config.fidel_to_opt
  # The function
  if config is not None:
    proc_funcs = [get_processed_func_from_raw_func_for_cp_domain_fidelity(f, config)
                  for f in funcs]
    proc_fidel_cost_func = get_processed_func_from_raw_func_for_cp_domain(
        fidel_cost_func, config.fidel_space,
        config.fidel_space_orderings.index_ordering,
        config.fidel_space_orderings.dim_ordering)
  else:
    proc_funcs = funcs
    proc_fidel_cost_func = fidel_cost_func
  ret_funcs = proc_funcs
  ret_fidel_cost_func = proc_fidel_cost_func
  # Preprocess domain argument
  if isinstance(fidel_space, (list, tuple)) and isinstance(domain, (list, tuple)):
    domain = EuclideanDomain(domain)
    fidel_space = EuclideanDomain(fidel_space)
  elif fidel_space.get_type() == 'euclidean' and domain.get_type() == 'euclidean':
    pass
  elif fidel_space.get_type() == 'cartesian_product' and \
       domain.get_type() == 'cartesian_product':
    if fidel_space.num_domains == 1 and \
       fidel_space.list_of_domains[0].get_type() == 'euclidean' and \
       domain.num_domains == 1 and \
       domain.list_of_domains[0].get_type() == 'euclidean':
      # Change the fidelity space
      fidel_space = fidel_space.list_of_domains[0]
      config.fidel_space_orderings.dim_ordering = \
        config.fidel_space_orderings.dim_ordering[0]
      config.fidel_space_orderings.index_ordering = \
        config.fidel_space_orderings.index_ordering[0]
      config.fidel_space_orderings.kernel_ordering = \
        config.fidel_space_orderings.kernel_ordering[0]
      config.fidel_space_orderings.name_ordering = \
        config.fidel_space_orderings.name_ordering[0]
      config.fidel_to_opt = config.fidel_to_opt[0]
      fidel_to_opt = fidel_to_opt[0]
      # Change the domain
      domain = domain.list_of_domains[0]
      config.domain_orderings.dim_ordering = config.domain_orderings.dim_ordering[0]
      config.domain_orderings.index_ordering = config.domain_orderings.index_ordering[0]
      config.domain_orderings.kernel_ordering = config.domain_orderings.kernel_ordering[0]
      config.domain_orderings.name_ordering = config.domain_orderings.name_ordering[0]
      # Add to config
      config.fidel_space = fidel_space
      config.domain = domain
      converted_cp_to_euclidean = True
      # Functions
      def _get_ret_func_from_proc_func_for_euc_domains(_proc_func):
        """ Get function to return. """
        return lambda z, x: _proc_func([z], [x])
      ret_funcs = [_get_ret_func_from_proc_func_for_euc_domains(pf) for pf in proc_funcs]
      ret_fidel_cost_func = lambda z: proc_fidel_cost_func([z])
  else:
    raise ValueError('fidel_space and domain should be either both instances of ' +
                     'EuclideanDomain or both CartesianProductDomain.')
  return (fidel_space, domain, ret_funcs, ret_fidel_cost_func, fidel_to_opt, config,
          converted_cp_to_euclidean)
def get_prob_params():
  """ Returns the problem parameters. """
  prob = Namespace()
  prob.study_name = STUDY_NAME
  if IS_DEBUG:
    prob.num_trials = 3
    prob.max_num_evals = 20
  else:
    prob.num_trials = NUM_TRIALS
    prob.max_num_evals = MAX_NUM_EVALS
  # Common
  prob.num_workers = NUM_WORKERS
  # study_params in order: config_file, objective, cost_func, budget in hours.
  _study_params = {
      'supernova': ('../demos_real/supernova/config_mf.json',
                    supernova_obj_mf, supernova_cost_mf, 4.0),
      'salsa': ('../demos_real/salsa/config_salsa_energy_mf.json',
                salsa_obj_mf, salsa_cost_mf, 8.0),
      'gbcsensorless': ('../demos_real/skltree/config_gbc_mf.json',
                        gbcsensorless_obj_mf, gbcsensorless_cost_mf, 4.0),
      'gbrprotein': ('../demos_real/skltree/config_gbr_mf.json',
                     gbrprotein_obj_mf, gbrprotein_cost_mf, 3.0),
      'gbrnaval': ('../demos_real/skltree/config_naval_gbr_mf.json',
                   gbrnaval_obj_mf, gbrnaval_cost_mf, 3.0),
      'rfrnews': ('../demos_real/skltree/config_rfr_mf.json',
                  rfrnews_obj_mf, rfrnews_cost_mf, 6.0),
      }
#   _study_params = {
#       'supernova': ('../demos_real/supernova/config_mf_duplicate.json',
#                     supernova_obj_mf, supernova_cost_mf, 2.0),
#       'salsa': ('../demos_real/salsa/config_salsa_energy_mf.json',
#                 salsa_obj_mf, salsa_cost_mf, 4.0),
#       }
  domain_config_file, raw_func, raw_fidel_cost_func, budget_in_hours = \
    _study_params[prob.study_name]
  # noisy
  prob.noisy_evals = False
  noise_type = 'no_noise'
  noise_scale = None
  # Create domain, function_caller and worker_manager
  config = load_config_file(domain_config_file)
  func_caller = get_multifunction_caller_from_config(
      raw_func, config, raw_fidel_cost_func=raw_fidel_cost_func,
      noise_type=noise_type, noise_scale=noise_scale)
  # Set max_capital
  if IS_DEBUG:
    prob.max_capital = 0.05 * 60 * 60
  else:
    prob.max_capital = budget_in_hours * 60 * 60
  # Store everything in prob
  prob.func_caller = func_caller
  prob.tmp_dir = get_evaluation_tmp_dir(prob.study_name)
  prob.worker_manager = RealWorkerManager(prob.num_workers, prob.tmp_dir)
  prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')
  prob.methods = METHODS
  prob.save_results_dir = SAVE_RESULTS_DIR
  prob.reporter = get_reporter('default')
  # evaluation options
  prob.evaluation_options = Namespace(prev_eval_points='none',
                                      initial_pool_size=0)
  return prob
def get_prob_params():
  """ Returns the problem parameters. """
  prob = Namespace()
  prob.study_name = STUDY_NAME
  if IS_DEBUG:
    prob.num_trials = 3
    prob.max_capital = 10
  else:
    prob.num_trials = NUM_TRIALS
    prob.max_capital = MAX_CAPITAL
  # Common
  prob.time_distro = TIME_DISTRO
  prob.num_workers = NUM_WORKERS
  _study_params = {
      'branin': ('synthetic/branin/config_mf.json',
                 branin_mf, cost_branin_mf, 0.1, 0, 1),
      'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',
                      hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),
      'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',
                      hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),
      'borehole_6': ('synthetic/borehole_6/config_mf.json',
                     borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),
      'park2_4': ('synthetic/park2_4/config_mf.json',
                  park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),
      'park2_3': ('synthetic/park2_3/config_mf.json',
                  park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),
      'park1_3': ('synthetic/park1_3/config_mf.json',
                  park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),
      }
  (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,
   _initial_pool_size, _) = _study_params[prob.study_name]
  domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR,
                                    domain_config_file_suffix)
  # noisy
  prob.noisy_evals = NOISY_EVALS
  if NOISY_EVALS:
    noise_type = 'gauss'
    noise_scale = _fc_noise_scale
  else:
    noise_type = 'no_noise'
    noise_scale = None
  # Create domain, function_caller and worker_manager
  config = load_config_file(domain_config_file)
  func_caller = get_multifunction_caller_from_config(
      raw_func, config, raw_fidel_cost_func=raw_fidel_cost_func,
      noise_type=noise_type, noise_scale=noise_scale)
  # Set max_capital
  if hasattr(func_caller, 'fidel_cost_func'):
    prob.max_capital = prob.max_capital * \
                       func_caller.fidel_cost_func(func_caller.fidel_to_opt)
  # Store everything in prob
  prob.func_caller = func_caller
  prob.worker_manager = SyntheticWorkerManager(prob.num_workers,
                                               time_distro='caller_eval_cost')
  prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')
  prob.methods = METHODS
  prob.save_results_dir = SAVE_RESULTS_DIR
  prob.reporter = get_reporter('default')
  # evaluation options
  prob.evaluation_options = Namespace(prev_eval_points='none',
                                      initial_pool_size=_initial_pool_size)
  return prob
def main(): """ Main function. """ options = load_options(get_command_line_args(), cmd_line=True) # Load domain and objective config = load_config_file(options.config) if hasattr(config, 'fidel_space'): is_mf = True else: is_mf = False # Load module expt_dir = os.path.dirname( os.path.abspath(os.path.realpath(options.config))) if not os.path.exists(expt_dir): raise ValueError("Experiment directory does not exist.") sys.path.append(expt_dir) obj_module = import_module(config.name, expt_dir) sys.path.remove(expt_dir) # Set capital if options.max_capital < 0: raise ValueError( 'max_capital (time or number of evaluations) must be positive.') # Call optimiser _print_prefix = 'Maximising' if options.max_or_min == 'max' else 'Minimising' call_to_optimise = { 'single': { 'max': maximise_function, 'min': minimise_function }, 'single_mf': { 'max': maximise_multifidelity_function, 'min': minimise_multifidelity_function }, 'multi': { 'max': multiobjective_maximise_functions, 'min': multiobjective_minimise_functions }, } if not options.is_multi_objective: if is_mf: print( '%s multi-fidelity function on\n Fidelity-Space: %s.\n Domain: %s.\n' % (_print_prefix, config.fidel_space, config.domain)) opt_val, opt_pt, history = call_to_optimise['single_mf'][ options.max_or_min](obj_module.objective, fidel_space=None, domain=None, fidel_to_opt=config.fidel_to_opt, fidel_cost_func=obj_module.cost, max_capital=options.max_capital, capital_type=options.capital_type, opt_method=options.opt_method, config=config, options=options, reporter=options.report_progress) else: print('%s function on Domain: %s.\n' % (_print_prefix, config.domain)) opt_val, opt_pt, history = call_to_optimise['single'][ options.max_or_min](obj_module.objective, domain=None, max_capital=options.max_capital, capital_type=options.capital_type, opt_method=options.opt_method, config=config, options=options, reporter=options.report_progress) print('Optimum Value in %d evals: %0.4f' % (len(history.curr_opt_points), opt_val)) print('Optimum Point: %s.' % (opt_pt)) else: if is_mf: raise ValueError( 'Multi-objective multi-fidelity optimisation has not been ' + 'implemented yet.') else: # Check format of function caller if hasattr(obj_module, 'objectives'): objectives_to_pass = obj_module.objectives num_objectives = len(objectives_to_pass) else: num_objectives = obj_module.num_objectives objectives_to_pass = (obj_module.compute_objectives, obj_module.num_objectives) print('%s %d multiobjective functions on Domain: %s.\n' % (_print_prefix, num_objectives, config.domain)) print(objectives_to_pass) pareto_values, pareto_points, history = \ call_to_optimise['multi'][options.max_or_min](objectives_to_pass, domain=None, max_capital=options.max_capital, capital_type=options.capital_type, opt_method=options.opt_method, config=config, options=options, reporter=options.report_progress) num_pareto_points = len(pareto_points) print('Found %d Pareto Points: %s.' % (num_pareto_points, pareto_points)) print('Corresponding Pareto Values: %s.' % (pareto_values))
def main(): """ Main function. """ # First load arguments all_args = dragonfly_args + get_all_euc_gp_bandit_args() + get_all_cp_gp_bandit_args() \ + get_all_mf_euc_gp_bandit_args() + get_all_mf_cp_gp_bandit_args() \ + get_all_euc_moo_gp_bandit_args() + get_all_cp_moo_gp_bandit_args() all_args = get_unique_list_of_option_args(all_args) options = load_options(all_args, cmd_line=True) # Load domain and objective config = load_config_file(options.config) if hasattr(config, 'fidel_space'): is_mf = True else: is_mf = False expt_dir = os.path.dirname( os.path.abspath(os.path.realpath(options.config))) if not os.path.exists(expt_dir): raise ValueError("Experiment directory does not exist.") objective_file_name = config.name obj_module = imp.load_source( objective_file_name, os.path.join(expt_dir, objective_file_name + '.py')) # Set capital options.capital_type = 'return_value' if options.budget < 0: budget = options.max_capital else: budget = options.budget if budget < 0: raise ValueError( 'Specify the budget via argument budget or max_capital.') options.max_capital = budget # Call optimiser _print_prefix = 'Maximising' if options.max_or_min == 'max' else 'Minimising' call_to_optimise = { 'single': { 'max': maximise_function, 'min': minimise_function }, 'single_mf': { 'max': maximise_multifidelity_function, 'min': minimise_multifidelity_function }, 'multi': { 'max': multiobjective_maximise_functions, 'min': multiobjective_minimise_functions }, } if not options.is_multi_objective: if is_mf: print('%s function on fidel_space: %s, domain %s.' % (_print_prefix, config.fidel_space, config.domain)) opt_val, opt_pt, history = call_to_optimise['single_mf'][ options.max_or_min](obj_module.objective, domain=None, fidel_space=None, fidel_to_opt=config.fidel_to_opt, fidel_cost_func=obj_module.cost, max_capital=options.max_capital, config=config, options=options) else: print('%s function on domain %s.' % (_print_prefix, config.domain)) opt_val, opt_pt, history = call_to_optimise['single'][ options.max_or_min](obj_module.objective, domain=None, max_capital=options.max_capital, config=config, options=options) print('Optimum Value in %d evals: %0.4f' % (len(history.curr_opt_points), opt_val)) print('Optimum Point: %s.' % (opt_pt)) else: if is_mf: raise ValueError( 'Multi-objective multi-fidelity optimisation has not been ' + 'implemented yet.') else: print( '%s multiobjective functions on domain %s with %d functions.' % (_print_prefix, config.domain, len(obj_module.objectives))) pareto_values, pareto_points, history = \ call_to_optimise['multi'][options.max_or_min](obj_module.objectives, domain=None, max_capital=options.max_capital, config=config, options=options) num_pareto_points = len(pareto_points) print('Found %d Pareto Points: %s.' % (num_pareto_points, pareto_points)) print('Corresponding Pareto Values: %s.' % (pareto_values))